/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc_link_dp.h"
#include "link_enc_cfg.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/display/drm_hdcp_helper.h>
#endif
#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include "i2caux_interface.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/display/drm_dp_mst_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>

#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
#define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
#define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);

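/*
 * Map the dongle type reported in the link's DPCD caps to the DRM
 * subconnector type exposed to userspace via the DP subconnector property.
 */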
static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * Initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config.
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static void handle_hpd_rx_irq(void *param);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state);
/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
	}
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

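/*
 * Look up the amdgpu_crtc driving the given OTG instance; falls back to
 * CRTC 0 (with a warning) when the instance is -1.
 */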
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (WARN_ON(otg_inst == -1))
		return adev->mode_info.crtcs[0];

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

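/*
 * VRR (FreeSync) state helpers: the _irq variant reads the IRQ-time copy
 * of the freesync config, the other reads the atomic CRTC state.
 */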
static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
					      struct dm_crtc_state *new_state)
{
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
		return true;
	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
		return true;
	else
		return false;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DC_LOG_PFLIP("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
			     amdgpu_crtc->pflip_status,
			     AMDGPU_FLIP_SUBMITTED,
			     amdgpu_crtc->crtc_id,
			     amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	WARN_ON(!e);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
		     amdgpu_crtc->crtc_id, amdgpu_crtc,
		     vrr_active, (int) !e);
}

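/**
 * dm_vupdate_high_irq() - Handles VUPDATE interrupt
 * @interrupt_params: used for determining the VUPDATE instance
 *
 * Tracks the measured per-frame refresh rate and, in VRR mode, performs
 * core vblank handling after the end of front-porch, plus BTR processing
 * on pre-DCE12 ASICs.
 */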
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct drm_device *drm_dev;
	struct drm_vblank_crtc *vblank;
	ktime_t frame_duration_ns, previous_timestamp;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
		drm_dev = acrtc->base.dev;
		vblank = &drm_dev->vblank[acrtc->base.index];
		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
		frame_duration_ns = vblank->time - previous_timestamp;

		if (frame_duration_ns > 0) {
			trace_amdgpu_refresh_rate_track(acrtc->base.index,
						frame_duration_ns,
						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
			atomic64_set(&irq_params->previous_timestamp, vblank->time);
		}

		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/**
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/**
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

	if (!acrtc)
		return;

	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */

/**
 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub AUX or SET_CONFIG command completion processing callback.
 * Copies the dmub notification to DM, where it is read by the AUX command
 * issuing thread, and signals the event to wake up that thread.
 */
static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
					struct dmub_notification *notify)
{
	if (adev->dm.dmub_notify)
		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
		complete(&adev->dm.dmub_aux_transfer_done);
}

/**
 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub Hpd interrupt processing callback. Gets the display index
 * through the link index and calls the helper to do the processing.
 */
static void dmub_hpd_callback(struct amdgpu_device *adev,
			      struct dmub_notification *notify)
{
	struct amdgpu_dm_connector *aconnector;
	struct amdgpu_dm_connector *hpd_aconnector = NULL;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct dc_link *link;
	uint8_t link_index = 0;
	struct drm_device *dev;

	if (adev == NULL)
		return;

	if (notify == NULL) {
		DRM_ERROR("DMUB HPD callback notification was NULL");
		return;
	}

	if (notify->link_index > adev->dm.dc->link_count) {
		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
		return;
	}

	link_index = notify->link_index;
	link = adev->dm.dc->links[link_index];
	dev = adev->dm.ddev;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (link && aconnector->dc_link == link) {
			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
			hpd_aconnector = aconnector;
			break;
		}
	}
	drm_connector_list_iter_end(&iter);

	if (hpd_aconnector) {
		if (notify->type == DMUB_NOTIFICATION_HPD)
			handle_hpd_irq_helper(hpd_aconnector);
		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
			handle_hpd_rx_irq(hpd_aconnector);
	}
}

/**
 * register_dmub_notify_callback - Sets callback for DMUB notify
 * @adev: amdgpu_device pointer
 * @type: Type of dmub notification
 * @callback: Dmub interrupt callback function
 * @dmub_int_thread_offload: offload indicator
 *
 * API to register a dmub callback handler for a dmub notification.
 * Also sets an indicator for whether callback processing is to be
 * offloaded to the dmub interrupt handling thread.
 * Return: true if successfully registered, false if there is existing registration
 */
static bool register_dmub_notify_callback(struct amdgpu_device *adev,
					  enum dmub_notification_type type,
					  dmub_notify_interrupt_callback_t callback,
					  bool dmub_int_thread_offload)
{
	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
		adev->dm.dmub_callback[type] = callback;
		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
	} else
		return false;

	return true;
}

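/*
 * Deferred-work handler for offloaded DMUB notifications: dispatches the
 * saved notification to the registered callback, then frees the work item.
 */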
static void dm_handle_hpd_work(struct work_struct *work)
{
	struct dmub_hpd_work *dmub_hpd_wrk;

	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);

	if (!dmub_hpd_wrk->dmub_notify) {
		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
		return;
	}

	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
							dmub_hpd_wrk->dmub_notify);
	}

	kfree(dmub_hpd_wrk->dmub_notify);
	kfree(dmub_hpd_wrk);

}

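/* Limit on the number of DMUB trace-buffer entries drained per Outbox interrupt. */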
#define DMUB_TRACE_MAX_READ 64
/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox interrupt: dispatches DMUB notifications to the
 * registered callbacks and drains the DMUB trace buffer.
 */
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
	struct dmub_notification notify;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dmcub_trace_buf_entry entry = { 0 };
	uint32_t count = 0;
	struct dmub_hpd_work *dmub_hpd_wrk;
	struct dc_link *plink = NULL;

	if (dc_enable_dmub_notifications(adev->dm.dc) &&
	    irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {

		do {
			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
				DRM_ERROR("DM: notify type %d invalid!", notify.type);
				continue;
			}
			if (!dm->dmub_callback[notify.type]) {
				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
				continue;
			}
			if (dm->dmub_thread_offload[notify.type] == true) {
				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
				if (!dmub_hpd_wrk) {
					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
					return;
				}
				dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
				if (!dmub_hpd_wrk->dmub_notify) {
					kfree(dmub_hpd_wrk);
					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
					return;
				}
				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
				if (dmub_hpd_wrk->dmub_notify)
					memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
				dmub_hpd_wrk->adev = adev;
				if (notify.type == DMUB_NOTIFICATION_HPD) {
					plink = adev->dm.dc->links[notify.link_index];
					if (plink) {
						plink->hpd_status =
							notify.hpd_status == DP_HPD_PLUG;
					}
				}
				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
			} else {
				dm->dmub_callback[notify.type](adev, &notify);
			}
		} while (notify.pending_notification);
	}


	do {
		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
							 entry.param0, entry.param1);

			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
					 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
		} else
			break;

		count++;

	} while (count <= DMUB_TRACE_MAX_READ);

	if (count > DMUB_TRACE_MAX_READ)
		DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ");
}

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;


	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}

	}

}

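/*
 * DRM audio component glue: lets the HDA driver query ELD data and
 * connection state for a given audio pin from the DM connector list.
 */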
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

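/*
 * Load the DMUB firmware into its framebuffer regions and bring the DMCUB
 * hardware up. Returns 0 when DMUB is unsupported on the ASIC, a negative
 * error code on failure.
 */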
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	/* Reset DMCUB if it was previously running - before we overwrite its memory. */
	status = dmub_srv_hw_reset(dmub_srv);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Error resetting DMUB HW: %d\n", status);

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	switch (adev->ip_versions[DCE_HWIP][0]) {
	case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
		hw_params.dpia_supported = true;
		hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
		break;
	default:
		break;
	}

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	if (!adev->dm.dc->ctx->dmub_srv)
		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

static void dm_dmub_hw_resume(struct amdgpu_device *adev)
{
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	enum dmub_status status;
	bool init;

	if (!dmub_srv) {
		/* DMUB isn't supported on the ASIC. */
		return;
	}

	status = dmub_srv_is_hw_init(dmub_srv, &init);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("DMUB hardware init check failed: %d\n", status);

	if (status == DMUB_STATUS_OK && init) {
		/* Wait for firmware load to finish. */
		status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
		if (status != DMUB_STATUS_OK)
			DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
	} else {
		/* Perform the full hardware initialization. */
		dm_dmub_hw_init(adev);
	}
}

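/*
 * Build the DC physical address space configuration (system aperture,
 * AGP window and GART page table addresses) from the GMC settings.
 */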
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	memset(pa_config, 0, sizeof(*pa_config));

	logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increase system aperture high address (add 1)
		 * to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;


	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;

}

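/*
 * Worker for deferred vblank enable/disable handling: tracks the number of
 * CRTCs with vblank IRQs enabled, toggles DC idle optimizations (MALL)
 * accordingly, and gates PSR entry/exit on the OS vblank requirements.
 */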
static void vblank_control_worker(struct work_struct *work)
{
	struct vblank_control_work *vblank_work =
		container_of(work, struct vblank_control_work, work);
	struct amdgpu_display_manager *dm = vblank_work->dm;

	mutex_lock(&dm->dc_lock);

	if (vblank_work->enable)
		dm->active_vblank_irq_count++;
	else if (dm->active_vblank_irq_count)
		dm->active_vblank_irq_count--;

	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

	/* Control PSR based on vblank requirements from OS */
	if (vblank_work->stream && vblank_work->stream->link) {
		if (vblank_work->enable) {
			if (vblank_work->stream->link->psr_settings.psr_allow_active)
				amdgpu_dm_psr_disable(vblank_work->stream);
		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
			amdgpu_dm_psr_enable(vblank_work->stream);
		}
	}

	mutex_unlock(&dm->dc_lock);

	dc_stream_release(vblank_work->stream);

	kfree(vblank_work);
}

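/*
 * Offloaded HPD RX IRQ work: re-detects the sink and, outside of GPU
 * reset, handles automated DP test requests and link-loss recovery.
 */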
static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
{
	struct hpd_rx_irq_offload_work *offload_work;
	struct amdgpu_dm_connector *aconnector;
	struct dc_link *dc_link;
	struct amdgpu_device *adev;
	enum dc_connection_type new_connection_type = dc_connection_none;
	unsigned long flags;

	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
	aconnector = offload_work->offload_wq->aconnector;

	if (!aconnector) {
		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
		goto skip;
	}

	adev = drm_to_adev(aconnector->base.dev);
	dc_link = aconnector->dc_link;

	mutex_lock(&aconnector->hpd_lock);
	if (!dc_link_detect_sink(dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");
	mutex_unlock(&aconnector->hpd_lock);

	if (new_connection_type == dc_connection_none)
		goto skip;

	if (amdgpu_in_reset(adev))
		goto skip;

	mutex_lock(&adev->dm.dc_lock);
	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
		dc_link_dp_handle_automated_test(dc_link);
	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
		dc_link_dp_handle_link_loss(dc_link);
		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
		offload_work->offload_wq->is_handling_link_loss = false;
		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
	}
	mutex_unlock(&adev->dm.dc_lock);

skip:
	kfree(offload_work);

}

static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
{
	int max_caps = dc->caps.max_links;
	int i = 0;
	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;

	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);

	if (!hpd_rx_offload_wq)
		return NULL;


	for (i = 0; i < max_caps; i++) {
		hpd_rx_offload_wq[i].wq =
				create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");

		if (hpd_rx_offload_wq[i].wq == NULL) {
			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
			return NULL;
		}

		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
	}

	return hpd_rx_offload_wq;
}

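/*
 * Quirk table of boards known to misbehave with stutter mode enabled,
 * matched on full PCI identity by dm_should_disable_stutter() below.
 */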
struct amdgpu_stutter_quirk {
	u16 chip_vendor;
	u16 chip_device;
	u16 subsys_vendor;
	u16 subsys_device;
	u8 revision;
};

static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
	{ 0, 0, 0, 0, 0 },
};

static bool dm_should_disable_stutter(struct pci_dev *pdev)
{
	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;

	while (p && p->chip_device != 0) {
		if (pdev->vendor == p->chip_vendor &&
		    pdev->device == p->chip_device &&
		    pdev->subsystem_vendor == p->subsys_vendor &&
		    pdev->subsystem_device == p->subsys_device &&
		    pdev->revision == p->revision) {
			return true;
		}
		++p;
	}
	return false;
}

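/*
 * Core DM init: sets up locks and IRQ handling, creates the DC instance
 * from the ASIC/firmware configuration, brings up DMUB, and registers the
 * DRM structures (CRTCs, connectors, encoders) for display support.
 */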
7578ecda 1385static int amdgpu_dm_init(struct amdgpu_device *adev)
4562236b
HW
1386{
1387 struct dc_init_data init_data;
52704fca
BL
1388#ifdef CONFIG_DRM_AMD_DC_HDCP
1389 struct dc_callback_init init_params;
1390#endif
743b9786 1391 int r;
52704fca 1392
4a580877 1393 adev->dm.ddev = adev_to_drm(adev);
4562236b
HW
1394 adev->dm.adev = adev;
1395
4562236b
HW
1396 /* Zero all the fields */
1397 memset(&init_data, 0, sizeof(init_data));
52704fca
BL
1398#ifdef CONFIG_DRM_AMD_DC_HDCP
1399 memset(&init_params, 0, sizeof(init_params));
1400#endif
4562236b 1401
674e78ac 1402 mutex_init(&adev->dm.dc_lock);
6ce8f316 1403 mutex_init(&adev->dm.audio_lock);
ea3b4242 1404 spin_lock_init(&adev->dm.vblank_lock);
674e78ac 1405
4562236b
HW
1406 if(amdgpu_dm_irq_init(adev)) {
1407 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1408 goto error;
1409 }
1410
1411 init_data.asic_id.chip_family = adev->family;
1412
2dc31ca1 1413 init_data.asic_id.pci_revision_id = adev->pdev->revision;
4562236b 1414 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
dae66a04 1415 init_data.asic_id.chip_id = adev->pdev->device;
4562236b 1416
770d13b1 1417 init_data.asic_id.vram_width = adev->gmc.vram_width;
4562236b
HW
1418 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1419 init_data.asic_id.atombios_base_address =
1420 adev->mode_info.atom_context->bios;
1421
1422 init_data.driver = adev;
1423
1424 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1425
1426 if (!adev->dm.cgs_device) {
1427 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1428 goto error;
1429 }
1430
1431 init_data.cgs_device = adev->dm.cgs_device;
1432
4562236b
HW
1433 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1434
fd546bc5
AD
1435 switch (adev->ip_versions[DCE_HWIP][0]) {
1436 case IP_VERSION(2, 1, 0):
1437 switch (adev->dm.dmcub_fw_version) {
1438 case 0: /* development */
1439 case 0x1: /* linux-firmware.git hash 6d9f399 */
1440 case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1441 init_data.flags.disable_dmcu = false;
1442 break;
1443 default:
1444 init_data.flags.disable_dmcu = true;
1445 }
1446 break;
1447 case IP_VERSION(2, 0, 3):
1448 init_data.flags.disable_dmcu = true;
1449 break;
1450 default:
1451 break;
1452 }
1453
60fb100b
AD
1454 switch (adev->asic_type) {
1455 case CHIP_CARRIZO:
1456 case CHIP_STONEY:
1ebcaebd
NK
1457 init_data.flags.gpu_vm_support = true;
1458 break;
60fb100b 1459 default:
1d789535 1460 switch (adev->ip_versions[DCE_HWIP][0]) {
559f591d
AD
1461 case IP_VERSION(1, 0, 0):
1462 case IP_VERSION(1, 0, 1):
a7f520bf
AD
1463 /* enable S/G on PCO and RV2 */
1464 if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1465 (adev->apu_flags & AMD_APU_IS_PICASSO))
1466 init_data.flags.gpu_vm_support = true;
1467 break;
fd546bc5 1468 case IP_VERSION(2, 1, 0):
c08182f2
AD
1469 case IP_VERSION(3, 0, 1):
1470 case IP_VERSION(3, 1, 2):
1471 case IP_VERSION(3, 1, 3):
b5b8ed44 1472 case IP_VERSION(3, 1, 5):
0fe382fb 1473 case IP_VERSION(3, 1, 6):
c08182f2
AD
1474 init_data.flags.gpu_vm_support = true;
1475 break;
c08182f2
AD
1476 default:
1477 break;
1478 }
60fb100b
AD
1479 break;
1480 }
6e227308 1481
a7f520bf
AD
1482 if (init_data.flags.gpu_vm_support)
1483 adev->mode_info.gpu_vm_support = true;
1484
04b94af4
AD
1485 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1486 init_data.flags.fbc_support = true;
1487
d99f38ae
AD
1488 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1489 init_data.flags.multi_mon_pp_mclk_switch = true;
1490
eaf56410
LL
1491 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1492 init_data.flags.disable_fractional_pwm = true;
a5148245
ZL
1493
1494 if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1495 init_data.flags.edp_no_power_sequencing = true;
eaf56410 1496
12320274
AP
1497 if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1498 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1499 if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1500 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
12320274 1501
7aba117a 1502 init_data.flags.seamless_boot_edp_requested = false;
78ad75f8 1503
1edf5ae1 1504 if (check_seamless_boot_capability(adev)) {
7aba117a 1505 init_data.flags.seamless_boot_edp_requested = true;
1edf5ae1
ZL
1506 init_data.flags.allow_seamless_boot_optimization = true;
1507 DRM_INFO("Seamless boot condition check passed\n");
1508 }
1509
a8201902
LM
1510 init_data.flags.enable_mipi_converter_optimization = true;
1511
0dd79532 1512 INIT_LIST_HEAD(&adev->dm.da_list);
4562236b
HW
1513 /* Display Core create. */
1514 adev->dm.dc = dc_create(&init_data);
1515
423788c7 1516 if (adev->dm.dc) {
76121231 1517 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
423788c7 1518 } else {
76121231 1519 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
423788c7
ES
1520 goto error;
1521 }
4562236b 1522
8a791dab
HW
1523 if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1524 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1525 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1526 }
1527
f99d8762
HW
1528 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1529 adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
3ce51649
AD
1530 if (dm_should_disable_stutter(adev->pdev))
1531 adev->dm.dc->debug.disable_stutter = true;
f99d8762 1532
8a791dab
HW
1533 if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1534 adev->dm.dc->debug.disable_stutter = true;
1535
2665f63a 1536 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
8a791dab 1537 adev->dm.dc->debug.disable_dsc = true;
2665f63a
ML
1538 adev->dm.dc->debug.disable_dsc_edp = true;
1539 }
8a791dab
HW
1540
1541 if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1542 adev->dm.dc->debug.disable_clock_gate = true;
1543
743b9786
NK
1544 r = dm_dmub_hw_init(adev);
1545 if (r) {
1546 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1547 goto error;
1548 }
1549
bb6785c1
NK
1550 dc_hardware_init(adev->dm.dc);
1551
8e794421
WL
1552 adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1553 if (!adev->dm.hpd_rx_offload_wq) {
1554 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1555 goto error;
1556 }
1557
3ca001af 1558 if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
e6cd859d
AD
1559 struct dc_phy_addr_space_config pa_config;
1560
0b08c54b 1561 mmhub_read_system_context(adev, &pa_config);
c0fb85ae 1562
0b08c54b
YZ
1563 // Call the DC init_memory func
1564 dc_setup_system_context(adev->dm.dc, &pa_config);
1565 }
c0fb85ae 1566
4562236b
HW
1567 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1568 if (!adev->dm.freesync_module) {
1569 DRM_ERROR(
1570 "amdgpu: failed to initialize freesync_module.\n");
1571 } else
f1ad2f5e 1572 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
4562236b
HW
1573 adev->dm.freesync_module);
1574
e277adc5
LSL
1575 amdgpu_dm_init_color_mod();
1576
ea3b4242 1577 if (adev->dm.dc->caps.max_links > 0) {
09a5df6c
NK
1578 adev->dm.vblank_control_workqueue =
1579 create_singlethread_workqueue("dm_vblank_control_workqueue");
1580 if (!adev->dm.vblank_control_workqueue)
ea3b4242 1581 DRM_ERROR("amdgpu: failed to initialize vblank_control_workqueue.\n");
ea3b4242 1582 }
ea3b4242 1583
52704fca 1584#ifdef CONFIG_DRM_AMD_DC_HDCP
c08182f2 1585 if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
e50dc171 1586 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
52704fca 1587
96a3b32e
BL
1588 if (!adev->dm.hdcp_workqueue)
1589 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1590 else
1591 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
52704fca 1592
96a3b32e
BL
1593 dc_init_callbacks(adev->dm.dc, &init_params);
1594 }
9a65df19
WL
1595#endif
1596#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1597 adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
52704fca 1598#endif
81927e28
JS
1599 if (dc_enable_dmub_notifications(adev->dm.dc)) {
1600 init_completion(&adev->dm.dmub_aux_transfer_done);
1601 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1602 if (!adev->dm.dmub_notify) {
1603 DRM_INFO("amdgpu: failed to allocate adev->dm.dmub_notify\n");
1604 goto error;
1605 }
e27c41d5
JS
1606
1607 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1608 if (!adev->dm.delayed_hpd_wq) {
1609 DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1610 goto error;
1611 }
1612
81927e28 1613 amdgpu_dm_outbox_init(adev);
e27c41d5
JS
1614 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1615 dmub_aux_setconfig_callback, false)) {
1616 DRM_ERROR("amdgpu: failed to register dmub aux callback\n");
1617 goto error;
1618 }
1619 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1620 DRM_ERROR("amdgpu: failed to register dmub hpd callback\n");
1621 goto error;
1622 }
c40a09e5
NK
1623 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1624 DRM_ERROR("amdgpu: failed to register dmub hpd_irq callback\n");
1625 goto error;
1626 }
81927e28
JS
1627 }
1628
4562236b
HW
1629 if (amdgpu_dm_initialize_drm_device(adev)) {
1630 DRM_ERROR(
1631 "amdgpu: failed to initialize sw for display support.\n");
1632 goto error;
1633 }
1634
f74367e4
AD
1635 /* create fake encoders for MST */
1636 dm_dp_create_fake_mst_encoders(adev);
1637
4562236b
HW
1638 /* TODO: Add_display_info? */
1639
1640 /* TODO use dynamic cursor width */
4a580877
LT
1641 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1642 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
4562236b 1643
4a580877 1644 if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
4562236b
HW
1645 DRM_ERROR(
1646 "amdgpu: failed to initialize sw for display support.\n");
1647 goto error;
1648 }
1649
c0fb85ae 1650
f1ad2f5e 1651 DRM_DEBUG_DRIVER("KMS initialized.\n");
4562236b
HW
1652
1653 return 0;
1654error:
1655 amdgpu_dm_fini(adev);
1656
59d0f396 1657 return -EINVAL;
4562236b
HW
1658}
1659
e9669fb7
AG
1660static int amdgpu_dm_early_fini(void *handle)
1661{
1662 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1663
1664 amdgpu_dm_audio_fini(adev);
1665
1666 return 0;
1667}
1668
7578ecda 1669static void amdgpu_dm_fini(struct amdgpu_device *adev)
4562236b 1670{
f74367e4
AD
1671 int i;
1672
09a5df6c
NK
1673 if (adev->dm.vblank_control_workqueue) {
1674 destroy_workqueue(adev->dm.vblank_control_workqueue);
1675 adev->dm.vblank_control_workqueue = NULL;
1676 }
09a5df6c 1677
f74367e4
AD
1678 for (i = 0; i < adev->dm.display_indexes_num; i++) {
1679 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1680 }
1681
4562236b 1682 amdgpu_dm_destroy_drm_device(&adev->dm);
c8bdf2b6 1683
9a65df19
WL
1684#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1685 if (adev->dm.crc_rd_wrk) {
1686 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1687 kfree(adev->dm.crc_rd_wrk);
1688 adev->dm.crc_rd_wrk = NULL;
1689 }
1690#endif
52704fca
BL
1691#ifdef CONFIG_DRM_AMD_DC_HDCP
1692 if (adev->dm.hdcp_workqueue) {
e96b1b29 1693 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
52704fca
BL
1694 adev->dm.hdcp_workqueue = NULL;
1695 }
1696
1697 if (adev->dm.dc)
1698 dc_deinit_callbacks(adev->dm.dc);
1699#endif
51ba6912 1700
3beac533 1701 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
9a71c7d3 1702
81927e28
JS
1703 if (dc_enable_dmub_notifications(adev->dm.dc)) {
1704 kfree(adev->dm.dmub_notify);
1705 adev->dm.dmub_notify = NULL;
e27c41d5
JS
1706 destroy_workqueue(adev->dm.delayed_hpd_wq);
1707 adev->dm.delayed_hpd_wq = NULL;
81927e28
JS
1708 }
1709
743b9786
NK
1710 if (adev->dm.dmub_bo)
1711 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1712 &adev->dm.dmub_bo_gpu_addr,
1713 &adev->dm.dmub_bo_cpu_addr);
52704fca 1714
006c26a0
AG
1715 if (adev->dm.hpd_rx_offload_wq) {
1716 for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1717 if (adev->dm.hpd_rx_offload_wq[i].wq) {
1718 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1719 adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1720 }
1721 }
1722
1723 kfree(adev->dm.hpd_rx_offload_wq);
1724 adev->dm.hpd_rx_offload_wq = NULL;
1725 }
1726
c8bdf2b6
ED
1727 /* DC Destroy TODO: Replace destroy DAL */
1728 if (adev->dm.dc)
1729 dc_destroy(&adev->dm.dc);
4562236b
HW
1730 /*
1731 * TODO: pageflip, vblank interrupt
1732 *
1733 * amdgpu_dm_irq_fini(adev);
1734 */
1735
1736 if (adev->dm.cgs_device) {
1737 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1738 adev->dm.cgs_device = NULL;
1739 }
1740 if (adev->dm.freesync_module) {
1741 mod_freesync_destroy(adev->dm.freesync_module);
1742 adev->dm.freesync_module = NULL;
1743 }
674e78ac 1744
6ce8f316 1745 mutex_destroy(&adev->dm.audio_lock);
674e78ac
NK
1746 mutex_destroy(&adev->dm.dc_lock);
1747
4562236b
HW
1748 return;
1749}
1750
a94d5569 1751static int load_dmcu_fw(struct amdgpu_device *adev)
4562236b 1752{
a7669aff 1753 const char *fw_name_dmcu = NULL;
a94d5569
DF
1754 int r;
1755 const struct dmcu_firmware_header_v1_0 *hdr;
1756
1757 switch (adev->asic_type) {
55e56389
MR
1758#if defined(CONFIG_DRM_AMD_DC_SI)
1759 case CHIP_TAHITI:
1760 case CHIP_PITCAIRN:
1761 case CHIP_VERDE:
1762 case CHIP_OLAND:
1763#endif
a94d5569
DF
1764 case CHIP_BONAIRE:
1765 case CHIP_HAWAII:
1766 case CHIP_KAVERI:
1767 case CHIP_KABINI:
1768 case CHIP_MULLINS:
1769 case CHIP_TONGA:
1770 case CHIP_FIJI:
1771 case CHIP_CARRIZO:
1772 case CHIP_STONEY:
1773 case CHIP_POLARIS11:
1774 case CHIP_POLARIS10:
1775 case CHIP_POLARIS12:
1776 case CHIP_VEGAM:
1777 case CHIP_VEGA10:
1778 case CHIP_VEGA12:
1779 case CHIP_VEGA20:
1780 return 0;
5ea23931
RL
1781 case CHIP_NAVI12:
1782 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1783 break;
a94d5569 1784 case CHIP_RAVEN:
a7669aff
HW
1785 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1786 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1787 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1788 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1789 else
a7669aff 1790 return 0;
a94d5569
DF
1791 break;
1792 default:
1d789535 1793 switch (adev->ip_versions[DCE_HWIP][0]) {
c08182f2
AD
1794 case IP_VERSION(2, 0, 2):
1795 case IP_VERSION(2, 0, 3):
1796 case IP_VERSION(2, 0, 0):
1797 case IP_VERSION(2, 1, 0):
1798 case IP_VERSION(3, 0, 0):
1799 case IP_VERSION(3, 0, 2):
1800 case IP_VERSION(3, 0, 3):
1801 case IP_VERSION(3, 0, 1):
1802 case IP_VERSION(3, 1, 2):
1803 case IP_VERSION(3, 1, 3):
b5b8ed44 1804 case IP_VERSION(3, 1, 5):
de7cc1b4 1805 case IP_VERSION(3, 1, 6):
c08182f2
AD
1806 return 0;
1807 default:
1808 break;
1809 }
a94d5569 1810 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
59d0f396 1811 return -EINVAL;
a94d5569
DF
1812 }
1813
1814 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1815 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1816 return 0;
1817 }
1818
1819 r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1820 if (r == -ENOENT) {
1821 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1822 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1823 adev->dm.fw_dmcu = NULL;
1824 return 0;
1825 }
1826 if (r) {
1827 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1828 fw_name_dmcu);
1829 return r;
1830 }
1831
1832 r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1833 if (r) {
1834 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1835 fw_name_dmcu);
1836 release_firmware(adev->dm.fw_dmcu);
1837 adev->dm.fw_dmcu = NULL;
1838 return r;
1839 }
1840
1841 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1842 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1843 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1844 adev->firmware.fw_size +=
1845 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1846
1847 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1848 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1849 adev->firmware.fw_size +=
1850 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1851
ee6e89c0
DF
1852 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1853
a94d5569
DF
1854 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1855
4562236b
HW
1856 return 0;
1857}
1858
743b9786
NK
1859static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1860{
1861 struct amdgpu_device *adev = ctx;
1862
1863 return dm_read_reg(adev->dm.dc->ctx, address);
1864}
1865
1866static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1867 uint32_t value)
1868{
1869 struct amdgpu_device *adev = ctx;
1870
1871 return dm_write_reg(adev->dm.dc->ctx, address, value);
1872}
1873
1874static int dm_dmub_sw_init(struct amdgpu_device *adev)
1875{
1876 struct dmub_srv_create_params create_params;
8c7aea40
NK
1877 struct dmub_srv_region_params region_params;
1878 struct dmub_srv_region_info region_info;
1879 struct dmub_srv_fb_params fb_params;
1880 struct dmub_srv_fb_info *fb_info;
1881 struct dmub_srv *dmub_srv;
743b9786
NK
1882 const struct dmcub_firmware_header_v1_0 *hdr;
1883 const char *fw_name_dmub;
1884 enum dmub_asic dmub_asic;
1885 enum dmub_status status;
1886 int r;
1887
1d789535 1888 switch (adev->ip_versions[DCE_HWIP][0]) {
c08182f2 1889 case IP_VERSION(2, 1, 0):
743b9786
NK
1890 dmub_asic = DMUB_ASIC_DCN21;
1891 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
71c0fd92
RL
1892 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1893 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
743b9786 1894 break;
c08182f2 1895 case IP_VERSION(3, 0, 0):
1d789535 1896 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
c08182f2
AD
1897 dmub_asic = DMUB_ASIC_DCN30;
1898 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1899 } else {
1900 dmub_asic = DMUB_ASIC_DCN30;
1901 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1902 }
79037324 1903 break;
c08182f2 1904 case IP_VERSION(3, 0, 1):
469989ca
RL
1905 dmub_asic = DMUB_ASIC_DCN301;
1906 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1907 break;
c08182f2 1908 case IP_VERSION(3, 0, 2):
2a411205
BL
1909 dmub_asic = DMUB_ASIC_DCN302;
1910 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1911 break;
c08182f2 1912 case IP_VERSION(3, 0, 3):
656fe9b6
AP
1913 dmub_asic = DMUB_ASIC_DCN303;
1914 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1915 break;
c08182f2
AD
1916 case IP_VERSION(3, 1, 2):
1917 case IP_VERSION(3, 1, 3):
3137f792 1918 dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1ebcaebd
NK
1919 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1920 break;
b5b8ed44
QZ
1921 case IP_VERSION(3, 1, 5):
1922 dmub_asic = DMUB_ASIC_DCN315;
1923 fw_name_dmub = FIRMWARE_DCN_315_DMUB;
1924 break;
de7cc1b4 1925 case IP_VERSION(3, 1, 6):
868f4357 1926 dmub_asic = DMUB_ASIC_DCN316;
de7cc1b4
PL
1927 fw_name_dmub = FIRMWARE_DCN316_DMUB;
1928 break;
743b9786
NK
1929 default:
1930 /* ASIC doesn't support DMUB. */
1931 return 0;
1932 }
1933
743b9786
NK
1934 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1935 if (r) {
1936 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1937 return 0;
1938 }
1939
1940 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1941 if (r) {
1942 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1943 return 0;
1944 }
1945
743b9786 1946 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
72a74a18 1947 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
743b9786 1948
9a6ed547
NK
1949 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1950 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1951 AMDGPU_UCODE_ID_DMCUB;
1952 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1953 adev->dm.dmub_fw;
1954 adev->firmware.fw_size +=
1955 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
743b9786 1956
9a6ed547
NK
1957 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1958 adev->dm.dmcub_fw_version);
1959 }
1960
743b9786 1961
8c7aea40
NK
1962 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1963 dmub_srv = adev->dm.dmub_srv;
1964
1965 if (!dmub_srv) {
1966 DRM_ERROR("Failed to allocate DMUB service!\n");
1967 return -ENOMEM;
1968 }
1969
1970 memset(&create_params, 0, sizeof(create_params));
1971 create_params.user_ctx = adev;
1972 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1973 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1974 create_params.asic = dmub_asic;
1975
1976 /* Create the DMUB service. */
1977 status = dmub_srv_create(dmub_srv, &create_params);
1978 if (status != DMUB_STATUS_OK) {
1979 DRM_ERROR("Error creating DMUB service: %d\n", status);
1980 return -EINVAL;
1981 }
1982
1983 /* Calculate the size of all the regions for the DMUB service. */
1984 memset(&region_params, 0, sizeof(region_params));
1985
1986 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1987 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1988 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1989 region_params.vbios_size = adev->bios_size;
0922b899 1990 region_params.fw_bss_data = region_params.bss_data_size ?
1f0674fd
NK
1991 adev->dm.dmub_fw->data +
1992 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
0922b899 1993 le32_to_cpu(hdr->inst_const_bytes) : NULL;
a576b345
NK
1994 region_params.fw_inst_const =
1995 adev->dm.dmub_fw->data +
1996 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1997 PSP_HEADER_BYTES;
8c7aea40
NK
1998
1999 status = dmub_srv_calc_region_info(dmub_srv, &region_params,
2000 &region_info);
2001
2002 if (status != DMUB_STATUS_OK) {
2003 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
2004 return -EINVAL;
2005 }
2006
2007 /*
2008 * Allocate a framebuffer based on the total size of all the regions.
2009 * TODO: Move this into GART.
2010 */
2011 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2012 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
2013 &adev->dm.dmub_bo_gpu_addr,
2014 &adev->dm.dmub_bo_cpu_addr);
2015 if (r)
2016 return r;
2017
2018 /* Rebase the regions on the framebuffer address. */
2019 memset(&fb_params, 0, sizeof(fb_params));
2020 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
2021 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
2022 fb_params.region_info = &region_info;
2023
2024 adev->dm.dmub_fb_info =
2025 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2026 fb_info = adev->dm.dmub_fb_info;
2027
2028 if (!fb_info) {
2029 DRM_ERROR(
2030 "Failed to allocate framebuffer info for DMUB service!\n");
2031 return -ENOMEM;
2032 }
2033
2034 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
2035 if (status != DMUB_STATUS_OK) {
2036 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2037 return -EINVAL;
2038 }
2039
743b9786
NK
2040 return 0;
2041}
2042
a94d5569
DF
2043static int dm_sw_init(void *handle)
2044{
2045 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
743b9786
NK
2046 int r;
2047
2048 r = dm_dmub_sw_init(adev);
2049 if (r)
2050 return r;
a94d5569
DF
2051
2052 return load_dmcu_fw(adev);
2053}
2054
4562236b
HW
2055static int dm_sw_fini(void *handle)
2056{
a94d5569
DF
2057 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2058
8c7aea40
NK
2059 kfree(adev->dm.dmub_fb_info);
2060 adev->dm.dmub_fb_info = NULL;
2061
743b9786
NK
2062 if (adev->dm.dmub_srv) {
2063 dmub_srv_destroy(adev->dm.dmub_srv);
2064 adev->dm.dmub_srv = NULL;
2065 }
2066
75e1658e
ND
2067 release_firmware(adev->dm.dmub_fw);
2068 adev->dm.dmub_fw = NULL;
743b9786 2069
75e1658e
ND
2070 release_firmware(adev->dm.fw_dmcu);
2071 adev->dm.fw_dmcu = NULL;
a94d5569 2072
4562236b
HW
2073 return 0;
2074}
2075
7abcf6b5 2076static int detect_mst_link_for_all_connectors(struct drm_device *dev)
4562236b 2077{
c84dec2f 2078 struct amdgpu_dm_connector *aconnector;
4562236b 2079 struct drm_connector *connector;
f8d2d39e 2080 struct drm_connector_list_iter iter;
7abcf6b5 2081 int ret = 0;
4562236b 2082
f8d2d39e
LP
2083 drm_connector_list_iter_begin(dev, &iter);
2084 drm_for_each_connector_iter(connector, &iter) {
b349f76e 2085 aconnector = to_amdgpu_dm_connector(connector);
30ec2b97
JFZ
2086 if (aconnector->dc_link->type == dc_connection_mst_branch &&
2087 aconnector->mst_mgr.aux) {
f1ad2f5e 2088 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
f8d2d39e
LP
2089 aconnector,
2090 aconnector->base.base.id);
7abcf6b5
AG
2091
2092 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2093 if (ret < 0) {
2094 DRM_ERROR("DM_MST: Failed to start MST\n");
f8d2d39e
LP
2095 aconnector->dc_link->type =
2096 dc_connection_single;
2097 break;
7abcf6b5 2098 }
f8d2d39e 2099 }
4562236b 2100 }
f8d2d39e 2101 drm_connector_list_iter_end(&iter);
4562236b 2102
7abcf6b5
AG
2103 return ret;
2104}
2105
2106static int dm_late_init(void *handle)
2107{
42e67c3b 2108 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7abcf6b5 2109
bbf854dc
DF
2110 struct dmcu_iram_parameters params;
2111 unsigned int linear_lut[16];
2112 int i;
17bdb4a8 2113 struct dmcu *dmcu = NULL;
bbf854dc 2114
17bdb4a8
JFZ
2115 dmcu = adev->dm.dc->res_pool->dmcu;
2116
bbf854dc
DF
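 /* Build a 16-entry linear LUT spanning 0..0xFFFF; it is passed below as
 * the backlight LUT (params.backlight_lut_array).
 */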
2117 for (i = 0; i < 16; i++)
2118 linear_lut[i] = 0xFFFF * i / 15;
2119
2120 params.set = 0;
75068994 2121 params.backlight_ramping_override = false;
bbf854dc
DF
2122 params.backlight_ramping_start = 0xCCCC;
2123 params.backlight_ramping_reduction = 0xCCCCCCCC;
2124 params.backlight_lut_array_size = 16;
2125 params.backlight_lut_array = linear_lut;
2126
2ad0cdf9
AK
2127 /* Min backlight level after ABM reduction; don't allow below 1%:
2128 * 0xFFFF x 0.01 = 0x28F
2129 */
2130 params.min_abm_backlight = 0x28F;
5cb32419 2131 /* In the case where abm is implemented on dmcub,
6e568e43
JW
2132 * the dmcu object will be NULL.
2133 * ABM 2.4 and up are implemented on dmcub.
2134 */
2135 if (dmcu) {
2136 if (!dmcu_load_iram(dmcu, params))
2137 return -EINVAL;
2138 } else if (adev->dm.dc->ctx->dmub_srv) {
2139 struct dc_link *edp_links[MAX_NUM_EDP];
2140 int edp_num;
bbf854dc 2141
6e568e43
JW
2142 get_edp_links(adev->dm.dc, edp_links, &edp_num);
2143 for (i = 0; i < edp_num; i++) {
2144 if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2145 return -EINVAL;
2146 }
2147 }
bbf854dc 2148
4a580877 2149 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
4562236b
HW
2150}
2151
2152static void s3_handle_mst(struct drm_device *dev, bool suspend)
2153{
c84dec2f 2154 struct amdgpu_dm_connector *aconnector;
4562236b 2155 struct drm_connector *connector;
f8d2d39e 2156 struct drm_connector_list_iter iter;
fe7553be
LP
2157 struct drm_dp_mst_topology_mgr *mgr;
2158 int ret;
2159 bool need_hotplug = false;
4562236b 2160
f8d2d39e
LP
2161 drm_connector_list_iter_begin(dev, &iter);
2162 drm_for_each_connector_iter(connector, &iter) {
fe7553be
LP
2163 aconnector = to_amdgpu_dm_connector(connector);
2164 if (aconnector->dc_link->type != dc_connection_mst_branch ||
2165 aconnector->mst_port)
2166 continue;
2167
2168 mgr = &aconnector->mst_mgr;
2169
2170 if (suspend) {
2171 drm_dp_mst_topology_mgr_suspend(mgr);
2172 } else {
6f85f738 2173 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
fe7553be
LP
2174 if (ret < 0) {
2175 drm_dp_mst_topology_mgr_set_mst(mgr, false);
2176 need_hotplug = true;
2177 }
2178 }
4562236b 2179 }
f8d2d39e 2180 drm_connector_list_iter_end(&iter);
fe7553be
LP
2181
2182 if (need_hotplug)
2183 drm_kms_helper_hotplug_event(dev);
4562236b
HW
2184}
2185
9340dfd3
HW
2186static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2187{
9340dfd3
HW
2188 int ret = 0;
2189
9340dfd3
HW
2190 /* This interface is for dGPU Navi1x. Linux dc-pplib interface depends
2191 * on the Windows driver dc implementation.
2192 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
2193 * should be passed to smu during boot up and resume from s3.
2194 * boot up: dc calculate dcn watermark clock settings within dc_create,
2195 * dcn20_resource_construct
2196 * then call pplib functions below to pass the settings to smu:
2197 * smu_set_watermarks_for_clock_ranges
2198 * smu_set_watermarks_table
2199 * navi10_set_watermarks_table
2200 * smu_write_watermarks_table
2201 *
2202 * For Renoir, clock settings of dcn watermark are also fixed values.
2203 * dc has implemented different flow for window driver:
2204 * dc_hardware_init / dc_set_power_state
2205 * dcn10_init_hw
2206 * notify_wm_ranges
2207 * set_wm_ranges
2208 * -- Linux
2209 * smu_set_watermarks_for_clock_ranges
2210 * renoir_set_watermarks_table
2211 * smu_write_watermarks_table
2212 *
2213 * For Linux,
2214 * dc_hardware_init -> amdgpu_dm_init
2215 * dc_set_power_state --> dm_resume
2216 *
2217 * Therefore, this function applies to navi10/12/14 but not Renoir.
2218 *
2219 */
1d789535 2220 switch (adev->ip_versions[DCE_HWIP][0]) {
c08182f2
AD
2221 case IP_VERSION(2, 0, 2):
2222 case IP_VERSION(2, 0, 0):
9340dfd3
HW
2223 break;
2224 default:
2225 return 0;
2226 }
2227
13f5dbd6 2228 ret = amdgpu_dpm_write_watermarks_table(adev);
e7a95eea
EQ
2229 if (ret) {
2230 DRM_ERROR("Failed to update WMTABLE!\n");
2231 return ret;
9340dfd3
HW
2232 }
2233
9340dfd3
HW
2234 return 0;
2235}
2236
b8592b48
LL
2237/**
2238 * dm_hw_init() - Initialize DC device
28d687ea 2239 * @handle: The base driver device containing the amdgpu_dm device.
b8592b48
LL
2240 *
2241 * Initialize the &struct amdgpu_display_manager device. This involves calling
2242 * the initializers of each DM component, then populating the struct with them.
2243 *
2244 * Although the function implies hardware initialization, both hardware and
2245 * software are initialized here. Splitting them out to their relevant init
2246 * hooks is a future TODO item.
2247 *
2248 * Some notable things that are initialized here:
2249 *
2250 * - Display Core, both software and hardware
2251 * - DC modules that we need (freesync and color management)
2252 * - DRM software states
2253 * - Interrupt sources and handlers
2254 * - Vblank support
2255 * - Debug FS entries, if enabled
2256 */
4562236b
HW
2257static int dm_hw_init(void *handle)
2258{
2259 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2260 /* Create DAL display manager */
2261 amdgpu_dm_init(adev);
4562236b
HW
2262 amdgpu_dm_hpd_init(adev);
2263
4562236b
HW
2264 return 0;
2265}
2266
b8592b48
LL
2267/**
2268 * dm_hw_fini() - Teardown DC device
28d687ea 2269 * @handle: The base driver device containing the amdgpu_dm device.
b8592b48
LL
2270 *
2271 * Teardown components within &struct amdgpu_display_manager that require
2272 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2273 * were loaded. Also flush IRQ workqueues and disable them.
2274 */
4562236b
HW
2275static int dm_hw_fini(void *handle)
2276{
2277 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2278
2279 amdgpu_dm_hpd_fini(adev);
2280
2281 amdgpu_dm_irq_fini(adev);
21de3396 2282 amdgpu_dm_fini(adev);
4562236b
HW
2283 return 0;
2284}
2285
cdaae837
BL
2286
2287static int dm_enable_vblank(struct drm_crtc *crtc);
2288static void dm_disable_vblank(struct drm_crtc *crtc);
2289
2290static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2291 struct dc_state *state, bool enable)
2292{
2293 enum dc_irq_source irq_source;
2294 struct amdgpu_crtc *acrtc;
2295 int rc = -EBUSY;
2296 int i = 0;
2297
2298 for (i = 0; i < state->stream_count; i++) {
2299 acrtc = get_crtc_by_otg_inst(
2300 adev, state->stream_status[i].primary_otg_inst);
2301
2302 if (acrtc && state->stream_status[i].plane_count != 0) {
2303 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2304 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4711c033
LT
2305 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
2306 acrtc->crtc_id, enable ? "en" : "dis", rc);
cdaae837
BL
2307 if (rc)
2308 DRM_WARN("Failed to %s pflip interrupts\n",
2309 enable ? "enable" : "disable");
2310
2311 if (enable) {
2312 rc = dm_enable_vblank(&acrtc->base);
2313 if (rc)
2314 DRM_WARN("Failed to enable vblank interrupts\n");
2315 } else {
2316 dm_disable_vblank(&acrtc->base);
2317 }
2318
2319 }
2320 }
2321
2322}
2323
dfd84d90 2324static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
cdaae837
BL
2325{
2326 struct dc_state *context = NULL;
2327 enum dc_status res = DC_ERROR_UNEXPECTED;
2328 int i;
2329 struct dc_stream_state *del_streams[MAX_PIPES];
2330 int del_streams_count = 0;
2331
2332 memset(del_streams, 0, sizeof(del_streams));
2333
2334 context = dc_create_state(dc);
2335 if (context == NULL)
2336 goto context_alloc_fail;
2337
2338 dc_resource_state_copy_construct_current(dc, context);
2339
2340 /* First, remove all streams from the context */
2341 for (i = 0; i < context->stream_count; i++) {
2342 struct dc_stream_state *stream = context->streams[i];
2343
2344 del_streams[del_streams_count++] = stream;
2345 }
2346
2347 /* Remove all planes for removed streams and then remove the streams */
2348 for (i = 0; i < del_streams_count; i++) {
2349 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2350 res = DC_FAIL_DETACH_SURFACES;
2351 goto fail;
2352 }
2353
2354 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2355 if (res != DC_OK)
2356 goto fail;
2357 }
2358
cdaae837
BL
2359 res = dc_commit_state(dc, context);
2360
2361fail:
2362 dc_release_state(context);
2363
2364context_alloc_fail:
2365 return res;
2366}
2367
8e794421
WL
2368static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2369{
2370 int i;
2371
2372 if (dm->hpd_rx_offload_wq) {
2373 for (i = 0; i < dm->dc->caps.max_links; i++)
2374 flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2375 }
2376}
2377
4562236b
HW
2378static int dm_suspend(void *handle)
2379{
2380 struct amdgpu_device *adev = handle;
2381 struct amdgpu_display_manager *dm = &adev->dm;
2382 int ret = 0;
4562236b 2383
53b3f8f4 2384 if (amdgpu_in_reset(adev)) {
cdaae837 2385 mutex_lock(&dm->dc_lock);
98ab5f35 2386
98ab5f35 2387 dc_allow_idle_optimizations(adev->dm.dc, false);
98ab5f35 2388
cdaae837
BL
2389 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2390
2391 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2392
2393 amdgpu_dm_commit_zero_streams(dm->dc);
2394
2395 amdgpu_dm_irq_suspend(adev);
2396
8e794421
WL
2397 hpd_rx_irq_work_suspend(dm);
2398
cdaae837
BL
2399 return ret;
2400 }
4562236b 2401
d2f0b53b 2402 WARN_ON(adev->dm.cached_state);
4a580877 2403 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
d2f0b53b 2404
4a580877 2405 s3_handle_mst(adev_to_drm(adev), true);
4562236b 2406
4562236b
HW
2407 amdgpu_dm_irq_suspend(adev);
2408
8e794421
WL
2409 hpd_rx_irq_work_suspend(dm);
2410
32f5062d 2411 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
4562236b 2412
1c2075d4 2413 return 0;
4562236b
HW
2414}
2415
17ce8a69 2416struct amdgpu_dm_connector *
1daf8c63
AD
2417amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2418 struct drm_crtc *crtc)
4562236b
HW
2419{
2420 uint32_t i;
c2cea706 2421 struct drm_connector_state *new_con_state;
4562236b
HW
2422 struct drm_connector *connector;
2423 struct drm_crtc *crtc_from_state;
2424
c2cea706
LSL
2425 for_each_new_connector_in_state(state, connector, new_con_state, i) {
2426 crtc_from_state = new_con_state->crtc;
4562236b
HW
2427
2428 if (crtc_from_state == crtc)
c84dec2f 2429 return to_amdgpu_dm_connector(connector);
4562236b
HW
2430 }
2431
2432 return NULL;
2433}
2434
fbbdadf2
BL
2435static void emulated_link_detect(struct dc_link *link)
2436{
2437 struct dc_sink_init_data sink_init_data = { 0 };
2438 struct display_sink_capability sink_caps = { 0 };
2439 enum dc_edid_status edid_status;
2440 struct dc_context *dc_ctx = link->ctx;
2441 struct dc_sink *sink = NULL;
2442 struct dc_sink *prev_sink = NULL;
2443
2444 link->type = dc_connection_none;
2445 prev_sink = link->local_sink;
2446
30164a16
VL
2447 if (prev_sink)
2448 dc_sink_release(prev_sink);
fbbdadf2
BL
2449
2450 switch (link->connector_signal) {
2451 case SIGNAL_TYPE_HDMI_TYPE_A: {
2452 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2453 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2454 break;
2455 }
2456
2457 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2458 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2459 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2460 break;
2461 }
2462
2463 case SIGNAL_TYPE_DVI_DUAL_LINK: {
2464 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2465 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2466 break;
2467 }
2468
2469 case SIGNAL_TYPE_LVDS: {
2470 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2471 sink_caps.signal = SIGNAL_TYPE_LVDS;
2472 break;
2473 }
2474
2475 case SIGNAL_TYPE_EDP: {
2476 sink_caps.transaction_type =
2477 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2478 sink_caps.signal = SIGNAL_TYPE_EDP;
2479 break;
2480 }
2481
2482 case SIGNAL_TYPE_DISPLAY_PORT: {
2483 sink_caps.transaction_type =
2484 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2485 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2486 break;
2487 }
2488
2489 default:
2490 DC_ERROR("Invalid connector type! signal:%d\n",
2491 link->connector_signal);
2492 return;
2493 }
2494
2495 sink_init_data.link = link;
2496 sink_init_data.sink_signal = sink_caps.signal;
2497
2498 sink = dc_sink_create(&sink_init_data);
2499 if (!sink) {
2500 DC_ERROR("Failed to create sink!\n");
2501 return;
2502 }
2503
dcd5fb82 2504 /* dc_sink_create returns a new reference */
fbbdadf2
BL
2505 link->local_sink = sink;
2506
2507 edid_status = dm_helpers_read_local_edid(
2508 link->ctx,
2509 link,
2510 sink);
2511
2512 if (edid_status != EDID_OK)
2513 DC_ERROR("Failed to read EDID");
2514
2515}
2516
cdaae837
BL
2517static void dm_gpureset_commit_state(struct dc_state *dc_state,
2518 struct amdgpu_display_manager *dm)
2519{
2520 struct {
2521 struct dc_surface_update surface_updates[MAX_SURFACES];
2522 struct dc_plane_info plane_infos[MAX_SURFACES];
2523 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2524 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2525 struct dc_stream_update stream_update;
2526 } *bundle;
2527 int k, m;
2528
2529 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2530
2531 if (!bundle) {
2532 dm_error("Failed to allocate update bundle\n");
2533 goto cleanup;
2534 }
2535
2536 for (k = 0; k < dc_state->stream_count; k++) {
2537 bundle->stream_update.stream = dc_state->streams[k];
2538
2539 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2540 bundle->surface_updates[m].surface =
2541 dc_state->stream_status->plane_states[m];
2542 bundle->surface_updates[m].surface->force_full_update =
2543 true;
2544 }
2545 dc_commit_updates_for_stream(
2546 dm->dc, bundle->surface_updates,
2547 dc_state->stream_status->plane_count,
efc8278e 2548 dc_state->streams[k], &bundle->stream_update, dc_state);
cdaae837
BL
2549 }
2550
2551cleanup:
2552 kfree(bundle);
2553
2554 return;
2555}
2556
035f5496 2557static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
3c4d55c9
AP
2558{
2559 struct dc_stream_state *stream_state;
2560 struct amdgpu_dm_connector *aconnector = link->priv;
2561 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2562 struct dc_stream_update stream_update;
2563 bool dpms_off = true;
2564
2565 memset(&stream_update, 0, sizeof(stream_update));
2566 stream_update.dpms_off = &dpms_off;
2567
2568 mutex_lock(&adev->dm.dc_lock);
2569 stream_state = dc_stream_find_from_link(link);
2570
2571 if (stream_state == NULL) {
2572 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2573 mutex_unlock(&adev->dm.dc_lock);
2574 return;
2575 }
2576
2577 stream_update.stream = stream_state;
035f5496 2578 acrtc_state->force_dpms_off = true;
3c4d55c9 2579 dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
efc8278e
AJ
2580 stream_state, &stream_update,
2581 stream_state->ctx->dc->current_state);
3c4d55c9
AP
2582 mutex_unlock(&adev->dm.dc_lock);
2583}
2584
4562236b
HW
2585static int dm_resume(void *handle)
2586{
2587 struct amdgpu_device *adev = handle;
4a580877 2588 struct drm_device *ddev = adev_to_drm(adev);
4562236b 2589 struct amdgpu_display_manager *dm = &adev->dm;
c84dec2f 2590 struct amdgpu_dm_connector *aconnector;
4562236b 2591 struct drm_connector *connector;
f8d2d39e 2592 struct drm_connector_list_iter iter;
4562236b 2593 struct drm_crtc *crtc;
c2cea706 2594 struct drm_crtc_state *new_crtc_state;
fcb4019e
LSL
2595 struct dm_crtc_state *dm_new_crtc_state;
2596 struct drm_plane *plane;
2597 struct drm_plane_state *new_plane_state;
2598 struct dm_plane_state *dm_new_plane_state;
113b7a01 2599 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
fbbdadf2 2600 enum dc_connection_type new_connection_type = dc_connection_none;
cdaae837
BL
2601 struct dc_state *dc_state;
2602 int i, r, j;
4562236b 2603
53b3f8f4 2604 if (amdgpu_in_reset(adev)) {
cdaae837
BL
2605 dc_state = dm->cached_dc_state;
2606
6d63fcc2
NK
2607 /*
2608 * The dc->current_state is backed up into dm->cached_dc_state
2609 * before we commit 0 streams.
2610 *
2611 * DC will clear link encoder assignments on the real state
2612 * but the changes won't propagate over to the copy we made
2613 * before the 0 streams commit.
2614 *
2615 * DC expects that link encoder assignments are *not* valid
32685b32
NK
2616 * when committing a state, so as a workaround we can copy
2617 * off of the current state.
2618 *
2619 * We lose the previous assignments, but we had already
2620 * committed 0 streams anyway.
6d63fcc2 2621 */
32685b32 2622 link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
6d63fcc2 2623
af6902ec
NK
2624 if (dc_enable_dmub_notifications(adev->dm.dc))
2625 amdgpu_dm_outbox_init(adev);
524a0ba6 2626
cdaae837
BL
2627 r = dm_dmub_hw_init(adev);
2628 if (r)
2629 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2630
2631 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2632 dc_resume(dm->dc);
2633
2634 amdgpu_dm_irq_resume_early(adev);
2635
2636 for (i = 0; i < dc_state->stream_count; i++) {
2637 dc_state->streams[i]->mode_changed = true;
6984fa41
NK
2638 for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2639 dc_state->stream_status[i].plane_states[j]->update_flags.raw
cdaae837
BL
2640 = 0xffffffff;
2641 }
2642 }
2643
2644 WARN_ON(!dc_commit_state(dm->dc, dc_state));
4562236b 2645
cdaae837
BL
2646 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2647
2648 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2649
2650 dc_release_state(dm->cached_dc_state);
2651 dm->cached_dc_state = NULL;
2652
2653 amdgpu_dm_irq_resume_late(adev);
2654
2655 mutex_unlock(&dm->dc_lock);
2656
2657 return 0;
2658 }
113b7a01
LL
2659 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2660 dc_release_state(dm_state->context);
2661 dm_state->context = dc_create_state(dm->dc);
2662 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2663 dc_resource_state_construct(dm->dc, dm_state->context);
2664
af6902ec
NK
2665 /* Re-enable outbox interrupts for DPIA. */
2666 if (dc_enable_dmub_notifications(adev->dm.dc))
2667 amdgpu_dm_outbox_init(adev);
2668
8c7aea40 2669 /* Before powering on DC we need to re-initialize DMUB. */
79d6b935 2670 dm_dmub_hw_resume(adev);
8c7aea40 2671
a80aa93d
ML
2672 /* power on hardware */
2673 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2674
4562236b
HW
2675 /* program HPD filter */
2676 dc_resume(dm->dc);
2677
4562236b
HW
2678 /*
2679 * early enable HPD Rx IRQ, should be done before setting the mode as short
2680 * pulse interrupts are used for MST
2681 */
2682 amdgpu_dm_irq_resume_early(adev);
2683
d20ebea8 2684 /* On resume we need to rewrite the MSTM control bits to enable MST */
684cd480
LP
2685 s3_handle_mst(ddev, false);
2686
4562236b 2687 /* Do detection */
f8d2d39e
LP
2688 drm_connector_list_iter_begin(ddev, &iter);
2689 drm_for_each_connector_iter(connector, &iter) {
c84dec2f 2690 aconnector = to_amdgpu_dm_connector(connector);
4562236b
HW
2691
2692 /*
2693 * This is the case when traversing through already created
2694 * MST connectors; they should be skipped.
2695 */
f4346fb3
RL
2696 if (aconnector->dc_link &&
2697 aconnector->dc_link->type == dc_connection_mst_branch)
4562236b
HW
2698 continue;
2699
03ea364c 2700 mutex_lock(&aconnector->hpd_lock);
fbbdadf2
BL
2701 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2702 DRM_ERROR("KMS: Failed to detect connector\n");
2703
2704 if (aconnector->base.force && new_connection_type == dc_connection_none)
2705 emulated_link_detect(aconnector->dc_link);
2706 else
2707 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
3eb4eba4
RL
2708
2709 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2710 aconnector->fake_enable = false;
2711
dcd5fb82
MF
2712 if (aconnector->dc_sink)
2713 dc_sink_release(aconnector->dc_sink);
4562236b
HW
2714 aconnector->dc_sink = NULL;
2715 amdgpu_dm_update_connector_after_detect(aconnector);
03ea364c 2716 mutex_unlock(&aconnector->hpd_lock);
4562236b 2717 }
f8d2d39e 2718 drm_connector_list_iter_end(&iter);
4562236b 2719
1f6010a9 2720 /* Force mode set in atomic commit */
a80aa93d 2721 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
c2cea706 2722 new_crtc_state->active_changed = true;
4f346e65 2723
fcb4019e
LSL
2724 /*
2725 * atomic_check is expected to create the dc states. We need to release
2726 * them here, since they were duplicated as part of the suspend
2727 * procedure.
2728 */
a80aa93d 2729 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
fcb4019e
LSL
2730 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2731 if (dm_new_crtc_state->stream) {
2732 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2733 dc_stream_release(dm_new_crtc_state->stream);
2734 dm_new_crtc_state->stream = NULL;
2735 }
2736 }
2737
a80aa93d 2738 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
fcb4019e
LSL
2739 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2740 if (dm_new_plane_state->dc_state) {
2741 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2742 dc_plane_state_release(dm_new_plane_state->dc_state);
2743 dm_new_plane_state->dc_state = NULL;
2744 }
2745 }
2746
2d1af6a1 2747 drm_atomic_helper_resume(ddev, dm->cached_state);
4562236b 2748
a80aa93d 2749 dm->cached_state = NULL;
0a214e2f 2750
9faa4237 2751 amdgpu_dm_irq_resume_late(adev);
4562236b 2752
9340dfd3
HW
2753 amdgpu_dm_smu_write_watermarks_table(adev);
2754
2d1af6a1 2755 return 0;
4562236b
HW
2756}
2757
b8592b48
LL
2758/**
2759 * DOC: DM Lifecycle
2760 *
2761 * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2762 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2763 * the base driver's device list to be initialized and torn down accordingly.
2764 *
2765 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2766 */
2767
4562236b
HW
2768static const struct amd_ip_funcs amdgpu_dm_funcs = {
2769 .name = "dm",
2770 .early_init = dm_early_init,
7abcf6b5 2771 .late_init = dm_late_init,
4562236b
HW
2772 .sw_init = dm_sw_init,
2773 .sw_fini = dm_sw_fini,
e9669fb7 2774 .early_fini = amdgpu_dm_early_fini,
4562236b
HW
2775 .hw_init = dm_hw_init,
2776 .hw_fini = dm_hw_fini,
2777 .suspend = dm_suspend,
2778 .resume = dm_resume,
2779 .is_idle = dm_is_idle,
2780 .wait_for_idle = dm_wait_for_idle,
2781 .check_soft_reset = dm_check_soft_reset,
2782 .soft_reset = dm_soft_reset,
2783 .set_clockgating_state = dm_set_clockgating_state,
2784 .set_powergating_state = dm_set_powergating_state,
2785};
2786
2787const struct amdgpu_ip_block_version dm_ip_block =
2788{
2789 .type = AMD_IP_BLOCK_TYPE_DCE,
2790 .major = 1,
2791 .minor = 0,
2792 .rev = 0,
2793 .funcs = &amdgpu_dm_funcs,
2794};
2795
ca3268c4 2796
b8592b48
LL
2797/**
2798 * DOC: atomic
2799 *
2800 * *WIP*
2801 */
0a323b84 2802
b3663f70 2803static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
4d4772f6 2804 .fb_create = amdgpu_display_user_framebuffer_create,
dfbbfe3c 2805 .get_format_info = amd_get_format_info,
366c1baa 2806 .output_poll_changed = drm_fb_helper_output_poll_changed,
4562236b 2807 .atomic_check = amdgpu_dm_atomic_check,
0269764a 2808 .atomic_commit = drm_atomic_helper_commit,
54f5499a
AG
2809};
2810
2811static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2812 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
4562236b
HW
2813};
2814
94562810
RS
2815static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2816{
2817 u32 max_cll, min_cll, max, min, q, r;
2818 struct amdgpu_dm_backlight_caps *caps;
2819 struct amdgpu_display_manager *dm;
2820 struct drm_connector *conn_base;
2821 struct amdgpu_device *adev;
ec11fe37 2822 struct dc_link *link = NULL;
94562810
RS
2823 static const u8 pre_computed_values[] = {
2824 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2825 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
7fd13bae 2826 int i;
94562810
RS
2827
2828 if (!aconnector || !aconnector->dc_link)
2829 return;
2830
ec11fe37 2831 link = aconnector->dc_link;
2832 if (link->connector_signal != SIGNAL_TYPE_EDP)
2833 return;
2834
94562810 2835 conn_base = &aconnector->base;
1348969a 2836 adev = drm_to_adev(conn_base->dev);
94562810 2837 dm = &adev->dm;
7fd13bae
AD
2838 for (i = 0; i < dm->num_of_edps; i++) {
2839 if (link == dm->backlight_link[i])
2840 break;
2841 }
2842 if (i >= dm->num_of_edps)
2843 return;
2844 caps = &dm->backlight_caps[i];
94562810
RS
2845 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2846 caps->aux_support = false;
2847 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2848 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2849
d0ae0b64 2850 if (caps->ext_caps->bits.oled == 1 /*||
94562810 2851 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
d0ae0b64 2852 caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
94562810
RS
2853 caps->aux_support = true;
2854
7a46f05e
TI
2855 if (amdgpu_backlight == 0)
2856 caps->aux_support = false;
2857 else if (amdgpu_backlight == 1)
2858 caps->aux_support = true;
2859
94562810
RS
2860 /* From the specification (CTA-861-G), for calculating the maximum
2861 * luminance we need to use:
2862 * Luminance = 50*2**(CV/32)
2863 * Where CV is a one-byte value.
2864 * For calculating this expression we may need floating-point precision;
2865 * to avoid this complexity level, we take advantage of the fact that CV
2866 * is divided by a constant. From Euclid's division algorithm, we know
2867 * that CV can be written as: CV = 32*q + r. Next, we replace CV in the
2868 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2869 * need to pre-compute the values of 50*(2**(r/32)). For pre-computing
2870 * them we used the following Ruby line:
2871 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2872 * The results of the above expressions can be verified at
2873 * pre_computed_values.
2874 */
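 /*
 * Worked example (illustrative; not from the original source): for
 * max_cll = 97 (0x61), q = 97 >> 5 = 3 and r = 97 % 32 = 1, so
 * max = (1 << 3) * pre_computed_values[1] = 8 * 51 = 408, matching
 * 50*2**(97/32) ~= 409 up to the table's per-entry rounding.
 */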
2875 q = max_cll >> 5;
2876 r = max_cll % 32;
2877 max = (1 << q) * pre_computed_values[r];
2878
2879 // min luminance: maxLum * (CV/255)^2 / 100
2880 q = DIV_ROUND_CLOSEST(min_cll, 255);
2881 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2882
2883 caps->aux_max_input_signal = max;
2884 caps->aux_min_input_signal = min;
2885}
2886
97e51c16
HW
2887void amdgpu_dm_update_connector_after_detect(
2888 struct amdgpu_dm_connector *aconnector)
4562236b
HW
2889{
2890 struct drm_connector *connector = &aconnector->base;
2891 struct drm_device *dev = connector->dev;
b73a22d3 2892 struct dc_sink *sink;
4562236b
HW
2893
2894 /* MST handled by drm_mst framework */
2895 if (aconnector->mst_mgr.mst_state == true)
2896 return;
2897
4562236b 2898 sink = aconnector->dc_link->local_sink;
dcd5fb82
MF
2899 if (sink)
2900 dc_sink_retain(sink);
4562236b 2901
1f6010a9
DF
2902 /*
2903 * EDID mgmt connector gets its first update only in the mode_valid hook and then
4562236b 2904 * the connector sink is set to either a fake or physical sink depending on link status.
1f6010a9 2905 * Skip if already done during boot.
4562236b
HW
2906 */
2907 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2908 && aconnector->dc_em_sink) {
2909
1f6010a9
DF
2910 /*
2911 * For S3 resume with headless use dc_em_sink to fake the stream
2912 * because on resume connector->sink is set to NULL
4562236b
HW
2913 */
2914 mutex_lock(&dev->mode_config.mutex);
2915
2916 if (sink) {
922aa1e1 2917 if (aconnector->dc_sink) {
98e6436d 2918 amdgpu_dm_update_freesync_caps(connector, NULL);
1f6010a9
DF
2919 /*
2920 * retain and release below are used to
2921 * bump up the refcount for the sink because the link doesn't point
2922 * to it anymore after disconnect, so on the next crtc-to-connector
922aa1e1
AG
2923 * reshuffle by UMD we would get an unwanted dc_sink release
2924 */
dcd5fb82 2925 dc_sink_release(aconnector->dc_sink);
922aa1e1 2926 }
4562236b 2927 aconnector->dc_sink = sink;
dcd5fb82 2928 dc_sink_retain(aconnector->dc_sink);
98e6436d
AK
2929 amdgpu_dm_update_freesync_caps(connector,
2930 aconnector->edid);
4562236b 2931 } else {
98e6436d 2932 amdgpu_dm_update_freesync_caps(connector, NULL);
dcd5fb82 2933 if (!aconnector->dc_sink) {
4562236b 2934 aconnector->dc_sink = aconnector->dc_em_sink;
922aa1e1 2935 dc_sink_retain(aconnector->dc_sink);
dcd5fb82 2936 }
4562236b
HW
2937 }
2938
2939 mutex_unlock(&dev->mode_config.mutex);
dcd5fb82
MF
2940
2941 if (sink)
2942 dc_sink_release(sink);
4562236b
HW
2943 return;
2944 }
2945
2946 /*
2947 * TODO: temporary guard while looking for a proper fix;
2948 * if this sink is an MST sink, we should not do anything
2949 */
dcd5fb82
MF
2950 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2951 dc_sink_release(sink);
4562236b 2952 return;
dcd5fb82 2953 }
4562236b
HW
2954
2955 if (aconnector->dc_sink == sink) {
1f6010a9
DF
2956 /*
2957 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2958 * Do nothing!!
2959 */
f1ad2f5e 2960 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
4562236b 2961 aconnector->connector_id);
dcd5fb82
MF
2962 if (sink)
2963 dc_sink_release(sink);
4562236b
HW
2964 return;
2965 }
2966
f1ad2f5e 2967 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
4562236b
HW
2968 aconnector->connector_id, aconnector->dc_sink, sink);
2969
2970 mutex_lock(&dev->mode_config.mutex);
2971
1f6010a9
DF
2972 /*
2973 * 1. Update status of the drm connector
2974 * 2. Send an event and let userspace tell us what to do
2975 */
4562236b 2976 if (sink) {
1f6010a9
DF
2977 /*
2978 * TODO: check if we still need the S3 mode update workaround.
2979 * If yes, put it here.
2980 */
c64b0d6b 2981 if (aconnector->dc_sink) {
98e6436d 2982 amdgpu_dm_update_freesync_caps(connector, NULL);
c64b0d6b
VL
2983 dc_sink_release(aconnector->dc_sink);
2984 }
4562236b
HW
2985
2986 aconnector->dc_sink = sink;
dcd5fb82 2987 dc_sink_retain(aconnector->dc_sink);
900b3cb1 2988 if (sink->dc_edid.length == 0) {
4562236b 2989 aconnector->edid = NULL;
e6142dd5
AP
2990 if (aconnector->dc_link->aux_mode) {
2991 drm_dp_cec_unset_edid(
2992 &aconnector->dm_dp_aux.aux);
2993 }
900b3cb1 2994 } else {
4562236b 2995 aconnector->edid =
e6142dd5 2996 (struct edid *)sink->dc_edid.raw_edid;
4562236b 2997
e6142dd5
AP
2998 if (aconnector->dc_link->aux_mode)
2999 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
3000 aconnector->edid);
4562236b 3001 }
e6142dd5 3002
20543be9 3003 drm_connector_update_edid_property(connector, aconnector->edid);
98e6436d 3004 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
94562810 3005 update_connector_ext_caps(aconnector);
4562236b 3006 } else {
e86e8947 3007 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
98e6436d 3008 amdgpu_dm_update_freesync_caps(connector, NULL);
c555f023 3009 drm_connector_update_edid_property(connector, NULL);
4562236b 3010 aconnector->num_modes = 0;
dcd5fb82 3011 dc_sink_release(aconnector->dc_sink);
4562236b 3012 aconnector->dc_sink = NULL;
5326c452 3013 aconnector->edid = NULL;
0c8620d6
BL
3014#ifdef CONFIG_DRM_AMD_DC_HDCP
3015 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
3016 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3017 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3018#endif
4562236b
HW
3019 }
3020
3021 mutex_unlock(&dev->mode_config.mutex);
dcd5fb82 3022
0f877894
OV
3023 update_subconnector_property(aconnector);
3024
dcd5fb82
MF
3025 if (sink)
3026 dc_sink_release(sink);
4562236b
HW
3027}
3028
e27c41d5 3029static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
4562236b 3030{
4562236b
HW
3031 struct drm_connector *connector = &aconnector->base;
3032 struct drm_device *dev = connector->dev;
fbbdadf2 3033 enum dc_connection_type new_connection_type = dc_connection_none;
1348969a 3034 struct amdgpu_device *adev = drm_to_adev(dev);
97f6c917 3035 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
035f5496 3036 struct dm_crtc_state *dm_crtc_state = NULL;
4562236b 3037
b972b4f9
HW
3038 if (adev->dm.disable_hpd_irq)
3039 return;
3040
035f5496
AP
3041 if (dm_con_state->base.state && dm_con_state->base.crtc)
3042 dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
3043 dm_con_state->base.state,
3044 dm_con_state->base.crtc));
1f6010a9
DF
3045 /*
3046 * In case of failure or MST, no need to update connector status or notify the OS
3047 * since (in the MST case) MST does this in its own context.
4562236b
HW
3048 */
3049 mutex_lock(&aconnector->hpd_lock);
2e0ac3d6 3050
0c8620d6 3051#ifdef CONFIG_DRM_AMD_DC_HDCP
97f6c917 3052 if (adev->dm.hdcp_workqueue) {
96a3b32e 3053 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
97f6c917
BL
3054 dm_con_state->update_hdcp = true;
3055 }
0c8620d6 3056#endif
2e0ac3d6
HW
3057 if (aconnector->fake_enable)
3058 aconnector->fake_enable = false;
3059
fbbdadf2
BL
3060 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3061 DRM_ERROR("KMS: Failed to detect connector\n");
3062
3063 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3064 emulated_link_detect(aconnector->dc_link);
3065
fbbdadf2
BL
3066 drm_modeset_lock_all(dev);
3067 dm_restore_drm_connector_state(dev, connector);
3068 drm_modeset_unlock_all(dev);
3069
3070 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
fc320a6f 3071 drm_kms_helper_connector_hotplug_event(connector);
fbbdadf2
BL
3072
3073 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3c4d55c9 3074 if (new_connection_type == dc_connection_none &&
035f5496
AP
3075 aconnector->dc_link->type == dc_connection_none &&
3076 dm_crtc_state)
3077 dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
4562236b 3078
3c4d55c9 3079 amdgpu_dm_update_connector_after_detect(aconnector);
4562236b
HW
3080
3081 drm_modeset_lock_all(dev);
3082 dm_restore_drm_connector_state(dev, connector);
3083 drm_modeset_unlock_all(dev);
3084
3085 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
fc320a6f 3086 drm_kms_helper_connector_hotplug_event(connector);
4562236b
HW
3087 }
3088 mutex_unlock(&aconnector->hpd_lock);
3089
3090}
3091
e27c41d5
JS
3092static void handle_hpd_irq(void *param)
3093{
3094 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3095
3096 handle_hpd_irq_helper(aconnector);
3097
3098}
3099
8e794421 3100static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
4562236b
HW
3101{
3102 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3103 uint8_t dret;
3104 bool new_irq_handled = false;
3105 int dpcd_addr;
3106 int dpcd_bytes_to_read;
3107
3108 const int max_process_count = 30;
3109 int process_count = 0;
3110
3111 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3112
3113 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3114 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3115 /* DPCD 0x200 - 0x201 for downstream IRQ */
3116 dpcd_addr = DP_SINK_COUNT;
3117 } else {
3118 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3119 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
3120 dpcd_addr = DP_SINK_COUNT_ESI;
3121 }
3122
3123 dret = drm_dp_dpcd_read(
3124 &aconnector->dm_dp_aux.aux,
3125 dpcd_addr,
3126 esi,
3127 dpcd_bytes_to_read);
3128
3129 while (dret == dpcd_bytes_to_read &&
3130 process_count < max_process_count) {
3131 uint8_t retry;
3132 dret = 0;
3133
3134 process_count++;
3135
f1ad2f5e 3136 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
4562236b
HW
3137 /* handle HPD short pulse irq */
3138 if (aconnector->mst_mgr.mst_state)
3139 drm_dp_mst_hpd_irq(
3140 &aconnector->mst_mgr,
3141 esi,
3142 &new_irq_handled);
4562236b
HW
3143
3144 if (new_irq_handled) {
3145 /* ACK at DPCD to notify the downstream device */
3146 const int ack_dpcd_bytes_to_write =
3147 dpcd_bytes_to_read - 1;
3148
3149 for (retry = 0; retry < 3; retry++) {
3150 uint8_t wret;
3151
3152 wret = drm_dp_dpcd_write(
3153 &aconnector->dm_dp_aux.aux,
3154 dpcd_addr + 1,
3155 &esi[1],
3156 ack_dpcd_bytes_to_write);
3157 if (wret == ack_dpcd_bytes_to_write)
3158 break;
3159 }
3160
1f6010a9 3161 /* check if there is a new irq to be handled */
4562236b
HW
3162 dret = drm_dp_dpcd_read(
3163 &aconnector->dm_dp_aux.aux,
3164 dpcd_addr,
3165 esi,
3166 dpcd_bytes_to_read);
3167
3168 new_irq_handled = false;
d4a6e8a9 3169 } else {
4562236b 3170 break;
d4a6e8a9 3171 }
4562236b
HW
3172 }
3173
3174 if (process_count == max_process_count)
f1ad2f5e 3175 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
4562236b
HW
3176}
3177
8e794421
WL
3178static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3179 union hpd_irq_data hpd_irq_data)
3180{
3181 struct hpd_rx_irq_offload_work *offload_work =
3182 kzalloc(sizeof(*offload_work), GFP_KERNEL);
3183
3184 if (!offload_work) {
3185 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3186 return;
3187 }
3188
3189 INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3190 offload_work->data = hpd_irq_data;
3191 offload_work->offload_wq = offload_wq;
3192
3193 queue_work(offload_wq->wq, &offload_work->work);
3194 DRM_DEBUG_KMS("queued work to handle hpd_rx offload\n");
3195}
3196
4562236b
HW
3197static void handle_hpd_rx_irq(void *param)
3198{
c84dec2f 3199 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
4562236b
HW
3200 struct drm_connector *connector = &aconnector->base;
3201 struct drm_device *dev = connector->dev;
53cbf65c 3202 struct dc_link *dc_link = aconnector->dc_link;
4562236b 3203 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
c8ea79a8 3204 bool result = false;
fbbdadf2 3205 enum dc_connection_type new_connection_type = dc_connection_none;
c8ea79a8 3206 struct amdgpu_device *adev = drm_to_adev(dev);
2a0f9270 3207 union hpd_irq_data hpd_irq_data;
8e794421
WL
3208 bool link_loss = false;
3209 bool has_left_work = false;
3210 int idx = aconnector->base.index;
3211 struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
2a0f9270
BL
3212
3213 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
4562236b 3214
b972b4f9
HW
3215 if (adev->dm.disable_hpd_irq)
3216 return;
3217
1f6010a9
DF
3218 /*
3219 * TODO: Temporarily add a mutex so the hpd interrupt does not have a gpio
4562236b
HW
3220 * conflict; after the i2c helper is implemented, this mutex should be
3221 * retired.
3222 */
b86e7eef 3223 mutex_lock(&aconnector->hpd_lock);
4562236b 3224
8e794421
WL
3225 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3226 &link_loss, true, &has_left_work);
3083a984 3227
8e794421
WL
3228 if (!has_left_work)
3229 goto out;
3230
3231 if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3232 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3233 goto out;
3234 }
3235
3236 if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3237 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3238 hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3239 dm_handle_mst_sideband_msg(aconnector);
3083a984
QZ
3240 goto out;
3241 }
3083a984 3242
8e794421
WL
3243 if (link_loss) {
3244 bool skip = false;
d2aa1356 3245
8e794421
WL
3246 spin_lock(&offload_wq->offload_lock);
3247 skip = offload_wq->is_handling_link_loss;
3248
3249 if (!skip)
3250 offload_wq->is_handling_link_loss = true;
3251
3252 spin_unlock(&offload_wq->offload_lock);
3253
3254 if (!skip)
3255 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3256
3257 goto out;
3258 }
3259 }
c8ea79a8 3260
3083a984 3261out:
c8ea79a8 3262 if (result && !is_mst_root_connector) {
4562236b 3263 /* Downstream Port status changed. */
fbbdadf2
BL
3264 if (!dc_link_detect_sink(dc_link, &new_connection_type))
3265 DRM_ERROR("KMS: Failed to detect connector\n");
3266
3267 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3268 emulated_link_detect(dc_link);
3269
3270 if (aconnector->fake_enable)
3271 aconnector->fake_enable = false;
3272
3273 amdgpu_dm_update_connector_after_detect(aconnector);
3274
3275
3276 drm_modeset_lock_all(dev);
3277 dm_restore_drm_connector_state(dev, connector);
3278 drm_modeset_unlock_all(dev);
3279
fc320a6f 3280 drm_kms_helper_connector_hotplug_event(connector);
fbbdadf2 3281 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
88ac3dda
RL
3282
3283 if (aconnector->fake_enable)
3284 aconnector->fake_enable = false;
3285
4562236b
HW
3286 amdgpu_dm_update_connector_after_detect(aconnector);
3287
3288
3289 drm_modeset_lock_all(dev);
3290 dm_restore_drm_connector_state(dev, connector);
3291 drm_modeset_unlock_all(dev);
3292
fc320a6f 3293 drm_kms_helper_connector_hotplug_event(connector);
4562236b
HW
3294 }
3295 }
2a0f9270 3296#ifdef CONFIG_DRM_AMD_DC_HDCP
95f247e7
DC
3297 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3298 if (adev->dm.hdcp_workqueue)
3299 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
3300 }
2a0f9270 3301#endif
4562236b 3302
b86e7eef 3303 if (dc_link->type != dc_connection_mst_branch)
e86e8947 3304 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
b86e7eef
NC
3305
3306 mutex_unlock(&aconnector->hpd_lock);
4562236b
HW
3307}

static void register_hpd_handlers(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct amdgpu_dm_connector *aconnector;
	const struct dc_link *dc_link;
	struct dc_interrupt_params int_params = {0};

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {

		aconnector = to_amdgpu_dm_connector(connector);
		dc_link = aconnector->dc_link;

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_irq,
					(void *) aconnector);
		}

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {

			/* Also register for DP short pulse (hpd_rx). */
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_rx_irq,
					(void *) aconnector);

			if (adev->dm.hpd_rx_offload_wq)
				adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
					aconnector;
		}
	}
}

#if defined(CONFIG_DRM_AMD_DC_SI)
/* Register IRQ sources and initialize IRQ callbacks */
static int dce60_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VBLANK interrupt */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i + 1, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
#endif

/* Register IRQ sources and initialize IRQ callbacks */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

	if (adev->family >= AMDGPU_FAMILY_AI)
		client_id = SOC15_IH_CLIENTID_DCE;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VBLANK interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use VUPDATE interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_vupdate_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}

/* Register IRQ sources and initialize IRQ callbacks */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	static const unsigned int vrtl_int_srcid[] = {
		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
	};
#endif

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VSTARTUP interrupt */
	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);

		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(
			adev, &int_params, dm_crtc_high_irq, c_irq_params);
	}

	/* Use otg vertical line interrupt */
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
				vrtl_int_srcid[i], &adev->vline0_irq);

		if (r) {
			DRM_ERROR("Failed to add vline0 irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);

		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
			break;
		}

		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
					- DC_IRQ_SOURCE_DC1_VLINE0];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
	}
#endif

	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
	 * to trigger at end of each vblank, regardless of state of the lock,
	 * matching DCE behaviour.
	 */
	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
			i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);

		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_vupdate_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
			&adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
/* Register Outbox IRQ sources and initialize IRQ callbacks */
static int register_outbox_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r, i;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
			&adev->dmub_outbox_irq);
	if (r) {
		DRM_ERROR("Failed to add outbox irq id!\n");
		return r;
	}

	if (dc->ctx->dmub_srv) {
		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.dmub_outbox_params[0];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_dmub_outbox1_low_irq, c_irq_params);
	}

	return 0;
}

/*
 * Acquires the lock for the atomic state object and returns
 * the new atomic state.
 *
 * This should only be called during atomic check.
 */
int dm_atomic_get_state(struct drm_atomic_state *state,
			struct dm_atomic_state **dm_state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_state *priv_state;

	if (*dm_state)
		return 0;

	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	*dm_state = to_dm_atomic_state(priv_state);

	return 0;
}
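
/*
 * Illustrative sketch of a caller (hypothetical; real callers live in the
 * atomic-check paths of this file):
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *	// dm_state->context now points at the private DC state for this
 *	// commit; grabbing it also took the private object's modeset lock.
 */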

static struct dm_atomic_state *
dm_atomic_get_new_state(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_obj *obj;
	struct drm_private_state *new_obj_state;
	int i;

	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
		if (obj->funcs == dm->atomic_obj.funcs)
			return to_dm_atomic_state(new_obj_state);
	}

	return NULL;
}

static struct drm_private_state *
dm_atomic_duplicate_state(struct drm_private_obj *obj)
{
	struct dm_atomic_state *old_state, *new_state;

	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
	if (!new_state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);

	old_state = to_dm_atomic_state(obj->state);

	if (old_state && old_state->context)
		new_state->context = dc_copy_state(old_state->context);

	if (!new_state->context) {
		kfree(new_state);
		return NULL;
	}

	return &new_state->base;
}

static void dm_atomic_destroy_state(struct drm_private_obj *obj,
				    struct drm_private_state *state)
{
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);

	if (dm_state && dm_state->context)
		dc_release_state(dm_state->context);

	kfree(dm_state);
}

static struct drm_private_state_funcs dm_atomic_state_funcs = {
	.atomic_duplicate_state = dm_atomic_duplicate_state,
	.atomic_destroy_state = dm_atomic_destroy_state,
};

static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
{
	struct dm_atomic_state *state;
	int r;

	adev->mode_info.mode_config_initialized = true;

	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;

	adev_to_drm(adev)->mode_config.max_width = 16384;
	adev_to_drm(adev)->mode_config.max_height = 16384;

	adev_to_drm(adev)->mode_config.preferred_depth = 24;
	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
	/* indicates support for immediate flip */
	adev_to_drm(adev)->mode_config.async_page_flip = true;

	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	state->context = dc_create_state(adev->dm.dc);
	if (!state->context) {
		kfree(state);
		return -ENOMEM;
	}

	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);

	drm_atomic_private_obj_init(adev_to_drm(adev),
				    &adev->dm.atomic_obj,
				    &state->base,
				    &dm_atomic_state_funcs);

	r = amdgpu_display_modeset_create_props(adev);
	if (r) {
		dc_release_state(state->context);
		kfree(state);
		return r;
	}

	r = amdgpu_dm_audio_init(adev);
	if (r) {
		dc_release_state(state->context);
		kfree(state);
		return r;
	}

	return 0;
}

#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
					    int bl_idx)
{
#if defined(CONFIG_ACPI)
	struct amdgpu_dm_backlight_caps caps;

	memset(&caps, 0, sizeof(caps));

	if (dm->backlight_caps[bl_idx].caps_valid)
		return;

	amdgpu_acpi_get_backlight_caps(&caps);
	if (caps.caps_valid) {
		dm->backlight_caps[bl_idx].caps_valid = true;
		if (caps.aux_support)
			return;
		dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
		dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
	} else {
		dm->backlight_caps[bl_idx].min_input_signal =
				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
		dm->backlight_caps[bl_idx].max_input_signal =
				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
	}
#else
	if (dm->backlight_caps[bl_idx].aux_support)
		return;

	dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
	dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
#endif
}

static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
				unsigned int *min, unsigned int *max)
{
	if (!caps)
		return 0;

	if (caps->aux_support) {
		// Firmware limits are in nits, DC API wants millinits.
		*max = 1000 * caps->aux_max_input_signal;
		*min = 1000 * caps->aux_min_input_signal;
	} else {
		// Firmware limits are 8-bit, PWM control is 16-bit.
		*max = 0x101 * caps->max_input_signal;
		*min = 0x101 * caps->min_input_signal;
	}
	return 1;
}

static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
					uint32_t brightness)
{
	unsigned int min, max;

	if (!get_brightness_range(caps, &min, &max))
		return brightness;

	// Rescale 0..255 to min..max
	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
				       AMDGPU_MAX_BL_LEVEL);
}

static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
				      uint32_t brightness)
{
	unsigned int min, max;

	if (!get_brightness_range(caps, &min, &max))
		return brightness;

	if (brightness < min)
		return 0;
	// Rescale min..max to 0..255
	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
				 max - min);
}

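/*
 * Worked example for the conversions above, assuming the PWM path with the
 * default firmware limits (min_input_signal = 12, max_input_signal = 255):
 * min = 0x101 * 12 = 3084 and max = 0x101 * 255 = 65535, so user level 0
 * maps to 3084, 255 maps to 65535, and 128 maps to
 * 3084 + DIV_ROUND_CLOSEST(62451 * 128, 255) = 3084 + 31348 = 34432.
 */
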
static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
					  int bl_idx,
					  u32 user_brightness)
{
	struct amdgpu_dm_backlight_caps caps;
	struct dc_link *link;
	u32 brightness;
	bool rc;

	amdgpu_dm_update_backlight_caps(dm, bl_idx);
	caps = dm->backlight_caps[bl_idx];

	dm->brightness[bl_idx] = user_brightness;
	/* update scratch register */
	if (bl_idx == 0)
		amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
	brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
	link = (struct dc_link *)dm->backlight_link[bl_idx];

	/* Change brightness based on AUX property */
	if (caps.aux_support) {
		rc = dc_link_set_backlight_level_nits(link, true, brightness,
						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
		if (!rc)
			DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
	} else {
		rc = dc_link_set_backlight_level(link, brightness, 0);
		if (!rc)
			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
	}

	if (rc)
		dm->actual_brightness[bl_idx] = user_brightness;
}

static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);
	int i;

	for (i = 0; i < dm->num_of_edps; i++) {
		if (bd == dm->backlight_dev[i])
			break;
	}
	if (i >= AMDGPU_DM_MAX_NUM_EDP)
		i = 0;
	amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);

	return 0;
}

static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
					 int bl_idx)
{
	struct amdgpu_dm_backlight_caps caps;
	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];

	amdgpu_dm_update_backlight_caps(dm, bl_idx);
	caps = dm->backlight_caps[bl_idx];

	if (caps.aux_support) {
		u32 avg, peak;
		bool rc;

		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
		if (!rc)
			return dm->brightness[bl_idx];
		return convert_brightness_to_user(&caps, avg);
	} else {
		int ret = dc_link_get_backlight_level(link);

		if (ret == DC_ERROR_UNEXPECTED)
			return dm->brightness[bl_idx];
		return convert_brightness_to_user(&caps, ret);
	}
}

static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);
	int i;

	for (i = 0; i < dm->num_of_edps; i++) {
		if (bd == dm->backlight_dev[i])
			break;
	}
	if (i >= AMDGPU_DM_MAX_NUM_EDP)
		i = 0;
	return amdgpu_dm_backlight_get_level(dm, i);
}

static const struct backlight_ops amdgpu_dm_backlight_ops = {
	.options = BL_CORE_SUSPENDRESUME,
	.get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status = amdgpu_dm_backlight_update_status,
};

static void
amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
{
	char bl_name[16];
	struct backlight_properties props = { 0 };

	amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
	dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;

	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
	props.brightness = AMDGPU_MAX_BL_LEVEL;
	props.type = BACKLIGHT_RAW;

	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
		 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);

	dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
								       adev_to_drm(dm->adev)->dev,
								       dm,
								       &amdgpu_dm_backlight_ops,
								       &props);

	if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
		DRM_ERROR("DM: Backlight registration failed!\n");
	else
		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
}
#endif

static int initialize_plane(struct amdgpu_display_manager *dm,
			    struct amdgpu_mode_info *mode_info, int plane_id,
			    enum drm_plane_type plane_type,
			    const struct dc_plane_cap *plane_cap)
{
	struct drm_plane *plane;
	unsigned long possible_crtcs;
	int ret = 0;

	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
	if (!plane) {
		DRM_ERROR("KMS: Failed to allocate plane\n");
		return -ENOMEM;
	}
	plane->type = plane_type;

	/*
	 * HACK: IGT tests expect that the primary plane for a CRTC
	 * can only have one possible CRTC. Only expose support for
	 * any CRTC if they're not going to be used as a primary plane
	 * for a CRTC - like overlay or underlay planes.
	 */
	possible_crtcs = 1 << plane_id;
	if (plane_id >= dm->dc->caps.max_streams)
		possible_crtcs = 0xff;

	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);

	if (ret) {
		DRM_ERROR("KMS: Failed to initialize plane\n");
		kfree(plane);
		return ret;
	}

	if (mode_info)
		mode_info->planes[plane_id] = plane;

	return ret;
}

static void register_backlight_device(struct amdgpu_display_manager *dm,
				      struct dc_link *link)
{
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none) {
		/*
		 * Even if registration fails, we should continue with
		 * DM initialization because not having a backlight control
		 * is better than a black screen.
		 */
		if (!dm->backlight_dev[dm->num_of_edps])
			amdgpu_dm_register_backlight_device(dm);

		if (dm->backlight_dev[dm->num_of_edps]) {
			dm->backlight_link[dm->num_of_edps] = link;
			dm->num_of_edps++;
		}
	}
#endif
}

/*
 * In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with DAL component
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	int32_t i;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct amdgpu_encoder *aencoder = NULL;
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	uint32_t link_cnt;
	int32_t primary_planes;
	enum dc_connection_type new_connection_type = dc_connection_none;
	const struct dc_plane_cap *plane;
	bool psr_feature_enabled = false;

	dm->display_indexes_num = dm->dc->caps.max_streams;
	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	link_cnt = dm->dc->caps.max_links;
	if (amdgpu_dm_mode_config_init(dm->adev)) {
		DRM_ERROR("DM: Failed to initialize mode config\n");
		return -EINVAL;
	}

	/* There is one primary plane per CRTC */
	primary_planes = dm->dc->caps.max_streams;
	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);

	/*
	 * Initialize primary planes, implicit planes for legacy IOCTLS.
	 * Order is reversed to match iteration order in atomic check.
	 */
	for (i = (primary_planes - 1); i >= 0; i--) {
		plane = &dm->dc->caps.planes[i];

		if (initialize_plane(dm, mode_info, i,
				     DRM_PLANE_TYPE_PRIMARY, plane)) {
			DRM_ERROR("KMS: Failed to initialize primary plane\n");
			goto fail;
		}
	}

	/*
	 * Initialize overlay planes, index starting after primary planes.
	 * These planes have a higher DRM index than the primary planes since
	 * they should be considered as having a higher z-order.
	 * Order is reversed to match iteration order in atomic check.
	 *
	 * Only support DCN for now, and only expose one so we don't encourage
	 * userspace to use up all the pipes.
	 */
	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];

		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
			continue;

		if (!plane->blends_with_above || !plane->blends_with_below)
			continue;

		if (!plane->pixel_format_support.argb8888)
			continue;

		if (initialize_plane(dm, NULL, primary_planes + i,
				     DRM_PLANE_TYPE_OVERLAY, plane)) {
			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
			goto fail;
		}

		/* Only create one overlay plane. */
		break;
	}

	for (i = 0; i < dm->dc->caps.max_streams; i++)
		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
			DRM_ERROR("KMS: Failed to initialize crtc\n");
			goto fail;
		}

	/* Use Outbox interrupt */
	switch (adev->ip_versions[DCE_HWIP][0]) {
	case IP_VERSION(3, 0, 0):
	case IP_VERSION(3, 1, 2):
	case IP_VERSION(3, 1, 3):
	case IP_VERSION(3, 1, 5):
	case IP_VERSION(3, 1, 6):
	case IP_VERSION(2, 1, 0):
		if (register_outbox_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
	default:
		DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
			      adev->ip_versions[DCE_HWIP][0]);
	}

	/* Determine whether to enable PSR support by default. */
	if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
		case IP_VERSION(3, 1, 5):
		case IP_VERSION(3, 1, 6):
			psr_feature_enabled = true;
			break;
		default:
			psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
			break;
		}
	}

	/* Disable vblank IRQs aggressively for power-saving. */
	adev_to_drm(adev)->vblank_disable_immediate = true;

	/* loops over all connectors on the board */
	for (i = 0; i < link_cnt; i++) {
		struct dc_link *link = NULL;

		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
			DRM_ERROR(
				"KMS: Cannot support more than %d display indexes\n",
					AMDGPU_DM_MAX_DISPLAY_INDEX);
			continue;
		}

		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
		if (!aconnector)
			goto fail;

		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
		if (!aencoder)
			goto fail;

		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
			DRM_ERROR("KMS: Failed to initialize encoder\n");
			goto fail;
		}

		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
			DRM_ERROR("KMS: Failed to initialize connector\n");
			goto fail;
		}

		link = dc_get_link_at_index(dm->dc, i);

		if (!dc_link_detect_sink(link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(link);
			amdgpu_dm_update_connector_after_detect(aconnector);

		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
			amdgpu_dm_update_connector_after_detect(aconnector);
			register_backlight_device(dm, link);
			if (dm->num_of_edps)
				update_connector_ext_caps(aconnector);
			if (psr_feature_enabled)
				amdgpu_dm_set_psr_caps(link);

			/* TODO: Fix vblank control helpers to delay PSR entry to allow this when
			 * PSR is also supported.
			 */
			if (link->psr_settings.psr_feature_enabled)
				adev_to_drm(adev)->vblank_disable_immediate = false;
		}
	}

	/* Software is initialized. Now we can register interrupt handlers. */
	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
		if (dce60_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		if (dce110_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
	default:
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 0, 3):
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 2):
		case IP_VERSION(3, 0, 3):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
		case IP_VERSION(3, 1, 5):
		case IP_VERSION(3, 1, 6):
			if (dcn10_register_irq_handlers(dm->adev)) {
				DRM_ERROR("DM: Failed to initialize IRQ\n");
				goto fail;
			}
			break;
		default:
			DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
					adev->ip_versions[DCE_HWIP][0]);
			goto fail;
		}
		break;
	}

	return 0;
fail:
	kfree(aencoder);
	kfree(aconnector);

	return -EINVAL;
}

static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
	drm_atomic_private_obj_fini(&dm->atomic_obj);
	return;
}

/******************************************************************************
 * amdgpu_display_funcs functions
 *****************************************************************************/

/*
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
}

static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
	.backlight_set_level = NULL, /* never called for DC */
	.backlight_get_level = NULL, /* never called for DC */
	.hpd_sense = NULL,/* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos,/* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
};

#if defined(CONFIG_DEBUG_KERNEL_DC)

static ssize_t s3_debug_store(struct device *device,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t count)
{
	int ret;
	int s3_state;
	struct drm_device *drm_dev = dev_get_drvdata(device);
	struct amdgpu_device *adev = drm_to_adev(drm_dev);

	ret = kstrtoint(buf, 0, &s3_state);

	if (ret == 0) {
		if (s3_state) {
			dm_resume(adev);
			drm_kms_helper_hotplug_event(adev_to_drm(adev));
		} else
			dm_suspend(adev);
	}

	return ret == 0 ? count : 0;
}

DEVICE_ATTR_WO(s3_debug);

#endif

static int dm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_OLAND:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 2;
		adev->mode_info.num_dig = 2;
		break;
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_KAVERI:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_CARRIZO:
		adev->mode_info.num_crtc = 3;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_STONEY:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
	case CHIP_POLARIS10:
	case CHIP_VEGAM:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	default:

		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(3, 0, 0):
			adev->mode_info.num_crtc = 6;
			adev->mode_info.num_hpd = 6;
			adev->mode_info.num_dig = 6;
			break;
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(3, 0, 2):
			adev->mode_info.num_crtc = 5;
			adev->mode_info.num_hpd = 5;
			adev->mode_info.num_dig = 5;
			break;
		case IP_VERSION(2, 0, 3):
		case IP_VERSION(3, 0, 3):
			adev->mode_info.num_crtc = 2;
			adev->mode_info.num_hpd = 2;
			adev->mode_info.num_dig = 2;
			break;
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
		case IP_VERSION(3, 1, 5):
		case IP_VERSION(3, 1, 6):
			adev->mode_info.num_crtc = 4;
			adev->mode_info.num_hpd = 4;
			adev->mode_info.num_dig = 4;
			break;
		default:
			DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
					adev->ip_versions[DCE_HWIP][0]);
			return -EINVAL;
		}
		break;
	}

	amdgpu_dm_set_irq_funcs(adev);

	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dm_display_funcs;

	/*
	 * Note: Do NOT change adev->audio_endpt_rreg and
	 * adev->audio_endpt_wreg because they are initialised in
	 * amdgpu_device_init()
	 */
#if defined(CONFIG_DEBUG_KERNEL_DC)
	device_create_file(
		adev_to_drm(adev)->dev,
		&dev_attr_s3_debug);
#endif

	return 0;
}

static bool modeset_required(struct drm_crtc_state *crtc_state,
			     struct dc_stream_state *new_stream,
			     struct dc_stream_state *old_stream)
{
	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
}

static bool modereset_required(struct drm_crtc_state *crtc_state)
{
	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
}

static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}

static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};

static void get_min_max_dc_plane_scaling(struct drm_device *dev,
					 struct drm_framebuffer *fb,
					 int *min_downscale, int *max_upscale)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];

	switch (fb->format->format) {
	case DRM_FORMAT_P010:
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV21:
		*max_upscale = plane_cap->max_upscale_factor.nv12;
		*min_downscale = plane_cap->max_downscale_factor.nv12;
		break;

	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		*max_upscale = plane_cap->max_upscale_factor.fp16;
		*min_downscale = plane_cap->max_downscale_factor.fp16;
		break;

	default:
		*max_upscale = plane_cap->max_upscale_factor.argb8888;
		*min_downscale = plane_cap->max_downscale_factor.argb8888;
		break;
	}

	/*
	 * A factor of 1 in the plane_cap means to not allow scaling, ie. use a
	 * scaling factor of 1.0 == 1000 units.
	 */
	if (*max_upscale == 1)
		*max_upscale = 1000;

	if (*min_downscale == 1)
		*min_downscale = 1000;
}
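
/*
 * Units note for the helper above: scaling factors are expressed in
 * thousandths, so 1000 means 1:1. For example, a min_downscale of 250
 * allows shrinking to 1/4 of the source size, and a max_upscale of 16000
 * allows a 16x enlargement (these two numbers are also the fallback bounds
 * used by fill_dc_scaling_info() below when no plane caps are available).
 */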

static int fill_dc_scaling_info(struct amdgpu_device *adev,
				const struct drm_plane_state *state,
				struct dc_scaling_info *scaling_info)
{
	int scale_w, scale_h, min_downscale, max_upscale;

	memset(scaling_info, 0, sizeof(*scaling_info));

	/* Source is fixed 16.16 but we ignore mantissa for now... */
	scaling_info->src_rect.x = state->src_x >> 16;
	scaling_info->src_rect.y = state->src_y >> 16;

	/*
	 * For reasons we don't (yet) fully understand a non-zero
	 * src_y coordinate into an NV12 buffer can cause a
	 * system hang on DCN1x.
	 * To avoid hangs (and maybe be overly cautious)
	 * let's reject both non-zero src_x and src_y.
	 *
	 * We currently know of only one use-case to reproduce a
	 * scenario with non-zero src_x and src_y for NV12, which
	 * is to gesture the YouTube Android app into full screen
	 * on ChromeOS.
	 */
	if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
	    (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
	    (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
	    (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
		return -EINVAL;

	scaling_info->src_rect.width = state->src_w >> 16;
	if (scaling_info->src_rect.width == 0)
		return -EINVAL;

	scaling_info->src_rect.height = state->src_h >> 16;
	if (scaling_info->src_rect.height == 0)
		return -EINVAL;

	scaling_info->dst_rect.x = state->crtc_x;
	scaling_info->dst_rect.y = state->crtc_y;

	if (state->crtc_w == 0)
		return -EINVAL;

	scaling_info->dst_rect.width = state->crtc_w;

	if (state->crtc_h == 0)
		return -EINVAL;

	scaling_info->dst_rect.height = state->crtc_h;

	/* DRM doesn't specify clipping on destination output. */
	scaling_info->clip_rect = scaling_info->dst_rect;

	/* Validate scaling per-format with DC plane caps */
	if (state->plane && state->plane->dev && state->fb) {
		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
					     &min_downscale, &max_upscale);
	} else {
		min_downscale = 250;
		max_upscale = 16000;
	}

	scale_w = scaling_info->dst_rect.width * 1000 /
		  scaling_info->src_rect.width;

	if (scale_w < min_downscale || scale_w > max_upscale)
		return -EINVAL;

	scale_h = scaling_info->dst_rect.height * 1000 /
		  scaling_info->src_rect.height;

	if (scale_h < min_downscale || scale_h > max_upscale)
		return -EINVAL;

	/*
	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
	 * assume reasonable defaults based on the format.
	 */

	return 0;
}
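
/*
 * Worked example of the scale check above: a 3840-wide source shown in a
 * 1920-wide destination gives scale_w = 1920 * 1000 / 3840 = 500, i.e. a
 * 2:1 downscale, which passes against the fallback min_downscale of 250
 * (4:1) but would be rejected if the plane caps only allowed, say, 600.
 */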

static void
fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
				 uint64_t tiling_flags)
{
	/* Fill GFX8 params */
	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		/* XXX fix me for VI */
		tiling_info->gfx8.num_banks = num_banks;
		tiling_info->gfx8.array_mode =
				DC_ARRAY_2D_TILED_THIN1;
		tiling_info->gfx8.tile_split = tile_split;
		tiling_info->gfx8.bank_width = bankw;
		tiling_info->gfx8.bank_height = bankh;
		tiling_info->gfx8.tile_aspect = mtaspect;
		tiling_info->gfx8.tile_mode =
				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
			== DC_ARRAY_1D_TILED_THIN1) {
		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
	}

	tiling_info->gfx8.pipe_config =
			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
}

static void
fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
				  union dc_tiling_info *tiling_info)
{
	tiling_info->gfx9.num_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;
	tiling_info->gfx9.num_banks =
		adev->gfx.config.gb_addr_config_fields.num_banks;
	tiling_info->gfx9.pipe_interleave =
		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
	tiling_info->gfx9.num_shader_engines =
		adev->gfx.config.gb_addr_config_fields.num_se;
	tiling_info->gfx9.max_compressed_frags =
		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
	tiling_info->gfx9.num_rb_per_se =
		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
	tiling_info->gfx9.shaderEnable = 1;
	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
}

static int
validate_dcc(struct amdgpu_device *adev,
	     const enum surface_pixel_format format,
	     const enum dc_rotation_angle rotation,
	     const union dc_tiling_info *tiling_info,
	     const struct dc_plane_dcc_param *dcc,
	     const struct dc_plane_address *address,
	     const struct plane_size *plane_size)
{
	struct dc *dc = adev->dm.dc;
	struct dc_dcc_surface_param input;
	struct dc_surface_dcc_cap output;

	memset(&input, 0, sizeof(input));
	memset(&output, 0, sizeof(output));

	if (!dcc->enable)
		return 0;

	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
	    !dc->cap_funcs.get_dcc_compression_cap)
		return -EINVAL;

	input.format = format;
	input.surface_size.width = plane_size->surface_size.width;
	input.surface_size.height = plane_size->surface_size.height;
	input.swizzle_mode = tiling_info->gfx9.swizzle;

	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
		input.scan = SCAN_DIRECTION_HORIZONTAL;
	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
		input.scan = SCAN_DIRECTION_VERTICAL;

	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
		return -EINVAL;

	if (!output.capable)
		return -EINVAL;

	if (dcc->independent_64b_blks == 0 &&
	    output.grph.rgb.independent_64b_blks != 0)
		return -EINVAL;

	return 0;
}

static bool
modifier_has_dcc(uint64_t modifier)
{
	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
}

static unsigned int
modifier_gfx9_swizzle_mode(uint64_t modifier)
{
	if (modifier == DRM_FORMAT_MOD_LINEAR)
		return 0;

	return AMD_FMT_MOD_GET(TILE, modifier);
}

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
{
	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
}
37384b3f
BN
4860static void
4861fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4862 union dc_tiling_info *tiling_info,
4863 uint64_t modifier)
4864{
4865 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4866 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4867 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4868 unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4869
4870 fill_gfx9_tiling_info_from_device(adev, tiling_info);
4871
4872 if (!IS_AMD_FMT_MOD(modifier))
4873 return;
4874
4875 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4876 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4877
4878 if (adev->family >= AMDGPU_FAMILY_NV) {
4879 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4880 } else {
4881 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4882
4883 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4884 }
4885}
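/*
 * Worked example (illustrative): with PIPE_XOR_BITS = 5 encoded in the
 * modifier, pipes_log2 = min(4, 5) = 4, so num_pipes = 1 << 4 = 16 and
 * num_shader_engines = 1 << (5 - 4) = 2. On pre-NV families the
 * BANK_XOR_BITS field feeds num_banks in the same way.
 */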
4886
faa37f54
BN
4887enum dm_micro_swizzle {
4888 MICRO_SWIZZLE_Z = 0,
4889 MICRO_SWIZZLE_S = 1,
4890 MICRO_SWIZZLE_D = 2,
4891 MICRO_SWIZZLE_R = 3
4892};
4893
4894static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4895 uint32_t format,
4896 uint64_t modifier)
4897{
4898 struct amdgpu_device *adev = drm_to_adev(plane->dev);
4899 const struct drm_format_info *info = drm_format_info(format);
fe180178 4900 int i;
faa37f54
BN
4901
4902 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4903
4904 if (!info)
4905 return false;
4906
4907 /*
fe180178
QZ
4908 * We always have to allow these modifiers:
4909 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4910 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
faa37f54 4911 */
fe180178
QZ
4912 if (modifier == DRM_FORMAT_MOD_LINEAR ||
4913 modifier == DRM_FORMAT_MOD_INVALID) {
faa37f54 4914 return true;
fe180178 4915 }
faa37f54 4916
fe180178
QZ
4917 /* Check that the modifier is on the list of the plane's supported modifiers. */
4918 for (i = 0; i < plane->modifier_count; i++) {
4919 if (modifier == plane->modifiers[i])
4920 break;
4921 }
4922 if (i == plane->modifier_count)
faa37f54
BN
4923 return false;
4924
4925 /*
4926 * For D swizzle the canonical modifier depends on the bpp, so check
4927 * it here.
4928 */
4929 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4930 adev->family >= AMDGPU_FAMILY_NV) {
4931 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4932 return false;
4933 }
4934
4935 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4936 info->cpp[0] < 8)
4937 return false;
4938
4939 if (modifier_has_dcc(modifier)) {
4940 /* Per radeonsi comments, 16/64 bpp are more complicated. */
4941 if (info->cpp[0] != 4)
4942 return false;
951796f2
SS
4943 /* We support multi-planar formats, but not when combined with
4944 * additional DCC metadata planes. */
4945 if (info->num_planes > 1)
4946 return false;
faa37f54
BN
4947 }
4948
4949 return true;
4950}
4951
4952static void
4953add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4954{
4955 if (!*mods)
4956 return;
4957
4958 if (*cap - *size < 1) {
4959 uint64_t new_cap = *cap * 2;
4960 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4961
4962 if (!new_mods) {
4963 kfree(*mods);
4964 *mods = NULL;
4965 return;
4966 }
4967
4968 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4969 kfree(*mods);
4970 *mods = new_mods;
4971 *cap = new_cap;
4972 }
4973
4974 (*mods)[*size] = mod;
4975 *size += 1;
4976}
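/*
 * Usage sketch (illustrative): callers thread the same mods/size/capacity
 * triple through every call. On allocation failure *mods is poisoned to
 * NULL, so get_plane_modifiers() below can report -ENOMEM once at the end
 * instead of checking each append:
 *
 *	uint64_t size = 0, capacity = 128;
 *	uint64_t *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
 *
 *	add_modifier(&mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
 *	add_modifier(&mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
 *	if (!mods)
 *		return -ENOMEM;
 */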
4977
4978static void
4979add_gfx9_modifiers(const struct amdgpu_device *adev,
4980 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4981{
4982 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4983 int pipe_xor_bits = min(8, pipes +
4984 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4985 int bank_xor_bits = min(8 - pipe_xor_bits,
4986 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4987 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4988 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4989
4990
4991 if (adev->family == AMDGPU_FAMILY_RV) {
4992 /* Raven2 and later */
4993 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4994
4995 /*
4996 * No _D DCC swizzles yet because we only allow 32bpp, which
4997 * doesn't support _D on DCN
4998 */
4999
5000 if (has_constant_encode) {
5001 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5002 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5003 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5004 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5005 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5006 AMD_FMT_MOD_SET(DCC, 1) |
5007 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5008 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5009 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
5010 }
5011
5012 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5013 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5014 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5015 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5016 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5017 AMD_FMT_MOD_SET(DCC, 1) |
5018 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5019 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5020 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
5021
5022 if (has_constant_encode) {
5023 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5024 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5025 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5026 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5027 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5028 AMD_FMT_MOD_SET(DCC, 1) |
5029 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5030 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5031 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5032
5033 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5034 AMD_FMT_MOD_SET(RB, rb) |
5035 AMD_FMT_MOD_SET(PIPE, pipes));
5036 }
5037
5038 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5039 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5040 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5041 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5042 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5043 AMD_FMT_MOD_SET(DCC, 1) |
5044 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5045 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5046 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5047 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
5048 AMD_FMT_MOD_SET(RB, rb) |
5049 AMD_FMT_MOD_SET(PIPE, pipes));
5050 }
5051
5052 /*
5053 * Only supported for 64bpp on Raven; will be filtered on format in
5054 * dm_plane_format_mod_supported.
5055 */
5056 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5057 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
5058 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5059 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5060 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5061
5062 if (adev->family == AMDGPU_FAMILY_RV) {
5063 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5064 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5065 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5066 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5067 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5068 }
5069
5070 /*
5071 * Only supported for 64bpp on Raven; will be filtered on format in
5072 * dm_plane_format_mod_supported.
5073 */
5074 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5075 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5076 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5077
5078 if (adev->family == AMDGPU_FAMILY_RV) {
5079 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5080 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5081 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5082 }
5083}
5084
5085static void
5086add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5087 uint64_t **mods, uint64_t *size, uint64_t *capacity)
5088{
5089 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5090
5091 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5092 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5093 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5094 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5095 AMD_FMT_MOD_SET(DCC, 1) |
5096 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5097 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5098 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5099
5100 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5101 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5102 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5103 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5104 AMD_FMT_MOD_SET(DCC, 1) |
5105 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5106 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5107 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5108 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5109
5110 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5111 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5112 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5113 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5114
5115 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5116 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5117 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5118 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5119
5120
5121 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5122 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5123 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5124 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5125
5126 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5127 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5128 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5129}
5130
5131static void
5132add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5133 uint64_t **mods, uint64_t *size, uint64_t *capacity)
5134{
5135 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5136 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5137
5138 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5139 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5140 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5141 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5142 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5143 AMD_FMT_MOD_SET(DCC, 1) |
5144 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5145 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5146 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
2b608182 5147 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
faa37f54 5148
7f6ab50a
JA
5149 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5150 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5151 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5152 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5153 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5154 AMD_FMT_MOD_SET(DCC, 1) |
5155 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5156 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5157 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5158
faa37f54
BN
5159 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5160 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5161 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5162 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5163 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5164 AMD_FMT_MOD_SET(DCC, 1) |
5165 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5166 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5167 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5168 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
2b608182 5169 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
faa37f54 5170
7f6ab50a
JA
5171 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5172 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5173 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5174 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5175 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5176 AMD_FMT_MOD_SET(DCC, 1) |
5177 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5178 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5179 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5180 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5181
faa37f54
BN
5182 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5183 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5184 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5185 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5186 AMD_FMT_MOD_SET(PACKERS, pkrs));
5187
5188 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5189 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5190 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5191 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5192 AMD_FMT_MOD_SET(PACKERS, pkrs));
5193
5194 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5195 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5196 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5197 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5198
5199 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5200 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5201 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5202}
5203
5204static int
5205get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5206{
5207 uint64_t size = 0, capacity = 128;
5208 *mods = NULL;
5209
5210 /* We have not hooked up any pre-GFX9 modifiers. */
5211 if (adev->family < AMDGPU_FAMILY_AI)
5212 return 0;
5213
5214 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5215
5216 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5217 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5218 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5219 return *mods ? 0 : -ENOMEM;
5220 }
5221
5222 switch (adev->family) {
5223 case AMDGPU_FAMILY_AI:
5224 case AMDGPU_FAMILY_RV:
5225 add_gfx9_modifiers(adev, mods, &size, &capacity);
5226 break;
5227 case AMDGPU_FAMILY_NV:
5228 case AMDGPU_FAMILY_VGH:
1ebcaebd 5229 case AMDGPU_FAMILY_YC:
b5b8ed44 5230 case AMDGPU_FAMILY_GC_10_3_6:
de7cc1b4 5231 case AMDGPU_FAMILY_GC_10_3_7:
1d789535 5232 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
faa37f54
BN
5233 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5234 else
5235 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5236 break;
5237 }
5238
5239 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5240
5241 /* INVALID marks the end of the list. */
5242 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5243
5244 if (!*mods)
5245 return -ENOMEM;
5246
5247 return 0;
5248}
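/*
 * Consumption sketch (illustrative; the plane-init code sits outside this
 * excerpt, and funcs/formats/num_formats are placeholder names): the
 * returned array is terminated by DRM_FORMAT_MOD_INVALID and is typically
 * handed to drm_universal_plane_init(), which copies it, so the caller
 * frees its own copy afterwards:
 *
 *	uint64_t *mods = NULL;
 *	int ret = get_plane_modifiers(adev, plane_type, &mods);
 *
 *	if (!ret)
 *		ret = drm_universal_plane_init(dev, plane, possible_crtcs,
 *					       &funcs, formats, num_formats,
 *					       mods, plane_type, NULL);
 *	kfree(mods);
 */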
5249
37384b3f
BN
5250static int
5251fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5252 const struct amdgpu_framebuffer *afb,
5253 const enum surface_pixel_format format,
5254 const enum dc_rotation_angle rotation,
5255 const struct plane_size *plane_size,
5256 union dc_tiling_info *tiling_info,
5257 struct dc_plane_dcc_param *dcc,
5258 struct dc_plane_address *address,
5259 const bool force_disable_dcc)
5260{
5261 const uint64_t modifier = afb->base.modifier;
2be7f77f 5262 int ret = 0;
37384b3f
BN
5263
5264 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5265 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5266
5267 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5268 uint64_t dcc_address = afb->address + afb->base.offsets[1];
3d360154 5269 bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
a86396c3 5270 bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
37384b3f
BN
5271
5272 dcc->enable = 1;
5273 dcc->meta_pitch = afb->base.pitches[1];
3d360154 5274 dcc->independent_64b_blks = independent_64b_blks;
a86396c3
JA
5275 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5276 if (independent_64b_blks && independent_128b_blks)
f2e7d856 5277 dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
a86396c3
JA
5278 else if (independent_128b_blks)
5279 dcc->dcc_ind_blk = hubp_ind_block_128b;
5280 else if (independent_64b_blks && !independent_128b_blks)
f2e7d856 5281 dcc->dcc_ind_blk = hubp_ind_block_64b;
a86396c3
JA
5282 else
5283 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5284 } else {
5285 if (independent_64b_blks)
5286 dcc->dcc_ind_blk = hubp_ind_block_64b;
5287 else
5288 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5289 }
37384b3f
BN
5290
5291 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5292 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5293 }
5294
5295 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5296 if (ret)
2be7f77f 5297 drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
7df7e505 5298
2be7f77f 5299 return ret;
09e5665a
NK
5300}
5301
5302static int
320932bf 5303fill_plane_buffer_attributes(struct amdgpu_device *adev,
09e5665a 5304 const struct amdgpu_framebuffer *afb,
695af5f9
NK
5305 const enum surface_pixel_format format,
5306 const enum dc_rotation_angle rotation,
5307 const uint64_t tiling_flags,
09e5665a 5308 union dc_tiling_info *tiling_info,
12e2b2d4 5309 struct plane_size *plane_size,
09e5665a 5310 struct dc_plane_dcc_param *dcc,
87b7ebc2 5311 struct dc_plane_address *address,
5888f07a 5312 bool tmz_surface,
87b7ebc2 5313 bool force_disable_dcc)
09e5665a 5314{
320932bf 5315 const struct drm_framebuffer *fb = &afb->base;
09e5665a
NK
5316 int ret;
5317
5318 memset(tiling_info, 0, sizeof(*tiling_info));
320932bf 5319 memset(plane_size, 0, sizeof(*plane_size));
09e5665a 5320 memset(dcc, 0, sizeof(*dcc));
e0634e8d
NK
5321 memset(address, 0, sizeof(*address));
5322
5888f07a
HW
5323 address->tmz_surface = tmz_surface;
5324
695af5f9 5325 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
be7b9b32
BN
5326 uint64_t addr = afb->address + fb->offsets[0];
5327
12e2b2d4
DL
5328 plane_size->surface_size.x = 0;
5329 plane_size->surface_size.y = 0;
5330 plane_size->surface_size.width = fb->width;
5331 plane_size->surface_size.height = fb->height;
5332 plane_size->surface_pitch =
320932bf
NK
5333 fb->pitches[0] / fb->format->cpp[0];
5334
e0634e8d 5335 address->type = PLN_ADDR_TYPE_GRAPHICS;
be7b9b32
BN
5336 address->grph.addr.low_part = lower_32_bits(addr);
5337 address->grph.addr.high_part = upper_32_bits(addr);
1894478a 5338 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
be7b9b32 5339 uint64_t luma_addr = afb->address + fb->offsets[0];
1791e54f 5340 uint64_t chroma_addr = afb->address + fb->offsets[1];
e0634e8d 5341
12e2b2d4
DL
5342 plane_size->surface_size.x = 0;
5343 plane_size->surface_size.y = 0;
5344 plane_size->surface_size.width = fb->width;
5345 plane_size->surface_size.height = fb->height;
5346 plane_size->surface_pitch =
320932bf
NK
5347 fb->pitches[0] / fb->format->cpp[0];
5348
12e2b2d4
DL
5349 plane_size->chroma_size.x = 0;
5350 plane_size->chroma_size.y = 0;
320932bf 5351 /* TODO: set these based on surface format */
12e2b2d4
DL
5352 plane_size->chroma_size.width = fb->width / 2;
5353 plane_size->chroma_size.height = fb->height / 2;
320932bf 5354
12e2b2d4 5355 plane_size->chroma_pitch =
320932bf
NK
5356 fb->pitches[1] / fb->format->cpp[1];
5357
e0634e8d
NK
5358 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5359 address->video_progressive.luma_addr.low_part =
be7b9b32 5360 lower_32_bits(luma_addr);
e0634e8d 5361 address->video_progressive.luma_addr.high_part =
be7b9b32 5362 upper_32_bits(luma_addr);
e0634e8d
NK
5363 address->video_progressive.chroma_addr.low_part =
5364 lower_32_bits(chroma_addr);
5365 address->video_progressive.chroma_addr.high_part =
5366 upper_32_bits(chroma_addr);
5367 }
09e5665a 5368
a3241991 5369 if (adev->family >= AMDGPU_FAMILY_AI) {
9a33e881
BN
5370 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5371 rotation, plane_size,
5372 tiling_info, dcc,
5373 address,
5374 force_disable_dcc);
09e5665a
NK
5375 if (ret)
5376 return ret;
a3241991
BN
5377 } else {
5378 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
09e5665a
NK
5379 }
5380
5381 return 0;
7df7e505
NK
5382}
5383
d74004b6 5384static void
695af5f9 5385fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
76818cdd
SJK
5386 bool *per_pixel_alpha, bool *pre_multiplied_alpha,
5387 bool *global_alpha, int *global_alpha_value)
d74004b6
NK
5388{
5389 *per_pixel_alpha = false;
76818cdd 5390 *pre_multiplied_alpha = true;
d74004b6
NK
5391 *global_alpha = false;
5392 *global_alpha_value = 0xff;
5393
5394 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5395 return;
5396
76818cdd
SJK
5397 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI ||
5398 plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) {
d74004b6
NK
5399 static const uint32_t alpha_formats[] = {
5400 DRM_FORMAT_ARGB8888,
5401 DRM_FORMAT_RGBA8888,
5402 DRM_FORMAT_ABGR8888,
5403 };
5404 uint32_t format = plane_state->fb->format->format;
5405 unsigned int i;
5406
5407 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5408 if (format == alpha_formats[i]) {
5409 *per_pixel_alpha = true;
5410 break;
5411 }
5412 }
76818cdd
SJK
5413
5414 if (*per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE)
5415 *pre_multiplied_alpha = false;
d74004b6
NK
5416 }
5417
5418 if (plane_state->alpha < 0xffff) {
5419 *global_alpha = true;
5420 *global_alpha_value = plane_state->alpha >> 8;
5421 }
5422}
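/*
 * Example outcome (illustrative): an OVERLAY plane scanning out
 * DRM_FORMAT_ARGB8888 with pixel_blend_mode == DRM_MODE_BLEND_COVERAGE
 * yields *per_pixel_alpha = true and *pre_multiplied_alpha = false,
 * while DRM_MODE_BLEND_PREMULTI keeps *pre_multiplied_alpha = true.
 * A plane alpha of 0x8000 additionally sets *global_alpha = true with
 * *global_alpha_value = 0x8000 >> 8 = 0x80.
 */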
5423
004fefa3
NK
5424static int
5425fill_plane_color_attributes(const struct drm_plane_state *plane_state,
695af5f9 5426 const enum surface_pixel_format format,
004fefa3
NK
5427 enum dc_color_space *color_space)
5428{
5429 bool full_range;
5430
5431 *color_space = COLOR_SPACE_SRGB;
5432
5433 /* DRM color properties only affect non-RGB formats. */
695af5f9 5434 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
004fefa3
NK
5435 return 0;
5436
5437 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5438
5439 switch (plane_state->color_encoding) {
5440 case DRM_COLOR_YCBCR_BT601:
5441 if (full_range)
5442 *color_space = COLOR_SPACE_YCBCR601;
5443 else
5444 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
5445 break;
5446
5447 case DRM_COLOR_YCBCR_BT709:
5448 if (full_range)
5449 *color_space = COLOR_SPACE_YCBCR709;
5450 else
5451 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
5452 break;
5453
5454 case DRM_COLOR_YCBCR_BT2020:
5455 if (full_range)
5456 *color_space = COLOR_SPACE_2020_YCBCR;
5457 else
5458 return -EINVAL;
5459 break;
5460
5461 default:
5462 return -EINVAL;
5463 }
5464
5465 return 0;
5466}
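/*
 * Example mapping (illustrative): an NV12 plane with
 * color_encoding == DRM_COLOR_YCBCR_BT709 and
 * color_range == DRM_COLOR_YCBCR_LIMITED_RANGE resolves to
 * COLOR_SPACE_YCBCR709_LIMITED, whereas any RGB format returns
 * COLOR_SPACE_SRGB regardless of the DRM color properties.
 */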
5467
695af5f9
NK
5468static int
5469fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5470 const struct drm_plane_state *plane_state,
5471 const uint64_t tiling_flags,
5472 struct dc_plane_info *plane_info,
87b7ebc2 5473 struct dc_plane_address *address,
5888f07a 5474 bool tmz_surface,
87b7ebc2 5475 bool force_disable_dcc)
695af5f9
NK
5476{
5477 const struct drm_framebuffer *fb = plane_state->fb;
5478 const struct amdgpu_framebuffer *afb =
5479 to_amdgpu_framebuffer(plane_state->fb);
695af5f9
NK
5480 int ret;
5481
5482 memset(plane_info, 0, sizeof(*plane_info));
5483
5484 switch (fb->format->format) {
5485 case DRM_FORMAT_C8:
5486 plane_info->format =
5487 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5488 break;
5489 case DRM_FORMAT_RGB565:
5490 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5491 break;
5492 case DRM_FORMAT_XRGB8888:
5493 case DRM_FORMAT_ARGB8888:
5494 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5495 break;
5496 case DRM_FORMAT_XRGB2101010:
5497 case DRM_FORMAT_ARGB2101010:
5498 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5499 break;
5500 case DRM_FORMAT_XBGR2101010:
5501 case DRM_FORMAT_ABGR2101010:
5502 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5503 break;
5504 case DRM_FORMAT_XBGR8888:
5505 case DRM_FORMAT_ABGR8888:
5506 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5507 break;
5508 case DRM_FORMAT_NV21:
5509 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5510 break;
5511 case DRM_FORMAT_NV12:
5512 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5513 break;
cbec6477
SW
5514 case DRM_FORMAT_P010:
5515 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5516 break;
492548dc
SW
5517 case DRM_FORMAT_XRGB16161616F:
5518 case DRM_FORMAT_ARGB16161616F:
5519 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5520 break;
2a5195dc
MK
5521 case DRM_FORMAT_XBGR16161616F:
5522 case DRM_FORMAT_ABGR16161616F:
5523 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5524 break;
58020403
MK
5525 case DRM_FORMAT_XRGB16161616:
5526 case DRM_FORMAT_ARGB16161616:
5527 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5528 break;
5529 case DRM_FORMAT_XBGR16161616:
5530 case DRM_FORMAT_ABGR16161616:
5531 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5532 break;
695af5f9
NK
5533 default:
5534 DRM_ERROR(
92f1d09c
SA
5535 "Unsupported screen format %p4cc\n",
5536 &fb->format->format);
695af5f9
NK
5537 return -EINVAL;
5538 }
5539
5540 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5541 case DRM_MODE_ROTATE_0:
5542 plane_info->rotation = ROTATION_ANGLE_0;
5543 break;
5544 case DRM_MODE_ROTATE_90:
5545 plane_info->rotation = ROTATION_ANGLE_90;
5546 break;
5547 case DRM_MODE_ROTATE_180:
5548 plane_info->rotation = ROTATION_ANGLE_180;
5549 break;
5550 case DRM_MODE_ROTATE_270:
5551 plane_info->rotation = ROTATION_ANGLE_270;
5552 break;
5553 default:
5554 plane_info->rotation = ROTATION_ANGLE_0;
5555 break;
5556 }
5557
5558 plane_info->visible = true;
5559 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5560
6d83a32d
MS
5561 plane_info->layer_index = 0;
5562
695af5f9
NK
5563 ret = fill_plane_color_attributes(plane_state, plane_info->format,
5564 &plane_info->color_space);
5565 if (ret)
5566 return ret;
5567
5568 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5569 plane_info->rotation, tiling_flags,
5570 &plane_info->tiling_info,
5571 &plane_info->plane_size,
5888f07a 5572 &plane_info->dcc, address, tmz_surface,
87b7ebc2 5573 force_disable_dcc);
695af5f9
NK
5574 if (ret)
5575 return ret;
5576
5577 fill_blending_from_plane_state(
76818cdd 5578 plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
695af5f9
NK
5579 &plane_info->global_alpha, &plane_info->global_alpha_value);
5580
5581 return 0;
5582}
5583
5584static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5585 struct dc_plane_state *dc_plane_state,
5586 struct drm_plane_state *plane_state,
5587 struct drm_crtc_state *crtc_state)
e7b07cee 5588{
cf020d49 5589 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6eed95b0 5590 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
695af5f9
NK
5591 struct dc_scaling_info scaling_info;
5592 struct dc_plane_info plane_info;
695af5f9 5593 int ret;
87b7ebc2 5594 bool force_disable_dcc = false;
e7b07cee 5595
4375d625 5596 ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
695af5f9
NK
5597 if (ret)
5598 return ret;
e7b07cee 5599
695af5f9
NK
5600 dc_plane_state->src_rect = scaling_info.src_rect;
5601 dc_plane_state->dst_rect = scaling_info.dst_rect;
5602 dc_plane_state->clip_rect = scaling_info.clip_rect;
5603 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
e7b07cee 5604
87b7ebc2 5605 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
707477b0 5606 ret = fill_dc_plane_info_and_addr(adev, plane_state,
6eed95b0 5607 afb->tiling_flags,
695af5f9 5608 &plane_info,
87b7ebc2 5609 &dc_plane_state->address,
6eed95b0 5610 afb->tmz_surface,
87b7ebc2 5611 force_disable_dcc);
004fefa3
NK
5612 if (ret)
5613 return ret;
5614
695af5f9
NK
5615 dc_plane_state->format = plane_info.format;
5616 dc_plane_state->color_space = plane_info.color_space;
5618 dc_plane_state->plane_size = plane_info.plane_size;
5619 dc_plane_state->rotation = plane_info.rotation;
5620 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5621 dc_plane_state->stereo_format = plane_info.stereo_format;
5622 dc_plane_state->tiling_info = plane_info.tiling_info;
5623 dc_plane_state->visible = plane_info.visible;
5624 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
76818cdd 5625 dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
695af5f9
NK
5626 dc_plane_state->global_alpha = plane_info.global_alpha;
5627 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5628 dc_plane_state->dcc = plane_info.dcc;
6d83a32d 5629 dc_plane_state->layer_index = plane_info.layer_index; /* currently always 0 */
7afa0033 5630 dc_plane_state->flip_int_enabled = true;
695af5f9 5631
e277adc5
LSL
5632 /*
5633 * Always set input transfer function, since plane state is refreshed
5634 * every time.
5635 */
cf020d49
NK
5636 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5637 if (ret)
5638 return ret;
e7b07cee 5639
cf020d49 5640 return 0;
e7b07cee
HW
5641}
5642
3ee6b26b
AD
5643static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5644 const struct dm_connector_state *dm_state,
5645 struct dc_stream_state *stream)
e7b07cee
HW
5646{
5647 enum amdgpu_rmx_type rmx_type;
5648
5649 struct rect src = { 0 }; /* viewport in composition space */
5650 struct rect dst = { 0 }; /* stream addressable area */
5651
5652 /* no mode. nothing to be done */
5653 if (!mode)
5654 return;
5655
5656 /* Full screen scaling by default */
5657 src.width = mode->hdisplay;
5658 src.height = mode->vdisplay;
5659 dst.width = stream->timing.h_addressable;
5660 dst.height = stream->timing.v_addressable;
5661
f4791779
HW
5662 if (dm_state) {
5663 rmx_type = dm_state->scaling;
5664 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5665 if (src.width * dst.height <
5666 src.height * dst.width) {
5667 /* height needs less upscaling/more downscaling */
5668 dst.width = src.width *
5669 dst.height / src.height;
5670 } else {
5671 /* width needs less upscaling/more downscaling */
5672 dst.height = src.height *
5673 dst.width / src.width;
5674 }
5675 } else if (rmx_type == RMX_CENTER) {
5676 dst = src;
e7b07cee 5677 }
e7b07cee 5678
f4791779
HW
5679 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5680 dst.y = (stream->timing.v_addressable - dst.height) / 2;
e7b07cee 5681
f4791779
HW
5682 if (dm_state->underscan_enable) {
5683 dst.x += dm_state->underscan_hborder / 2;
5684 dst.y += dm_state->underscan_vborder / 2;
5685 dst.width -= dm_state->underscan_hborder;
5686 dst.height -= dm_state->underscan_vborder;
5687 }
e7b07cee
HW
5688 }
5689
5690 stream->src = src;
5691 stream->dst = dst;
5692
4711c033
LT
5693 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
5694 dst.x, dst.y, dst.width, dst.height);
e7b07cee
HW
5695
5696}
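/*
 * Worked example (illustrative): RMX_ASPECT scaling of a 1920x1080
 * source onto a 2560x1600 stream. Since 1920 * 1600 >= 1080 * 2560,
 * the width needs less upscaling, so dst.height = 1080 * 2560 / 1920
 * = 1440 and the image is letterboxed with dst.y = (1600 - 1440) / 2
 * = 80.
 */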
5697
3ee6b26b 5698static enum dc_color_depth
42ba01fc 5699convert_color_depth_from_display_info(const struct drm_connector *connector,
cbd14ae7 5700 bool is_y420, int requested_bpc)
e7b07cee 5701{
1bc22f20 5702 uint8_t bpc;
01c22997 5703
1bc22f20
SW
5704 if (is_y420) {
5705 bpc = 8;
5706
5707 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5708 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5709 bpc = 16;
5710 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5711 bpc = 12;
5712 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5713 bpc = 10;
5714 } else {
5715 bpc = (uint8_t)connector->display_info.bpc;
5716 /* Assume 8 bpc by default if no bpc is specified. */
5717 bpc = bpc ? bpc : 8;
5718 }
e7b07cee 5719
cbd14ae7 5720 if (requested_bpc > 0) {
01c22997
NK
5721 /*
5722 * Cap display bpc based on the user requested value.
5723 *
5724 * The value for state->max_bpc may not correctly updated
5725 * depending on when the connector gets added to the state
5726 * or if this was called outside of atomic check, so it
5727 * can't be used directly.
5728 */
cbd14ae7 5729 bpc = min_t(u8, bpc, requested_bpc);
01c22997 5730
1825fd34
NK
5731 /* Round down to the nearest even number. */
5732 bpc = bpc - (bpc & 1);
5733 }
07e3a1cf 5734
e7b07cee
HW
5735 switch (bpc) {
5736 case 0:
1f6010a9
DF
5737 /*
5738 * Temporary workaround: DRM doesn't parse color depth for
e7b07cee
HW
5739 * EDID revision before 1.4
5740 * TODO: Fix EDID parsing
5741 */
5742 return COLOR_DEPTH_888;
5743 case 6:
5744 return COLOR_DEPTH_666;
5745 case 8:
5746 return COLOR_DEPTH_888;
5747 case 10:
5748 return COLOR_DEPTH_101010;
5749 case 12:
5750 return COLOR_DEPTH_121212;
5751 case 14:
5752 return COLOR_DEPTH_141414;
5753 case 16:
5754 return COLOR_DEPTH_161616;
5755 default:
5756 return COLOR_DEPTH_UNDEFINED;
5757 }
5758}
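/*
 * Example (illustrative): a panel reporting 12 bpc combined with a
 * userspace "max bpc" of 10 gives min(12, 10) = 10, which is already
 * even, so COLOR_DEPTH_101010 is returned. A panel reporting 11 bpc
 * with no tighter request would be rounded down to 10 first.
 */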
5759
3ee6b26b
AD
5760static enum dc_aspect_ratio
5761get_aspect_ratio(const struct drm_display_mode *mode_in)
e7b07cee 5762{
e11d4147
LSL
5763 /* 1-1 mapping, since both enums follow the HDMI spec. */
5764 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
e7b07cee
HW
5765}
5766
3ee6b26b
AD
5767static enum dc_color_space
5768get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
e7b07cee
HW
5769{
5770 enum dc_color_space color_space = COLOR_SPACE_SRGB;
5771
5772 switch (dc_crtc_timing->pixel_encoding) {
5773 case PIXEL_ENCODING_YCBCR422:
5774 case PIXEL_ENCODING_YCBCR444:
5775 case PIXEL_ENCODING_YCBCR420:
5776 {
5777 /*
5778 * 27030 kHz is the separation point between HDTV and SDTV.
5779 * Per the HDMI spec, we use YCbCr709 above that rate and
5780 * YCbCr601 below it.
5781 */
380604e2 5782 if (dc_crtc_timing->pix_clk_100hz > 270300) {
e7b07cee
HW
5783 if (dc_crtc_timing->flags.Y_ONLY)
5784 color_space =
5785 COLOR_SPACE_YCBCR709_LIMITED;
5786 else
5787 color_space = COLOR_SPACE_YCBCR709;
5788 } else {
5789 if (dc_crtc_timing->flags.Y_ONLY)
5790 color_space =
5791 COLOR_SPACE_YCBCR601_LIMITED;
5792 else
5793 color_space = COLOR_SPACE_YCBCR601;
5794 }
5795
5796 }
5797 break;
5798 case PIXEL_ENCODING_RGB:
5799 color_space = COLOR_SPACE_SRGB;
5800 break;
5801
5802 default:
5803 WARN_ON(1);
5804 break;
5805 }
5806
5807 return color_space;
5808}
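/*
 * Example (illustrative): 1080p60 YCbCr 4:4:4 has
 * pix_clk_100hz = 1485000 (148.5 MHz), above the 270300 threshold, so
 * the stream gets COLOR_SPACE_YCBCR709; a 480p SDTV timing at 27 MHz
 * (pix_clk_100hz = 270000) falls below it and gets COLOR_SPACE_YCBCR601.
 */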
5809
ea117312
TA
5810static bool adjust_colour_depth_from_display_info(
5811 struct dc_crtc_timing *timing_out,
5812 const struct drm_display_info *info)
400443e8 5813{
ea117312 5814 enum dc_color_depth depth = timing_out->display_color_depth;
400443e8 5815 int normalized_clk;
400443e8 5816 do {
380604e2 5817 normalized_clk = timing_out->pix_clk_100hz / 10;
400443e8
ML
5818 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5819 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5820 normalized_clk /= 2;
5821 /* Adjusting pix clock following on HDMI spec based on colour depth */
ea117312
TA
5822 switch (depth) {
5823 case COLOR_DEPTH_888:
5824 break;
400443e8
ML
5825 case COLOR_DEPTH_101010:
5826 normalized_clk = (normalized_clk * 30) / 24;
5827 break;
5828 case COLOR_DEPTH_121212:
5829 normalized_clk = (normalized_clk * 36) / 24;
5830 break;
5831 case COLOR_DEPTH_161616:
5832 normalized_clk = (normalized_clk * 48) / 24;
5833 break;
5834 default:
ea117312
TA
5835 /* The above depths are the only ones valid for HDMI. */
5836 return false;
400443e8 5837 }
ea117312
TA
5838 if (normalized_clk <= info->max_tmds_clock) {
5839 timing_out->display_color_depth = depth;
5840 return true;
5841 }
5842 } while (--depth > COLOR_DEPTH_666);
5843 return false;
400443e8 5844}
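/*
 * Worked example (illustrative): 3840x2160@60 has a 594 MHz pixel clock,
 * so normalized_clk = 594000 kHz. At COLOR_DEPTH_101010 that becomes
 * 594000 * 30 / 24 = 742500 kHz, above a 600000 kHz max_tmds_clock, so
 * the loop retries COLOR_DEPTH_888 (594000 kHz) and succeeds. With
 * YCbCr 4:2:0 the clock is halved to 297000 kHz first, so 10 bpc fits.
 */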
e7b07cee 5845
42ba01fc
NK
5846static void fill_stream_properties_from_drm_display_mode(
5847 struct dc_stream_state *stream,
5848 const struct drm_display_mode *mode_in,
5849 const struct drm_connector *connector,
5850 const struct drm_connector_state *connector_state,
cbd14ae7
SW
5851 const struct dc_stream_state *old_stream,
5852 int requested_bpc)
e7b07cee
HW
5853{
5854 struct dc_crtc_timing *timing_out = &stream->timing;
fe61a2f1 5855 const struct drm_display_info *info = &connector->display_info;
d4252eee 5856 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
1cb1d477
WL
5857 struct hdmi_vendor_infoframe hv_frame;
5858 struct hdmi_avi_infoframe avi_frame;
e7b07cee 5859
acf83f86
WL
5860 memset(&hv_frame, 0, sizeof(hv_frame));
5861 memset(&avi_frame, 0, sizeof(avi_frame));
5862
e7b07cee
HW
5863 timing_out->h_border_left = 0;
5864 timing_out->h_border_right = 0;
5865 timing_out->v_border_top = 0;
5866 timing_out->v_border_bottom = 0;
5867 /* TODO: un-hardcode */
fe61a2f1 5868 if (drm_mode_is_420_only(info, mode_in)
ceb3dbb4 5869 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
fe61a2f1 5870 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
d4252eee
SW
5871 else if (drm_mode_is_420_also(info, mode_in)
5872 && aconnector->force_yuv420_output)
5873 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
c03d0b52 5874 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
ceb3dbb4 5875 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
e7b07cee
HW
5876 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5877 else
5878 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5879
5880 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5881 timing_out->display_color_depth = convert_color_depth_from_display_info(
cbd14ae7
SW
5882 connector,
5883 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5884 requested_bpc);
e7b07cee
HW
5885 timing_out->scan_type = SCANNING_TYPE_NODATA;
5886 timing_out->hdmi_vic = 0;
b333730d
BL
5887
5888 if (old_stream) {
5889 timing_out->vic = old_stream->timing.vic;
5890 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5891 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5892 } else {
5893 timing_out->vic = drm_match_cea_mode(mode_in);
5894 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5895 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5896 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5897 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5898 }
e7b07cee 5899
1cb1d477
WL
5900 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5901 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5902 timing_out->vic = avi_frame.video_code;
5903 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5904 timing_out->hdmi_vic = hv_frame.vic;
5905 }
5906
fe8858bb
NC
5907 if (is_freesync_video_mode(mode_in, aconnector)) {
5908 timing_out->h_addressable = mode_in->hdisplay;
5909 timing_out->h_total = mode_in->htotal;
5910 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5911 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5912 timing_out->v_total = mode_in->vtotal;
5913 timing_out->v_addressable = mode_in->vdisplay;
5914 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5915 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5916 timing_out->pix_clk_100hz = mode_in->clock * 10;
5917 } else {
5918 timing_out->h_addressable = mode_in->crtc_hdisplay;
5919 timing_out->h_total = mode_in->crtc_htotal;
5920 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5921 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5922 timing_out->v_total = mode_in->crtc_vtotal;
5923 timing_out->v_addressable = mode_in->crtc_vdisplay;
5924 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5925 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5926 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5927 }
a85ba005 5928
e7b07cee 5929 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
e7b07cee
HW
5930
5931 stream->output_color_space = get_output_color_space(timing_out);
5932
e43a432c
AK
5933 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5934 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
ea117312
TA
5935 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5936 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5937 drm_mode_is_420_also(info, mode_in) &&
5938 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5939 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5940 adjust_colour_depth_from_display_info(timing_out, info);
5941 }
5942 }
e7b07cee
HW
5943}
5944
3ee6b26b
AD
5945static void fill_audio_info(struct audio_info *audio_info,
5946 const struct drm_connector *drm_connector,
5947 const struct dc_sink *dc_sink)
e7b07cee
HW
5948{
5949 int i = 0;
5950 int cea_revision = 0;
5951 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5952
5953 audio_info->manufacture_id = edid_caps->manufacturer_id;
5954 audio_info->product_id = edid_caps->product_id;
5955
5956 cea_revision = drm_connector->display_info.cea_rev;
5957
090afc1e 5958 strscpy(audio_info->display_name,
d2b2562c 5959 edid_caps->display_name,
090afc1e 5960 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
e7b07cee 5961
b830ebc9 5962 if (cea_revision >= 3) {
e7b07cee
HW
5963 audio_info->mode_count = edid_caps->audio_mode_count;
5964
5965 for (i = 0; i < audio_info->mode_count; ++i) {
5966 audio_info->modes[i].format_code =
5967 (enum audio_format_code)
5968 (edid_caps->audio_modes[i].format_code);
5969 audio_info->modes[i].channel_count =
5970 edid_caps->audio_modes[i].channel_count;
5971 audio_info->modes[i].sample_rates.all =
5972 edid_caps->audio_modes[i].sample_rate;
5973 audio_info->modes[i].sample_size =
5974 edid_caps->audio_modes[i].sample_size;
5975 }
5976 }
5977
5978 audio_info->flags.all = edid_caps->speaker_flags;
5979
5980 /* TODO: We only check for the progressive mode, check for interlace mode too */
b830ebc9 5981 if (drm_connector->latency_present[0]) {
e7b07cee
HW
5982 audio_info->video_latency = drm_connector->video_latency[0];
5983 audio_info->audio_latency = drm_connector->audio_latency[0];
5984 }
5985
5986 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5987
5988}
5989
3ee6b26b
AD
5990static void
5991copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5992 struct drm_display_mode *dst_mode)
e7b07cee
HW
5993{
5994 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5995 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5996 dst_mode->crtc_clock = src_mode->crtc_clock;
5997 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5998 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
b830ebc9 5999 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
e7b07cee
HW
6000 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
6001 dst_mode->crtc_htotal = src_mode->crtc_htotal;
6002 dst_mode->crtc_hskew = src_mode->crtc_hskew;
6003 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
6004 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
6005 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
6006 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
6007 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
6008}
6009
3ee6b26b
AD
6010static void
6011decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
6012 const struct drm_display_mode *native_mode,
6013 bool scale_enabled)
e7b07cee
HW
6014{
6015 if (scale_enabled) {
6016 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6017 } else if (native_mode->clock == drm_mode->clock &&
6018 native_mode->htotal == drm_mode->htotal &&
6019 native_mode->vtotal == drm_mode->vtotal) {
6020 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6021 } else {
6022 /* no scaling nor amdgpu inserted, no need to patch */
6023 }
6024}
6025
aed15309
ML
6026static struct dc_sink *
6027create_fake_sink(struct amdgpu_dm_connector *aconnector)
2e0ac3d6 6028{
2e0ac3d6 6029 struct dc_sink_init_data sink_init_data = { 0 };
aed15309 6030 struct dc_sink *sink = NULL;
2e0ac3d6
HW
6031 sink_init_data.link = aconnector->dc_link;
6032 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
6033
6034 sink = dc_sink_create(&sink_init_data);
423788c7 6035 if (!sink) {
2e0ac3d6 6036 DRM_ERROR("Failed to create sink!\n");
aed15309 6037 return NULL;
423788c7 6038 }
2e0ac3d6 6039 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
423788c7 6040
aed15309 6041 return sink;
2e0ac3d6
HW
6042}
6043
fa2123db
ML
6044static void set_multisync_trigger_params(
6045 struct dc_stream_state *stream)
6046{
ec372186
ML
6047 struct dc_stream_state *master = NULL;
6048
fa2123db 6049 if (stream->triggered_crtc_reset.enabled) {
ec372186
ML
6050 master = stream->triggered_crtc_reset.event_source;
6051 stream->triggered_crtc_reset.event =
6052 master->timing.flags.VSYNC_POSITIVE_POLARITY ?
6053 CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
6054 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
fa2123db
ML
6055 }
6056}
6057
6058static void set_master_stream(struct dc_stream_state *stream_set[],
6059 int stream_count)
6060{
6061 int j, highest_rfr = 0, master_stream = 0;
6062
6063 for (j = 0; j < stream_count; j++) {
6064 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6065 int refresh_rate = 0;
6066
380604e2 6067 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
fa2123db
ML
6068 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
6069 if (refresh_rate > highest_rfr) {
6070 highest_rfr = refresh_rate;
6071 master_stream = j;
6072 }
6073 }
6074 }
6075 for (j = 0; j < stream_count; j++) {
03736f4c 6076 if (stream_set[j])
fa2123db
ML
6077 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6078 }
6079}
6080
6081static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6082{
6083 int i = 0;
ec372186 6084 struct dc_stream_state *stream;
fa2123db
ML
6085
6086 if (context->stream_count < 2)
6087 return;
6088 for (i = 0; i < context->stream_count ; i++) {
6089 if (!context->streams[i])
6090 continue;
1f6010a9
DF
6091 /*
6092 * TODO: add a function to read AMD VSDB bits and set
fa2123db 6093 * crtc_sync_master.multi_sync_enabled flag
1f6010a9 6094 * For now it's set to false
fa2123db 6095 */
fa2123db 6096 }
ec372186 6097
fa2123db 6098 set_master_stream(context->streams, context->stream_count);
ec372186
ML
6099
6100 for (i = 0; i < context->stream_count ; i++) {
6101 stream = context->streams[i];
6102
6103 if (!stream)
6104 continue;
6105
6106 set_multisync_trigger_params(stream);
6107 }
fa2123db
ML
6108}
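/*
 * Example (illustrative): set_master_stream() picks the stream with the
 * highest refresh rate as the common trigger source. For a CEA 1080p60
 * timing, refresh_rate = (1485000 * 100) / (2200 * 1125) = 60, so a
 * 60 Hz stream wins over a 30 Hz one and the other CRTCs reset on its
 * VSYNC edge.
 */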
6109
ea2be5c0 6110#if defined(CONFIG_DRM_AMD_DC_DCN)
998b7ad2
FZ
6111static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6112 struct dc_sink *sink, struct dc_stream_state *stream,
6113 struct dsc_dec_dpcd_caps *dsc_caps)
6114{
6115 stream->timing.flags.DSC = 0;
63ad5371 6116 dsc_caps->is_dsc_supported = false;
998b7ad2 6117
2665f63a
ML
6118 if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
6119 sink->sink_signal == SIGNAL_TYPE_EDP)) {
50b1f44e
FZ
6120 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
6121 sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
6122 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6123 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6124 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6125 dsc_caps);
998b7ad2
FZ
6126 }
6127}
6128
2665f63a
ML
6129static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
6130 struct dc_sink *sink, struct dc_stream_state *stream,
6131 struct dsc_dec_dpcd_caps *dsc_caps,
6132 uint32_t max_dsc_target_bpp_limit_override)
6133{
6134 const struct dc_link_settings *verified_link_cap = NULL;
6135 uint32_t link_bw_in_kbps;
6136 uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
6137 struct dc *dc = sink->ctx->dc;
6138 struct dc_dsc_bw_range bw_range = {0};
6139 struct dc_dsc_config dsc_cfg = {0};
6140
6141 verified_link_cap = dc_link_get_link_cap(stream->link);
6142 link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
6143 edp_min_bpp_x16 = 8 * 16;
6144 edp_max_bpp_x16 = 8 * 16;
6145
6146 if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
6147 edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
6148
6149 if (edp_max_bpp_x16 < edp_min_bpp_x16)
6150 edp_min_bpp_x16 = edp_max_bpp_x16;
6151
6152 if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
6153 dc->debug.dsc_min_slice_height_override,
6154 edp_min_bpp_x16, edp_max_bpp_x16,
6155 dsc_caps,
6156 &stream->timing,
6157 &bw_range)) {
6158
6159 if (bw_range.max_kbps < link_bw_in_kbps) {
6160 if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6161 dsc_caps,
6162 dc->debug.dsc_min_slice_height_override,
6163 max_dsc_target_bpp_limit_override,
6164 0,
6165 &stream->timing,
6166 &dsc_cfg)) {
6167 stream->timing.dsc_cfg = dsc_cfg;
6168 stream->timing.flags.DSC = 1;
6169 stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
6170 }
6171 return;
6172 }
6173 }
6174
6175 if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6176 dsc_caps,
6177 dc->debug.dsc_min_slice_height_override,
6178 max_dsc_target_bpp_limit_override,
6179 link_bw_in_kbps,
6180 &stream->timing,
6181 &dsc_cfg)) {
6182 stream->timing.dsc_cfg = dsc_cfg;
6183 stream->timing.flags.DSC = 1;
6184 }
6185}
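/*
 * Decision sketch (illustrative): the eDP policy above first asks whether
 * the bandwidth needed at the maximum bpp of the computed DSC range still
 * fits the link (bw_range.max_kbps < link_bw_in_kbps). If it does, DSC is
 * enabled with bits_per_pixel pinned to edp_max_bpp_x16; otherwise it
 * falls back to letting dc_dsc_compute_config() choose a configuration
 * constrained by the actual link bandwidth.
 */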
6186
998b7ad2
FZ
6187static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6188 struct dc_sink *sink, struct dc_stream_state *stream,
6189 struct dsc_dec_dpcd_caps *dsc_caps)
6190{
6191 struct drm_connector *drm_connector = &aconnector->base;
6192 uint32_t link_bandwidth_kbps;
f1c1a982 6193 uint32_t max_dsc_target_bpp_limit_override = 0;
2665f63a 6194 struct dc *dc = sink->ctx->dc;
50b1f44e
FZ
6195 uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
6196 uint32_t dsc_max_supported_bw_in_kbps;
998b7ad2
FZ
6197
6198 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6199 dc_link_get_link_cap(aconnector->dc_link));
f1c1a982
RL
6200
6201 if (stream->link && stream->link->local_sink)
6202 max_dsc_target_bpp_limit_override =
6203 stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
de7cc1b4 6204
998b7ad2
FZ
6205 /* Set DSC policy according to dsc_clock_en */
6206 dc_dsc_policy_set_enable_dsc_when_not_needed(
6207 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6208
2665f63a
ML
6209 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
6210 dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6211
6212 apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6213
6214 } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
50b1f44e
FZ
6215 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
6216 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
998b7ad2
FZ
6217 dsc_caps,
6218 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
f1c1a982 6219 max_dsc_target_bpp_limit_override,
998b7ad2
FZ
6220 link_bandwidth_kbps,
6221 &stream->timing,
6222 &stream->timing.dsc_cfg)) {
50b1f44e
FZ
6223 stream->timing.flags.DSC = 1;
6224 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n",
6225 __func__, drm_connector->name);
6226 }
6227 } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
6228 timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
6229 max_supported_bw_in_kbps = link_bandwidth_kbps;
6230 dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
6231
6232 if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
6233 max_supported_bw_in_kbps > 0 &&
6234 dsc_max_supported_bw_in_kbps > 0)
6235 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6236 dsc_caps,
6237 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6238 max_dsc_target_bpp_limit_override,
6239 dsc_max_supported_bw_in_kbps,
6240 &stream->timing,
6241 &stream->timing.dsc_cfg)) {
6242 stream->timing.flags.DSC = 1;
6243 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
6244 __func__, drm_connector->name);
6245 }
998b7ad2
FZ
6246 }
6247 }
6248
6249 /* Overwrite the stream flag if DSC is enabled through debugfs */
6250 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6251 stream->timing.flags.DSC = 1;
6252
6253 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6254 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6255
6256 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6257 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6258
6259 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6260 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
998b7ad2 6261}
433e5dec 6262#endif /* CONFIG_DRM_AMD_DC_DCN */
998b7ad2 6263
5fd953a3
RS
6264/**
6265 * DOC: FreeSync Video
6266 *
6267 * When a userspace application wants to play a video, the content follows a
6268 * standard format definition that usually specifies the FPS for that format.
6269 * The list below illustrates some common video formats and their
6270 * expected FPS:
6271 *
6272 * - TV/NTSC (23.976 FPS)
6273 * - Cinema (24 FPS)
6274 * - TV/PAL (25 FPS)
6275 * - TV/NTSC (29.97 FPS)
6276 * - TV/NTSC (30 FPS)
6277 * - Cinema HFR (48 FPS)
6278 * - TV/PAL (50 FPS)
6279 * - Commonly used (60 FPS)
12cdff6b 6280 * - Multiples of 24 (48, 72, 96, 120 FPS)
5fd953a3
RS
6281 *
6283 * The list of standard video formats is not huge, so these modes can be
6284 * added to the connector's modeset list beforehand. With that, userspace
6285 * can leverage FreeSync to extend the front porch and attain the target refresh
6285 * rate. Such a switch will happen seamlessly, without screen blanking or
6286 * reprogramming of the output in any other way. If the userspace requests a
6287 * modesetting change compatible with FreeSync modes that only differ in the
6289 * refresh rate, DC will skip the full update and avoid a blink during the
6289 * transition. For example, the video player can change the modesetting from
6290 * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6291 * causing any display blink. This same concept can be applied to a mode
6292 * setting change.
6293 */
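/*
 * Worked example (illustrative, not from the original file): the refresh
 * rate is pix_clk / (h_total * v_total), so keeping the pixel clock fixed
 * and stretching only the vertical front porch retargets the rate without
 * a full modeset. Going from 60 Hz to 48 Hz scales v_total by
 * 60 / 48 = 1.25; for the 2200x1125 CEA 1080p60 timing that means
 * v_total = 1125 * 60 / 48 ~= 1406, with the extra ~281 lines added to
 * v_front_porch.
 */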
a85ba005
NC
6294static struct drm_display_mode *
6295get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6296 bool use_probed_modes)
6297{
6298 struct drm_display_mode *m, *m_pref = NULL;
6299 u16 current_refresh, highest_refresh;
6300 struct list_head *list_head = use_probed_modes ?
6301 &aconnector->base.probed_modes :
6302 &aconnector->base.modes;
6303
6304 if (aconnector->freesync_vid_base.clock != 0)
6305 return &aconnector->freesync_vid_base;
6306
6307 /* Find the preferred mode */
6308 list_for_each_entry(m, list_head, head) {
6309 if (m->type & DRM_MODE_TYPE_PREFERRED) {
6310 m_pref = m;
6311 break;
6312 }
6313 }
6314
6315 if (!m_pref) {
6316 /* Probably an EDID with no preferred mode. Fall back to the first entry. */
6317 m_pref = list_first_entry_or_null(
6318 &aconnector->base.modes, struct drm_display_mode, head);
6319 if (!m_pref) {
6320 DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6321 return NULL;
6322 }
6323 }
6324
6325 highest_refresh = drm_mode_vrefresh(m_pref);
6326
6327 /*
6328 * Find the mode with highest refresh rate with same resolution.
6329 * For some monitors, preferred mode is not the mode with highest
6330 * supported refresh rate.
6331 */
6332 list_for_each_entry(m, list_head, head) {
6333 current_refresh = drm_mode_vrefresh(m);
6334
6335 if (m->hdisplay == m_pref->hdisplay &&
6336 m->vdisplay == m_pref->vdisplay &&
6337 highest_refresh < current_refresh) {
6338 highest_refresh = current_refresh;
6339 m_pref = m;
6340 }
6341 }
6342
426c89aa 6343 drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
a85ba005
NC
6344 return m_pref;
6345}
6346
fe8858bb 6347static bool is_freesync_video_mode(const struct drm_display_mode *mode,
a85ba005
NC
6348 struct amdgpu_dm_connector *aconnector)
6349{
6350 struct drm_display_mode *high_mode;
6351 int timing_diff;
6352
6353 high_mode = get_highest_refresh_rate_mode(aconnector, false);
6354 if (!high_mode || !mode)
6355 return false;
6356
6357 timing_diff = high_mode->vtotal - mode->vtotal;
6358
6359 if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6360 high_mode->hdisplay != mode->hdisplay ||
6361 high_mode->vdisplay != mode->vdisplay ||
6362 high_mode->hsync_start != mode->hsync_start ||
6363 high_mode->hsync_end != mode->hsync_end ||
6364 high_mode->htotal != mode->htotal ||
6365 high_mode->hskew != mode->hskew ||
6366 high_mode->vscan != mode->vscan ||
6367 high_mode->vsync_start - mode->vsync_start != timing_diff ||
6368 high_mode->vsync_end - mode->vsync_end != timing_diff)
6369 return false;
6370 else
6371 return true;
6372}
6373
f11d9373 6374static struct dc_stream_state *
3ee6b26b
AD
6375create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6376 const struct drm_display_mode *drm_mode,
b333730d 6377 const struct dm_connector_state *dm_state,
cbd14ae7
SW
6378 const struct dc_stream_state *old_stream,
6379 int requested_bpc)
e7b07cee
HW
6380{
6381 struct drm_display_mode *preferred_mode = NULL;
6382 struct drm_connector *drm_connector;
6383 const struct drm_connector_state *con_state =
6384 dm_state ? &dm_state->base : NULL;
6385 struct dc_stream_state *stream = NULL;
6386 struct drm_display_mode mode = *drm_mode;
6387 struct drm_display_mode saved_mode;
6388 struct drm_display_mode *freesync_mode = NULL;
6389 bool native_mode_found = false;
6390 bool recalculate_timing = false;
6391 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6392 int mode_refresh;
6393 int preferred_refresh = 0;
6394#if defined(CONFIG_DRM_AMD_DC_DCN)
6395 struct dsc_dec_dpcd_caps dsc_caps;
6396#endif
6397 struct dc_sink *sink = NULL;
6398
6399 memset(&saved_mode, 0, sizeof(saved_mode));
6400
6401 if (aconnector == NULL) {
6402 DRM_ERROR("aconnector is NULL!\n");
6403 return stream;
6404 }
6405
6406 drm_connector = &aconnector->base;
6407
6408 if (!aconnector->dc_sink) {
6409 sink = create_fake_sink(aconnector);
6410 if (!sink)
6411 return stream;
6412 } else {
6413 sink = aconnector->dc_sink;
6414 dc_sink_retain(sink);
6415 }
6416
6417 stream = dc_create_stream_for_sink(sink);
6418
6419 if (stream == NULL) {
6420 DRM_ERROR("Failed to create stream for sink!\n");
6421 goto finish;
6422 }
6423
6424 stream->dm_stream_context = aconnector;
6425
6426 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6427 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6428
6429 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6430 /* Search for preferred mode */
6431 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6432 native_mode_found = true;
6433 break;
6434 }
6435 }
6436 if (!native_mode_found)
6437 preferred_mode = list_first_entry_or_null(
6438 &aconnector->base.modes,
6439 struct drm_display_mode,
6440 head);
6441
6442 mode_refresh = drm_mode_vrefresh(&mode);
6443
6444 if (preferred_mode == NULL) {
6445 /*
6446 * This may not be an error; it can happen when there are no
6447 * usermode calls to reset and set mode upon hotplug. In that
6448 * case, we call set mode ourselves to restore the previous mode,
6449 * and the mode list may not be filled in yet.
6450 */
6451 DRM_DEBUG_DRIVER("No preferred mode found\n");
6452 } else {
6453 recalculate_timing = is_freesync_video_mode(&mode, aconnector);
6454 if (recalculate_timing) {
6455 freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6456 drm_mode_copy(&saved_mode, &mode);
6457 drm_mode_copy(&mode, freesync_mode);
6458 } else {
6459 decide_crtc_timing_for_drm_display_mode(
6460 &mode, preferred_mode, scale);
6461
6462 preferred_refresh = drm_mode_vrefresh(preferred_mode);
6463 }
6464 }
6465
6466 if (recalculate_timing)
6467 drm_mode_set_crtcinfo(&saved_mode, 0);
6468 else if (!dm_state)
6469 drm_mode_set_crtcinfo(&mode, 0);
6470
6471 /*
6472 * If scaling is enabled and the refresh rate didn't change,
6473 * we copy the VIC and polarities of the old timings.
6474 */
6475 if (!scale || mode_refresh != preferred_refresh)
6476 fill_stream_properties_from_drm_display_mode(
6477 stream, &mode, &aconnector->base, con_state, NULL,
6478 requested_bpc);
6479 else
6480 fill_stream_properties_from_drm_display_mode(
6481 stream, &mode, &aconnector->base, con_state, old_stream,
6482 requested_bpc);
6483
6484#if defined(CONFIG_DRM_AMD_DC_DCN)
6485 /* SST DSC determination policy */
6486 update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6487 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6488 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6489#endif
6490
6491 update_stream_scaling_settings(&mode, dm_state, stream);
6492
6493 fill_audio_info(
6494 &stream->audio_info,
6495 drm_connector,
aed15309 6496 sink);
e7b07cee 6497
ceb3dbb4 6498 update_stream_signal(stream, sink);
9182b4cb 6499
d832fc3b 6500 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6501 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6502
6503 if (stream->link->psr_settings.psr_feature_enabled) {
6504 /*
6505 * Decide whether the stream supports VSC SDP colorimetry
6506 * before building the VSC infopacket.
6507 */
6508 stream->use_vsc_sdp_for_colorimetry = false;
6509 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6510 stream->use_vsc_sdp_for_colorimetry =
6511 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6512 } else {
6513 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6514 stream->use_vsc_sdp_for_colorimetry = true;
8c322309 6515 }
0c5a0bbb 6516 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
6517 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6518
8c322309 6519 }
aed15309 6520finish:
dcd5fb82 6521 dc_sink_release(sink);
9e3efe3e 6522
6523 return stream;
6524}
6525
7578ecda 6526static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6527{
6528 drm_crtc_cleanup(crtc);
6529 kfree(crtc);
6530}
6531
6532static void dm_crtc_destroy_state(struct drm_crtc *crtc,
3ee6b26b 6533 struct drm_crtc_state *state)
6534{
6535 struct dm_crtc_state *cur = to_dm_crtc_state(state);
6536
6537 /* TODO: Destroy dc_stream objects once the stream object is flattened */
6538 if (cur->stream)
6539 dc_stream_release(cur->stream);
6540
6541
6542 __drm_atomic_helper_crtc_destroy_state(state);
6543
6544
6545 kfree(state);
6546}
6547
6548static void dm_crtc_reset_state(struct drm_crtc *crtc)
6549{
6550 struct dm_crtc_state *state;
6551
6552 if (crtc->state)
6553 dm_crtc_destroy_state(crtc, crtc->state);
6554
6555 state = kzalloc(sizeof(*state), GFP_KERNEL);
6556 if (WARN_ON(!state))
6557 return;
6558
1f8a52ec 6559 __drm_atomic_helper_crtc_reset(crtc, &state->base);
6560}
6561
6562static struct drm_crtc_state *
6563dm_crtc_duplicate_state(struct drm_crtc *crtc)
6564{
6565 struct dm_crtc_state *state, *cur;
6566
6567 cur = to_dm_crtc_state(crtc->state);
6568
6569 if (WARN_ON(!crtc->state))
6570 return NULL;
6571
2004f45e 6572 state = kzalloc(sizeof(*state), GFP_KERNEL);
6573 if (!state)
6574 return NULL;
6575
6576 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6577
6578 if (cur->stream) {
6579 state->stream = cur->stream;
6580 dc_stream_retain(state->stream);
6581 }
6582
d6ef9b41 6583 state->active_planes = cur->active_planes;
98e6436d 6584 state->vrr_infopacket = cur->vrr_infopacket;
c1ee92f9 6585 state->abm_level = cur->abm_level;
6586 state->vrr_supported = cur->vrr_supported;
6587 state->freesync_config = cur->freesync_config;
6588 state->cm_has_degamma = cur->cm_has_degamma;
6589 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
035f5496 6590 state->force_dpms_off = cur->force_dpms_off;
6591 /* TODO: Duplicate dc_stream once the stream object is flattened */
6592
6593 return &state->base;
6594}
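/*
 * Ownership note: dm_crtc_duplicate_state() takes its own
 * dc_stream_retain() reference on cur->stream, and the matching
 * dc_stream_release() happens in dm_crtc_destroy_state() above, so every
 * duplicated CRTC state independently keeps the dc_stream_state alive.
 */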
6595
86bc2219 6596#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
e69231c4 6597static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6598{
6599 crtc_debugfs_init(crtc);
6600
6601 return 0;
6602}
6603#endif
6604
6605static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6606{
6607 enum dc_irq_source irq_source;
6608 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
1348969a 6609 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6610 int rc;
6611
6612 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6613
6614 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6615
6616 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6617 acrtc->crtc_id, enable ? "en" : "dis", rc);
6618 return rc;
6619}
6620
6621static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6622{
6623 enum dc_irq_source irq_source;
6624 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
1348969a 6625 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
d2574c33 6626 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
71338cb4 6627 struct amdgpu_display_manager *dm = &adev->dm;
09a5df6c 6628 struct vblank_control_work *work;
6629 int rc = 0;
6630
6631 if (enable) {
6632 /* vblank irq on -> Only need vupdate irq in vrr mode */
6633 if (amdgpu_dm_vrr_active(acrtc_state))
6634 rc = dm_set_vupdate_irq(crtc, true);
6635 } else {
6636 /* vblank irq off -> vupdate irq off */
6637 rc = dm_set_vupdate_irq(crtc, false);
6638 }
6639
6640 if (rc)
6641 return rc;
6642
6643 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6644
6645 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6646 return -EBUSY;
6647
6648 if (amdgpu_in_reset(adev))
6649 return 0;
6650
6651 if (dm->vblank_control_workqueue) {
6652 work = kzalloc(sizeof(*work), GFP_ATOMIC);
6653 if (!work)
6654 return -ENOMEM;
09a5df6c 6655
6656 INIT_WORK(&work->work, vblank_control_worker);
6657 work->dm = dm;
6658 work->acrtc = acrtc;
6659 work->enable = enable;
09a5df6c 6660
6661 if (acrtc_state->stream) {
6662 dc_stream_retain(acrtc_state->stream);
6663 work->stream = acrtc_state->stream;
6664 }
58aa1c50 6665
6666 queue_work(dm->vblank_control_workqueue, &work->work);
6667 }
71338cb4 6668
71338cb4 6669 return 0;
6670}
6671
6672static int dm_enable_vblank(struct drm_crtc *crtc)
6673{
6674 return dm_set_vblank(crtc, true);
6675}
6676
6677static void dm_disable_vblank(struct drm_crtc *crtc)
6678{
6679 dm_set_vblank(crtc, false);
6680}
6681
6682/* Only the options currently available for the driver are implemented */
6683static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6684 .reset = dm_crtc_reset_state,
6685 .destroy = amdgpu_dm_crtc_destroy,
6686 .set_config = drm_atomic_helper_set_config,
6687 .page_flip = drm_atomic_helper_page_flip,
6688 .atomic_duplicate_state = dm_crtc_duplicate_state,
6689 .atomic_destroy_state = dm_crtc_destroy_state,
31aec354 6690 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
3b3b8448 6691 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
8fb843d1 6692 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
e3eff4b5 6693 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
6694 .enable_vblank = dm_enable_vblank,
6695 .disable_vblank = dm_disable_vblank,
e3eff4b5 6696 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6697#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6698 .late_register = amdgpu_dm_crtc_late_register,
6699#endif
6700};
6701
6702static enum drm_connector_status
6703amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6704{
6705 bool connected;
c84dec2f 6706 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 6707
6708 /*
6709 * Notes:
6710 * 1. This interface is NOT called in context of HPD irq.
6711 * 2. This interface *is called* in context of user-mode ioctl, which
6712 * makes it a bad place for *any* MST-related activity.
6713 */
e7b07cee 6714
6715 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6716 !aconnector->fake_enable)
6717 connected = (aconnector->dc_sink != NULL);
6718 else
6719 connected = (aconnector->base.force == DRM_FORCE_ON);
6720
6721 update_subconnector_property(aconnector);
6722
6723 return (connected ? connector_status_connected :
6724 connector_status_disconnected);
6725}
6726
6727int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6728 struct drm_connector_state *connector_state,
6729 struct drm_property *property,
6730 uint64_t val)
6731{
6732 struct drm_device *dev = connector->dev;
1348969a 6733 struct amdgpu_device *adev = drm_to_adev(dev);
6734 struct dm_connector_state *dm_old_state =
6735 to_dm_connector_state(connector->state);
6736 struct dm_connector_state *dm_new_state =
6737 to_dm_connector_state(connector_state);
6738
6739 int ret = -EINVAL;
6740
6741 if (property == dev->mode_config.scaling_mode_property) {
6742 enum amdgpu_rmx_type rmx_type;
6743
6744 switch (val) {
6745 case DRM_MODE_SCALE_CENTER:
6746 rmx_type = RMX_CENTER;
6747 break;
6748 case DRM_MODE_SCALE_ASPECT:
6749 rmx_type = RMX_ASPECT;
6750 break;
6751 case DRM_MODE_SCALE_FULLSCREEN:
6752 rmx_type = RMX_FULL;
6753 break;
6754 case DRM_MODE_SCALE_NONE:
6755 default:
6756 rmx_type = RMX_OFF;
6757 break;
6758 }
6759
6760 if (dm_old_state->scaling == rmx_type)
6761 return 0;
6762
6763 dm_new_state->scaling = rmx_type;
6764 ret = 0;
6765 } else if (property == adev->mode_info.underscan_hborder_property) {
6766 dm_new_state->underscan_hborder = val;
6767 ret = 0;
6768 } else if (property == adev->mode_info.underscan_vborder_property) {
6769 dm_new_state->underscan_vborder = val;
6770 ret = 0;
6771 } else if (property == adev->mode_info.underscan_property) {
6772 dm_new_state->underscan_enable = val;
6773 ret = 0;
6774 } else if (property == adev->mode_info.abm_level_property) {
6775 dm_new_state->abm_level = val;
6776 ret = 0;
6777 }
6778
6779 return ret;
6780}
6781
6782int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6783 const struct drm_connector_state *state,
6784 struct drm_property *property,
6785 uint64_t *val)
6786{
6787 struct drm_device *dev = connector->dev;
1348969a 6788 struct amdgpu_device *adev = drm_to_adev(dev);
6789 struct dm_connector_state *dm_state =
6790 to_dm_connector_state(state);
6791 int ret = -EINVAL;
6792
6793 if (property == dev->mode_config.scaling_mode_property) {
6794 switch (dm_state->scaling) {
6795 case RMX_CENTER:
6796 *val = DRM_MODE_SCALE_CENTER;
6797 break;
6798 case RMX_ASPECT:
6799 *val = DRM_MODE_SCALE_ASPECT;
6800 break;
6801 case RMX_FULL:
6802 *val = DRM_MODE_SCALE_FULLSCREEN;
6803 break;
6804 case RMX_OFF:
6805 default:
6806 *val = DRM_MODE_SCALE_NONE;
6807 break;
6808 }
6809 ret = 0;
6810 } else if (property == adev->mode_info.underscan_hborder_property) {
6811 *val = dm_state->underscan_hborder;
6812 ret = 0;
6813 } else if (property == adev->mode_info.underscan_vborder_property) {
6814 *val = dm_state->underscan_vborder;
6815 ret = 0;
6816 } else if (property == adev->mode_info.underscan_property) {
6817 *val = dm_state->underscan_enable;
6818 ret = 0;
6819 } else if (property == adev->mode_info.abm_level_property) {
6820 *val = dm_state->abm_level;
6821 ret = 0;
e7b07cee 6822 }
c1ee92f9 6823
6824 return ret;
6825}
6826
6827static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6828{
6829 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6830
6831 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6832}
6833
7578ecda 6834static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
e7b07cee 6835{
c84dec2f 6836 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 6837 const struct dc_link *link = aconnector->dc_link;
1348969a 6838 struct amdgpu_device *adev = drm_to_adev(connector->dev);
e7b07cee 6839 struct amdgpu_display_manager *dm = &adev->dm;
7fd13bae 6840 int i;
ada8ce15 6841
6842 /*
6843 * Call only if mst_mgr was initialized before, since it's not done
6844 * for all connector types.
6845 */
6846 if (aconnector->mst_mgr.dev)
6847 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6848
6849#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6850 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6851 for (i = 0; i < dm->num_of_edps; i++) {
6852 if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6853 backlight_device_unregister(dm->backlight_dev[i]);
6854 dm->backlight_dev[i] = NULL;
6855 }
6856 }
6857#endif
6858
6859 if (aconnector->dc_em_sink)
6860 dc_sink_release(aconnector->dc_em_sink);
6861 aconnector->dc_em_sink = NULL;
6862 if (aconnector->dc_sink)
6863 dc_sink_release(aconnector->dc_sink);
6864 aconnector->dc_sink = NULL;
6865
e86e8947 6866 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6867 drm_connector_unregister(connector);
6868 drm_connector_cleanup(connector);
6869 if (aconnector->i2c) {
6870 i2c_del_adapter(&aconnector->i2c->base);
6871 kfree(aconnector->i2c);
6872 }
7daec99f 6873 kfree(aconnector->dm_dp_aux.aux.name);
526c654a 6874
6875 kfree(connector);
6876}
6877
6878void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6879{
6880 struct dm_connector_state *state =
6881 to_dm_connector_state(connector->state);
6882
6883 if (connector->state)
6884 __drm_atomic_helper_connector_destroy_state(connector->state);
6885
6886 kfree(state);
6887
6888 state = kzalloc(sizeof(*state), GFP_KERNEL);
6889
6890 if (state) {
6891 state->scaling = RMX_OFF;
6892 state->underscan_enable = false;
6893 state->underscan_hborder = 0;
6894 state->underscan_vborder = 0;
01933ba4 6895 state->base.max_requested_bpc = 8;
6896 state->vcpi_slots = 0;
6897 state->pbn = 0;
6898 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6899 state->abm_level = amdgpu_dm_abm_level;
6900
df099b9b 6901 __drm_atomic_helper_connector_reset(connector, &state->base);
6902 }
6903}
6904
6905struct drm_connector_state *
6906amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6907{
6908 struct dm_connector_state *state =
6909 to_dm_connector_state(connector->state);
6910
6911 struct dm_connector_state *new_state =
6912 kmemdup(state, sizeof(*state), GFP_KERNEL);
6913
6914 if (!new_state)
6915 return NULL;
e7b07cee 6916
6917 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6918
6919 new_state->freesync_capable = state->freesync_capable;
c1ee92f9 6920 new_state->abm_level = state->abm_level;
6921 new_state->scaling = state->scaling;
6922 new_state->underscan_enable = state->underscan_enable;
6923 new_state->underscan_hborder = state->underscan_hborder;
6924 new_state->underscan_vborder = state->underscan_vborder;
6925 new_state->vcpi_slots = state->vcpi_slots;
6926 new_state->pbn = state->pbn;
98e6436d 6927 return &new_state->base;
6928}
6929
6930static int
6931amdgpu_dm_connector_late_register(struct drm_connector *connector)
6932{
6933 struct amdgpu_dm_connector *amdgpu_dm_connector =
6934 to_amdgpu_dm_connector(connector);
00a8037e 6935 int r;
14f04fa4 6936
6937 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6938 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6939 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6940 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6941 if (r)
6942 return r;
6943 }
6944
6945#if defined(CONFIG_DEBUG_FS)
6946 connector_debugfs_init(amdgpu_dm_connector);
6947#endif
6948
6949 return 0;
6950}
6951
6952static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6953 .reset = amdgpu_dm_connector_funcs_reset,
6954 .detect = amdgpu_dm_connector_detect,
6955 .fill_modes = drm_helper_probe_single_connector_modes,
6956 .destroy = amdgpu_dm_connector_destroy,
6957 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6958 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6959 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
526c654a 6960 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
14f04fa4 6961 .late_register = amdgpu_dm_connector_late_register,
526c654a 6962 .early_unregister = amdgpu_dm_connector_unregister
6963};
6964
6965static int get_modes(struct drm_connector *connector)
6966{
6967 return amdgpu_dm_connector_get_modes(connector);
6968}
6969
c84dec2f 6970static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6971{
6972 struct dc_sink_init_data init_params = {
6973 .link = aconnector->dc_link,
6974 .sink_signal = SIGNAL_TYPE_VIRTUAL
6975 };
70e8ffc5 6976 struct edid *edid;
e7b07cee 6977
a89ff457 6978 if (!aconnector->base.edid_blob_ptr) {
6979 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6980 aconnector->base.name);
6981
6982 aconnector->base.force = DRM_FORCE_OFF;
6983 aconnector->base.override_edid = false;
6984 return;
6985 }
6986
6987 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6988
6989 aconnector->edid = edid;
6990
6991 aconnector->dc_em_sink = dc_link_add_remote_sink(
6992 aconnector->dc_link,
6993 (uint8_t *)edid,
6994 (edid->extensions + 1) * EDID_LENGTH,
6995 &init_params);
6996
dcd5fb82 6997 if (aconnector->base.force == DRM_FORCE_ON) {
6998 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6999 aconnector->dc_link->local_sink :
7000 aconnector->dc_em_sink;
7001 dc_sink_retain(aconnector->dc_sink);
7002 }
7003}
7004
c84dec2f 7005static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
7006{
7007 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
7008
7009 /*
7010 * In case of headless boot with force on for a DP managed connector,
7011 * those settings have to be != 0 to get an initial modeset.
7012 */
7013 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
7014 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
7015 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
7016 }
7017
7018
7019 aconnector->base.override_edid = true;
7020 create_eml_sink(aconnector);
7021}
7022
17ce8a69 7023struct dc_stream_state *
7024create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
7025 const struct drm_display_mode *drm_mode,
7026 const struct dm_connector_state *dm_state,
7027 const struct dc_stream_state *old_stream)
7028{
7029 struct drm_connector *connector = &aconnector->base;
1348969a 7030 struct amdgpu_device *adev = drm_to_adev(connector->dev);
cbd14ae7 7031 struct dc_stream_state *stream;
7032 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
7033 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
7034 enum dc_status dc_result = DC_OK;
7035
7036 do {
7037 stream = create_stream_for_sink(aconnector, drm_mode,
7038 dm_state, old_stream,
7039 requested_bpc);
7040 if (stream == NULL) {
7041 DRM_ERROR("Failed to create stream for sink!\n");
7042 break;
7043 }
7044
7045 dc_result = dc_validate_stream(adev->dm.dc, stream);
7046
7047 if (dc_result != DC_OK) {
74a16675 7048 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
7049 drm_mode->hdisplay,
7050 drm_mode->vdisplay,
7051 drm_mode->clock,
7052 dc_result,
7053 dc_status_to_str(dc_result));
7054
7055 dc_stream_release(stream);
7056 stream = NULL;
7057 requested_bpc -= 2; /* lower bpc to retry validation */
7058 }
7059
7060 } while (stream == NULL && requested_bpc >= 6);
7061
7062 if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
7063 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
7064
7065 aconnector->force_yuv420_output = true;
7066 stream = create_validate_stream_for_sink(aconnector, drm_mode,
7067 dm_state, old_stream);
7068 aconnector->force_yuv420_output = false;
7069 }
7070
7071 return stream;
7072}
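/*
 * Illustrative walk-through of the retry ladder above (hypothetical
 * connector state): with max_requested_bpc = 10, the stream is built and
 * validated at 10 bpc, then 8 bpc, then 6 bpc, stopping at the first
 * depth that passes dc_validate_stream(). If the final failure was
 * DC_FAIL_ENC_VALIDATE, one extra pass is attempted with YCbCr420
 * output forced before giving up.
 */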
7073
ba9ca088 7074enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
3ee6b26b 7075 struct drm_display_mode *mode)
7076{
7077 int result = MODE_ERROR;
7078 struct dc_sink *dc_sink;
e7b07cee 7079 /* TODO: Unhardcode stream count */
0971c40e 7080 struct dc_stream_state *stream;
c84dec2f 7081 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7082
7083 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
7084 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
7085 return result;
7086
7087 /*
7088 * Only run this the first time mode_valid is called, to initialize
7089 * EDID mgmt.
7090 */
7091 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
7092 !aconnector->dc_em_sink)
7093 handle_edid_mgmt(aconnector);
7094
c84dec2f 7095 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
e7b07cee 7096
7097 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
7098 aconnector->base.force != DRM_FORCE_ON) {
7099 DRM_ERROR("dc_sink is NULL!\n");
7100 goto fail;
7101 }
7102
7103 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
7104 if (stream) {
7105 dc_stream_release(stream);
e7b07cee 7106 result = MODE_OK;
cbd14ae7 7107 }
7108
7109fail:
7110 /* TODO: error handling */
7111 return result;
7112}
7113
7114static int fill_hdr_info_packet(const struct drm_connector_state *state,
7115 struct dc_info_packet *out)
7116{
7117 struct hdmi_drm_infoframe frame;
7118 unsigned char buf[30]; /* 26 + 4 */
7119 ssize_t len;
7120 int ret, i;
7121
7122 memset(out, 0, sizeof(*out));
7123
7124 if (!state->hdr_output_metadata)
7125 return 0;
7126
7127 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7128 if (ret)
7129 return ret;
7130
7131 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7132 if (len < 0)
7133 return (int)len;
7134
7135 /* Static metadata is a fixed 26 bytes + 4 byte header. */
7136 if (len != 30)
7137 return -EINVAL;
7138
7139 /* Prepare the infopacket for DC. */
7140 switch (state->connector->connector_type) {
7141 case DRM_MODE_CONNECTOR_HDMIA:
7142 out->hb0 = 0x87; /* type */
7143 out->hb1 = 0x01; /* version */
7144 out->hb2 = 0x1A; /* length */
7145 out->sb[0] = buf[3]; /* checksum */
7146 i = 1;
7147 break;
7148
7149 case DRM_MODE_CONNECTOR_DisplayPort:
7150 case DRM_MODE_CONNECTOR_eDP:
7151 out->hb0 = 0x00; /* sdp id, zero */
7152 out->hb1 = 0x87; /* type */
7153 out->hb2 = 0x1D; /* payload len - 1 */
7154 out->hb3 = (0x13 << 2); /* sdp version */
7155 out->sb[0] = 0x01; /* version */
7156 out->sb[1] = 0x1A; /* length */
7157 i = 2;
7158 break;
7159
7160 default:
7161 return -EINVAL;
7162 }
7163
7164 memcpy(&out->sb[i], &buf[4], 26);
7165 out->valid = true;
7166
7167 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7168 sizeof(out->sb), false);
7169
7170 return 0;
7171}
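/*
 * Sketch of the resulting packing (derived from the code above, not an
 * authoritative spec excerpt): buf[] holds the 4-byte infoframe header
 * followed by the 26-byte static metadata payload. For HDMI, the DC
 * packet keeps the infoframe type/version/length (0x87/0x01/0x1A) in
 * hb0..hb2 and the checksum in sb[0], so the payload lands in sb[1..26].
 * For DP, the payload is wrapped in an SDP header instead and starts at
 * sb[2].
 */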
7172
7173static int
7174amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
51e857af 7175 struct drm_atomic_state *state)
88694af9 7176{
7177 struct drm_connector_state *new_con_state =
7178 drm_atomic_get_new_connector_state(state, conn);
7179 struct drm_connector_state *old_con_state =
7180 drm_atomic_get_old_connector_state(state, conn);
7181 struct drm_crtc *crtc = new_con_state->crtc;
7182 struct drm_crtc_state *new_crtc_state;
7183 int ret;
7184
7185 trace_amdgpu_dm_connector_atomic_check(new_con_state);
7186
7187 if (!crtc)
7188 return 0;
7189
72921cdf 7190 if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7191 struct dc_info_packet hdr_infopacket;
7192
7193 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7194 if (ret)
7195 return ret;
7196
7197 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7198 if (IS_ERR(new_crtc_state))
7199 return PTR_ERR(new_crtc_state);
7200
7201 /*
7202 * DC considers the stream backends changed if the
7203 * static metadata changes. Forcing the modeset also
7204 * gives a simple way for userspace to switch from
b232d4ed
NK
7205 * 8bpc to 10bpc when setting the metadata to enter
7206 * or exit HDR.
7207 *
7208 * Changing the static metadata after it's been
7209 * set is permissible, however. So only force a
7210 * modeset if we're entering or exiting HDR.
88694af9 7211 */
7212 new_crtc_state->mode_changed =
7213 !old_con_state->hdr_output_metadata ||
7214 !new_con_state->hdr_output_metadata;
7215 }
7216
7217 return 0;
7218}
7219
7220static const struct drm_connector_helper_funcs
7221amdgpu_dm_connector_helper_funcs = {
7222 /*
7223 * If a second, bigger display is hotplugged in FB console mode, bigger
7224 * resolution modes are filtered out by drm_mode_validate_size() and go
7225 * missing after the user starts lightdm. So we need to renew the mode
7226 * list in the get_modes callback, not just return the mode count.
7227 */
7228 .get_modes = get_modes,
7229 .mode_valid = amdgpu_dm_connector_mode_valid,
88694af9 7230 .atomic_check = amdgpu_dm_connector_atomic_check,
e7b07cee
HW
7231};
7232
7233static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7234{
7235}
7236
d6ef9b41 7237static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7238{
7239 struct drm_atomic_state *state = new_crtc_state->state;
7240 struct drm_plane *plane;
7241 int num_active = 0;
7242
7243 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7244 struct drm_plane_state *new_plane_state;
7245
7246 /* Cursor planes are "fake". */
7247 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7248 continue;
7249
7250 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7251
7252 if (!new_plane_state) {
7253 /*
7254 * The plane is enabled on the CRTC and hasn't changed
7255 * state. This means that it previously passed
7256 * validation and is therefore enabled.
7257 */
7258 num_active += 1;
7259 continue;
7260 }
7261
7262 /* We need a framebuffer to be considered enabled. */
7263 num_active += (new_plane_state->fb != NULL);
7264 }
7265
7266 return num_active;
7267}
7268
7269static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7270 struct drm_crtc_state *new_crtc_state)
7271{
7272 struct dm_crtc_state *dm_new_crtc_state =
7273 to_dm_crtc_state(new_crtc_state);
7274
7275 dm_new_crtc_state->active_planes = 0;
7276
7277 if (!dm_new_crtc_state->stream)
7278 return;
7279
7280 dm_new_crtc_state->active_planes =
7281 count_crtc_active_planes(new_crtc_state);
7282}
7283
3ee6b26b 7284static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
29b77ad7 7285 struct drm_atomic_state *state)
e7b07cee 7286{
7287 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7288 crtc);
1348969a 7289 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
e7b07cee 7290 struct dc *dc = adev->dm.dc;
29b77ad7 7291 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7292 int ret = -EINVAL;
7293
5b8c5969 7294 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
e8a98235 7295
29b77ad7 7296 dm_update_crtc_active_planes(crtc, crtc_state);
d6ef9b41 7297
7298 if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7299 modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7300 return ret;
7301 }
7302
bc92c065 7303 /*
7304 * We require the primary plane to be enabled whenever the CRTC is, otherwise
7305 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7306 * planes are disabled, which is not supported by the hardware. And there is legacy
7307 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
bc92c065 7308 */
29b77ad7 7309 if (crtc_state->enable &&
7310 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7311 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
c14a005c 7312 return -EINVAL;
ea9522f5 7313 }
c14a005c 7314
7315 /* In some use cases, like reset, no stream is attached */
7316 if (!dm_crtc_state->stream)
7317 return 0;
7318
62c933f9 7319 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7320 return 0;
7321
ea9522f5 7322 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7323 return ret;
7324}
7325
7326static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7327 const struct drm_display_mode *mode,
7328 struct drm_display_mode *adjusted_mode)
7329{
7330 return true;
7331}
7332
7333static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7334 .disable = dm_crtc_helper_disable,
7335 .atomic_check = dm_crtc_helper_atomic_check,
7336 .mode_fixup = dm_crtc_helper_mode_fixup,
7337 .get_scanout_position = amdgpu_crtc_get_scanout_position,
7338};
7339
7340static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7341{
7342
7343}
7344
7345static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
7346{
7347 switch (display_color_depth) {
7348 case COLOR_DEPTH_666:
7349 return 6;
7350 case COLOR_DEPTH_888:
7351 return 8;
7352 case COLOR_DEPTH_101010:
7353 return 10;
7354 case COLOR_DEPTH_121212:
7355 return 12;
7356 case COLOR_DEPTH_141414:
7357 return 14;
7358 case COLOR_DEPTH_161616:
7359 return 16;
7360 default:
7361 break;
7362 }
7363 return 0;
7364}
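/*
 * Example: the encoder atomic check below multiplies this value by 3
 * (one component per color channel), so COLOR_DEPTH_101010 -> 10 bpc ->
 * 30 bpp, which then feeds the DP MST PBN calculation.
 */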
7365
7366static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7367 struct drm_crtc_state *crtc_state,
7368 struct drm_connector_state *conn_state)
e7b07cee 7369{
7370 struct drm_atomic_state *state = crtc_state->state;
7371 struct drm_connector *connector = conn_state->connector;
7372 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7373 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7374 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7375 struct drm_dp_mst_topology_mgr *mst_mgr;
7376 struct drm_dp_mst_port *mst_port;
7377 enum dc_color_depth color_depth;
7378 int clock, bpp = 0;
1bc22f20 7379 bool is_y420 = false;
7380
7381 if (!aconnector->port || !aconnector->dc_sink)
7382 return 0;
7383
7384 mst_port = aconnector->port;
7385 mst_mgr = &aconnector->mst_port->mst_mgr;
7386
7387 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7388 return 0;
7389
7390 if (!state->duplicated) {
cbd14ae7 7391 int max_bpc = conn_state->max_requested_bpc;
7392 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7393 aconnector->force_yuv420_output;
7394 color_depth = convert_color_depth_from_display_info(connector,
7395 is_y420,
7396 max_bpc);
7397 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7398 clock = adjusted_mode->clock;
dc48529f 7399 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7400 }
7401 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7402 mst_mgr,
7403 mst_port,
1c6c1cb5 7404 dm_new_connector_state->pbn,
03ca9600 7405 dm_mst_get_pbn_divider(aconnector->dc_link));
7406 if (dm_new_connector_state->vcpi_slots < 0) {
7407 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7408 return dm_new_connector_state->vcpi_slots;
7409 }
7410 return 0;
7411}
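/*
 * Worked example with illustrative numbers: for a 1920x1080@60 mode
 * (clock = 148500 kHz) at 8 bpc, bpp = 24 and
 *
 *   PBN = ceil(148500 * 24/8 * 64/54 * 1006/1000 / 1000) = 532
 *
 * while 10 bpc (bpp = 30) comes to roughly 664 PBN, the value stored in
 * dm_new_connector_state->pbn above.
 */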
7412
7413const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7414 .disable = dm_encoder_helper_disable,
7415 .atomic_check = dm_encoder_helper_atomic_check
7416};
7417
d9fe1a4c 7418#if defined(CONFIG_DRM_AMD_DC_DCN)
29b9ba74 7419static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7420 struct dc_state *dc_state,
7421 struct dsc_mst_fairness_vars *vars)
7422{
7423 struct dc_stream_state *stream = NULL;
7424 struct drm_connector *connector;
5760dcb9 7425 struct drm_connector_state *new_con_state;
7426 struct amdgpu_dm_connector *aconnector;
7427 struct dm_connector_state *dm_conn_state;
7428 int i, j;
7429 int vcpi, pbn_div, pbn, slot_num = 0;
29b9ba74 7430
5760dcb9 7431 for_each_new_connector_in_state(state, connector, new_con_state, i) {
7432
7433 aconnector = to_amdgpu_dm_connector(connector);
7434
7435 if (!aconnector->port)
7436 continue;
7437
7438 if (!new_con_state || !new_con_state->crtc)
7439 continue;
7440
7441 dm_conn_state = to_dm_connector_state(new_con_state);
7442
7443 for (j = 0; j < dc_state->stream_count; j++) {
7444 stream = dc_state->streams[j];
7445 if (!stream)
7446 continue;
7447
7448 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
7449 break;
7450
7451 stream = NULL;
7452 }
7453
7454 if (!stream)
7455 continue;
7456
29b9ba74 7457 pbn_div = dm_mst_get_pbn_divider(stream->link);
7458 /* pbn is calculated by compute_mst_dsc_configs_for_state */
7459 for (j = 0; j < dc_state->stream_count; j++) {
7460 if (vars[j].aconnector == aconnector) {
7461 pbn = vars[j].pbn;
7462 break;
7463 }
7464 }
7465
7466 if (j == dc_state->stream_count)
7467 continue;
7468
7469 slot_num = DIV_ROUND_UP(pbn, pbn_div);
7470
7471 if (stream->timing.flags.DSC != 1) {
7472 dm_conn_state->pbn = pbn;
7473 dm_conn_state->vcpi_slots = slot_num;
7474
7475 drm_dp_mst_atomic_enable_dsc(state,
7476 aconnector->port,
7477 dm_conn_state->pbn,
7478 0,
7479 false);
7480 continue;
7481 }
7482
7483 vcpi = drm_dp_mst_atomic_enable_dsc(state,
7484 aconnector->port,
7485 pbn, pbn_div,
7486 true);
7487 if (vcpi < 0)
7488 return vcpi;
7489
7490 dm_conn_state->pbn = pbn;
7491 dm_conn_state->vcpi_slots = vcpi;
7492 }
7493 return 0;
7494}
d9fe1a4c 7495#endif
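/*
 * Worked example with illustrative numbers: a 4-lane HBR2 link gives a
 * pbn_div of about 40 PBN per time slot, so a 532-PBN stream occupies
 * DIV_ROUND_UP(532, 40) = 14 of the 64 MTP time slots; for DSC streams
 * the same PBN is accounted through drm_dp_mst_atomic_enable_dsc()
 * above.
 */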
29b9ba74 7496
7497static void dm_drm_plane_reset(struct drm_plane *plane)
7498{
7499 struct dm_plane_state *amdgpu_state = NULL;
7500
7501 if (plane->state)
7502 plane->funcs->atomic_destroy_state(plane, plane->state);
7503
7504 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
f922237d 7505 WARN_ON(amdgpu_state == NULL);
1f6010a9 7506
7507 if (amdgpu_state)
7508 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7509}
7510
7511static struct drm_plane_state *
7512dm_drm_plane_duplicate_state(struct drm_plane *plane)
7513{
7514 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7515
7516 old_dm_plane_state = to_dm_plane_state(plane->state);
7517 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7518 if (!dm_plane_state)
7519 return NULL;
7520
7521 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7522
7523 if (old_dm_plane_state->dc_state) {
7524 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7525 dc_plane_state_retain(dm_plane_state->dc_state);
7526 }
7527
7528 return &dm_plane_state->base;
7529}
7530
dfd84d90 7531static void dm_drm_plane_destroy_state(struct drm_plane *plane,
3ee6b26b 7532 struct drm_plane_state *state)
7533{
7534 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7535
7536 if (dm_plane_state->dc_state)
7537 dc_plane_state_release(dm_plane_state->dc_state);
e7b07cee 7538
0627bbd3 7539 drm_atomic_helper_plane_destroy_state(plane, state);
7540}
7541
7542static const struct drm_plane_funcs dm_plane_funcs = {
7543 .update_plane = drm_atomic_helper_update_plane,
7544 .disable_plane = drm_atomic_helper_disable_plane,
02680efb 7545 .destroy = drm_primary_helper_destroy,
7546 .reset = dm_drm_plane_reset,
7547 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
7548 .atomic_destroy_state = dm_drm_plane_destroy_state,
faa37f54 7549 .format_mod_supported = dm_plane_format_mod_supported,
7550};
7551
7552static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7553 struct drm_plane_state *new_state)
7554{
7555 struct amdgpu_framebuffer *afb;
7556 struct drm_gem_object *obj;
5d43be0c 7557 struct amdgpu_device *adev;
e7b07cee 7558 struct amdgpu_bo *rbo;
e7b07cee 7559 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7560 uint32_t domain;
7561 int r;
7562
7563 if (!new_state->fb) {
4711c033 7564 DRM_DEBUG_KMS("No FB bound\n");
7565 return 0;
7566 }
7567
7568 afb = to_amdgpu_framebuffer(new_state->fb);
e68d14dd 7569 obj = new_state->fb->obj[0];
e7b07cee 7570 rbo = gem_to_amdgpu_bo(obj);
5d43be0c 7571 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
0f257b09 7572
f06e2167 7573 r = amdgpu_bo_reserve(rbo, true);
7574 if (r) {
7575 dev_err(adev->dev, "failed to reserve bo (%d)\n", r);
e7b07cee 7576 return r;
0f257b09 7577 }
e7b07cee 7578
7579 r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
7580 if (r) {
7581 dev_err(adev->dev, "reserving fence slot failed (%d)\n", r);
7582 goto error_unlock;
7583 }
7584
5d43be0c 7585 if (plane->type != DRM_PLANE_TYPE_CURSOR)
f2bd8a0e 7586 domain = amdgpu_display_supported_domains(adev, rbo->flags);
7587 else
7588 domain = AMDGPU_GEM_DOMAIN_VRAM;
e7b07cee 7589
7b7c6c81 7590 r = amdgpu_bo_pin(rbo, domain);
e7b07cee 7591 if (unlikely(r != 0)) {
7592 if (r != -ERESTARTSYS)
7593 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
f06e2167 7594 goto error_unlock;
7595 }
7596
7597 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7598 if (unlikely(r != 0)) {
bb812f1e 7599 DRM_ERROR("%p bind failed\n", rbo);
f06e2167 7600 goto error_unpin;
e7b07cee 7601 }
7df7e505 7602
f06e2167 7603 amdgpu_bo_unreserve(rbo);
bb812f1e 7604
7b7c6c81 7605 afb->address = amdgpu_bo_gpu_offset(rbo);
7606
7607 amdgpu_bo_ref(rbo);
7608
7609 /*
7610 * We don't do surface updates on planes that have been newly created,
7611 * but we also don't have the afb->address during atomic check.
7612 *
7613 * Fill in buffer attributes depending on the address here, but only on
7614 * newly created planes since they're not being used by DC yet and this
7615 * won't modify global state.
7616 */
7617 dm_plane_state_old = to_dm_plane_state(plane->state);
7618 dm_plane_state_new = to_dm_plane_state(new_state);
7619
3be5262e 7620 if (dm_plane_state_new->dc_state &&
7621 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7622 struct dc_plane_state *plane_state =
7623 dm_plane_state_new->dc_state;
7624 bool force_disable_dcc = !plane_state->dcc.enable;
e7b07cee 7625
320932bf 7626 fill_plane_buffer_attributes(
695af5f9 7627 adev, afb, plane_state->format, plane_state->rotation,
6eed95b0 7628 afb->tiling_flags,
7629 &plane_state->tiling_info, &plane_state->plane_size,
7630 &plane_state->dcc, &plane_state->address,
6eed95b0 7631 afb->tmz_surface, force_disable_dcc);
7632 }
7633
e7b07cee 7634 return 0;
7635
7636error_unpin:
7637 amdgpu_bo_unpin(rbo);
7638
7639error_unlock:
7640 amdgpu_bo_unreserve(rbo);
7641 return r;
7642}
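/*
 * Sketch of the flow above: prepare_fb reserves the BO, reserves a fence
 * slot, pins the buffer into a displayable domain (VRAM for cursors,
 * VRAM or GTT for other planes), binds a GART mapping, and only then
 * records the GPU address in the amdgpu_framebuffer; the error labels
 * unwind in reverse order.
 */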
7643
7644static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7645 struct drm_plane_state *old_state)
7646{
7647 struct amdgpu_bo *rbo;
7648 int r;
7649
7650 if (!old_state->fb)
7651 return;
7652
e68d14dd 7653 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7654 r = amdgpu_bo_reserve(rbo, false);
7655 if (unlikely(r)) {
7656 DRM_ERROR("failed to reserve rbo before unpin\n");
7657 return;
7658 }
7659
7660 amdgpu_bo_unpin(rbo);
7661 amdgpu_bo_unreserve(rbo);
7662 amdgpu_bo_unref(&rbo);
7663}
7664
7665static int dm_plane_helper_check_state(struct drm_plane_state *state,
7666 struct drm_crtc_state *new_crtc_state)
7667{
7668 struct drm_framebuffer *fb = state->fb;
7669 int min_downscale, max_upscale;
7670 int min_scale = 0;
7671 int max_scale = INT_MAX;
7672
40d916a2 7673 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6300b3bd 7674 if (fb && state->crtc) {
7675 /* Validate viewport to cover the case when only the position changes */
7676 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7677 int viewport_width = state->crtc_w;
7678 int viewport_height = state->crtc_h;
7679
7680 if (state->crtc_x < 0)
7681 viewport_width += state->crtc_x;
7682 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7683 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7684
7685 if (state->crtc_y < 0)
7686 viewport_height += state->crtc_y;
7687 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7688 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7689
7690 if (viewport_width < 0 || viewport_height < 0) {
7691 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7692 return -EINVAL;
7693 } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7694 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
40d916a2 7695 return -EINVAL;
7696 } else if (viewport_height < MIN_VIEWPORT_SIZE) {
7697 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
40d916a2 7698 return -EINVAL;
7699 }
7700
7701 }
7702
7703 /* Get min/max allowed scaling factors from plane caps. */
7704 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7705 &min_downscale, &max_upscale);
7706 /*
7707 * Convert to drm convention: 16.16 fixed point, instead of dc's
7708 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7709 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7710 */
7711 min_scale = (1000 << 16) / max_upscale;
7712 max_scale = (1000 << 16) / min_downscale;
7713 }
8c44515b 7714
8c44515b 7715 return drm_atomic_helper_check_plane_state(
6300b3bd 7716 state, new_crtc_state, min_scale, max_scale, true, true);
7717}
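/*
 * Worked example for the conversion above (illustrative DC caps): DC
 * expresses scaling limits as 1.0 == 1000, so max_upscale = 16000 (16x)
 * gives min_scale = (1000 << 16) / 16000 = 4096, i.e. 1/16 in the 16.16
 * fixed-point convention of drm_atomic_helper_check_plane_state()
 * (65536 == 1.0), and min_downscale = 250 (1/4x) gives
 * max_scale = (1000 << 16) / 250 = 262144, i.e. 4.0.
 */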
7718
7578ecda 7719static int dm_plane_atomic_check(struct drm_plane *plane,
7c11b99a 7720 struct drm_atomic_state *state)
cbd19488 7721{
7722 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7723 plane);
1348969a 7724 struct amdgpu_device *adev = drm_to_adev(plane->dev);
cbd19488 7725 struct dc *dc = adev->dm.dc;
78171832 7726 struct dm_plane_state *dm_plane_state;
695af5f9 7727 struct dc_scaling_info scaling_info;
8c44515b 7728 struct drm_crtc_state *new_crtc_state;
695af5f9 7729 int ret;
78171832 7730
ba5c1649 7731 trace_amdgpu_dm_plane_atomic_check(new_plane_state);
e8a98235 7732
ba5c1649 7733 dm_plane_state = to_dm_plane_state(new_plane_state);
cbd19488 7734
3be5262e 7735 if (!dm_plane_state->dc_state)
9a3329b1 7736 return 0;
cbd19488 7737
8c44515b 7738 new_crtc_state =
dec92020 7739 drm_atomic_get_new_crtc_state(state,
ba5c1649 7740 new_plane_state->crtc);
7741 if (!new_crtc_state)
7742 return -EINVAL;
7743
ba5c1649 7744 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7745 if (ret)
7746 return ret;
7747
4375d625 7748 ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
7749 if (ret)
7750 return ret;
a05bcff1 7751
62c933f9 7752 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7753 return 0;
7754
7755 return -EINVAL;
7756}
7757
674e78ac 7758static int dm_plane_atomic_async_check(struct drm_plane *plane,
5ddb0bd4 7759 struct drm_atomic_state *state)
7760{
7761 /* Only support async updates on cursor planes. */
7762 if (plane->type != DRM_PLANE_TYPE_CURSOR)
7763 return -EINVAL;
7764
7765 return 0;
7766}
7767
7768static void dm_plane_atomic_async_update(struct drm_plane *plane,
5ddb0bd4 7769 struct drm_atomic_state *state)
674e78ac 7770{
7771 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7772 plane);
674e78ac 7773 struct drm_plane_state *old_state =
5ddb0bd4 7774 drm_atomic_get_old_plane_state(state, plane);
674e78ac 7775
7776 trace_amdgpu_dm_atomic_update_cursor(new_state);
7777
332af874 7778 swap(plane->state->fb, new_state->fb);
7779
7780 plane->state->src_x = new_state->src_x;
7781 plane->state->src_y = new_state->src_y;
7782 plane->state->src_w = new_state->src_w;
7783 plane->state->src_h = new_state->src_h;
7784 plane->state->crtc_x = new_state->crtc_x;
7785 plane->state->crtc_y = new_state->crtc_y;
7786 plane->state->crtc_w = new_state->crtc_w;
7787 plane->state->crtc_h = new_state->crtc_h;
7788
7789 handle_cursor_update(plane, old_state);
7790}
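/*
 * Note: async (outside-vblank) updates are deliberately limited to the
 * cursor plane, so a cursor move never waits for a full atomic commit;
 * the helper swaps the fb and copies the new src/crtc rectangles into
 * the current state before reprogramming the cursor.
 */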
7791
7792static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7793 .prepare_fb = dm_plane_helper_prepare_fb,
7794 .cleanup_fb = dm_plane_helper_cleanup_fb,
cbd19488 7795 .atomic_check = dm_plane_atomic_check,
7796 .atomic_async_check = dm_plane_atomic_async_check,
7797 .atomic_async_update = dm_plane_atomic_async_update
7798};
7799
7800/*
7801 * TODO: these are currently initialized to rgb formats only.
7802 * For future use cases we should either initialize them dynamically based on
7803 * plane capabilities, or initialize this array to all formats, so the
7804 * internal drm check will succeed, and let DC implement the proper check.
7805 */
d90371b0 7806static const uint32_t rgb_formats[] = {
7807 DRM_FORMAT_XRGB8888,
7808 DRM_FORMAT_ARGB8888,
7809 DRM_FORMAT_RGBA8888,
7810 DRM_FORMAT_XRGB2101010,
7811 DRM_FORMAT_XBGR2101010,
7812 DRM_FORMAT_ARGB2101010,
7813 DRM_FORMAT_ABGR2101010,
7814 DRM_FORMAT_XRGB16161616,
7815 DRM_FORMAT_XBGR16161616,
7816 DRM_FORMAT_ARGB16161616,
7817 DRM_FORMAT_ABGR16161616,
7818 DRM_FORMAT_XBGR8888,
7819 DRM_FORMAT_ABGR8888,
46dd9ff7 7820 DRM_FORMAT_RGB565,
7821};
7822
7823static const uint32_t overlay_formats[] = {
7824 DRM_FORMAT_XRGB8888,
7825 DRM_FORMAT_ARGB8888,
7826 DRM_FORMAT_RGBA8888,
7827 DRM_FORMAT_XBGR8888,
7828 DRM_FORMAT_ABGR8888,
7267a1a9 7829 DRM_FORMAT_RGB565
7830};
7831
7832static const u32 cursor_formats[] = {
7833 DRM_FORMAT_ARGB8888
7834};
7835
7836static int get_plane_formats(const struct drm_plane *plane,
7837 const struct dc_plane_cap *plane_cap,
7838 uint32_t *formats, int max_formats)
e7b07cee 7839{
7840 int i, num_formats = 0;
7841
7842 /*
7843 * TODO: Query support for each group of formats directly from
7844 * DC plane caps. This will require adding more formats to the
7845 * caps list.
7846 */
e7b07cee 7847
f180b4bc 7848 switch (plane->type) {
e7b07cee 7849 case DRM_PLANE_TYPE_PRIMARY:
7850 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7851 if (num_formats >= max_formats)
7852 break;
7853
7854 formats[num_formats++] = rgb_formats[i];
7855 }
7856
ea36ad34 7857 if (plane_cap && plane_cap->pixel_format_support.nv12)
37c6a93b 7858 formats[num_formats++] = DRM_FORMAT_NV12;
7859 if (plane_cap && plane_cap->pixel_format_support.p010)
7860 formats[num_formats++] = DRM_FORMAT_P010;
7861 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7862 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7863 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7864 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7865 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
492548dc 7866 }
e7b07cee 7867 break;
37c6a93b 7868
e7b07cee 7869 case DRM_PLANE_TYPE_OVERLAY:
7870 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7871 if (num_formats >= max_formats)
7872 break;
7873
7874 formats[num_formats++] = overlay_formats[i];
7875 }
e7b07cee 7876 break;
37c6a93b 7877
e7b07cee 7878 case DRM_PLANE_TYPE_CURSOR:
7879 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7880 if (num_formats >= max_formats)
7881 break;
7882
7883 formats[num_formats++] = cursor_formats[i];
7884 }
7885 break;
7886 }
7887
7888 return num_formats;
7889}
7890
7891static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7892 struct drm_plane *plane,
7893 unsigned long possible_crtcs,
7894 const struct dc_plane_cap *plane_cap)
7895{
7896 uint32_t formats[32];
7897 int num_formats;
7898 int res = -EPERM;
ecc874a6 7899 unsigned int supported_rotations;
faa37f54 7900 uint64_t *modifiers = NULL;
7901
7902 num_formats = get_plane_formats(plane, plane_cap, formats,
7903 ARRAY_SIZE(formats));
7904
7905 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7906 if (res)
7907 return res;
7908
7909 if (modifiers == NULL)
7910 adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true;
7911
4a580877 7912 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
37c6a93b 7913 &dm_plane_funcs, formats, num_formats,
7914 modifiers, plane->type, NULL);
7915 kfree(modifiers);
7916 if (res)
7917 return res;
7918
7919 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7920 plane_cap && plane_cap->per_pixel_alpha) {
d74004b6 7921 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7922 BIT(DRM_MODE_BLEND_PREMULTI) |
7923 BIT(DRM_MODE_BLEND_COVERAGE);
7924
7925 drm_plane_create_alpha_property(plane);
7926 drm_plane_create_blend_mode_property(plane, blend_caps);
7927 }
7928
fc8e5230 7929 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7930 plane_cap &&
7931 (plane_cap->pixel_format_support.nv12 ||
7932 plane_cap->pixel_format_support.p010)) {
7933 /* This only affects YUV formats. */
7934 drm_plane_create_color_properties(
7935 plane,
7936 BIT(DRM_COLOR_YCBCR_BT601) |
7937 BIT(DRM_COLOR_YCBCR_BT709) |
7938 BIT(DRM_COLOR_YCBCR_BT2020),
7939 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7940 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7941 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7942 }
7943
7944 supported_rotations =
7945 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7946 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7947
7948 if (dm->adev->asic_type >= CHIP_BONAIRE &&
7949 plane->type != DRM_PLANE_TYPE_CURSOR)
7950 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7951 supported_rotations);
ecc874a6 7952
f180b4bc 7953 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
e7b07cee 7954
96719c54 7955 /* Create (reset) the plane state */
f180b4bc
HW
7956 if (plane->funcs->reset)
7957 plane->funcs->reset(plane);
96719c54 7958
37c6a93b 7959 return 0;
e7b07cee
HW
7960}
7961
7962static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7963 struct drm_plane *plane,
7964 uint32_t crtc_index)
7965{
7966 struct amdgpu_crtc *acrtc = NULL;
f180b4bc 7967 struct drm_plane *cursor_plane;
7968
7969 int res = -ENOMEM;
7970
7971 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7972 if (!cursor_plane)
7973 goto fail;
7974
f180b4bc 7975 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
cc1fec57 7976 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7977
7978 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7979 if (!acrtc)
7980 goto fail;
7981
7982 res = drm_crtc_init_with_planes(
7983 dm->ddev,
7984 &acrtc->base,
7985 plane,
f180b4bc 7986 cursor_plane,
7987 &amdgpu_dm_crtc_funcs, NULL);
7988
7989 if (res)
7990 goto fail;
7991
7992 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7993
7994 /* Create (reset) the plane state */
7995 if (acrtc->base.funcs->reset)
7996 acrtc->base.funcs->reset(&acrtc->base);
7997
7998 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7999 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
8000
8001 acrtc->crtc_id = crtc_index;
8002 acrtc->base.enabled = false;
c37e2d29 8003 acrtc->otg_inst = -1;
8004
8005 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
8006 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
8007 true, MAX_COLOR_LUT_ENTRIES);
086247a4 8008 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
e2881d6d 8009
8010 return 0;
8011
8012fail:
8013 kfree(acrtc);
8014 kfree(cursor_plane);
8015 return res;
8016}
8017
8018
8019static int to_drm_connector_type(enum signal_type st)
8020{
8021 switch (st) {
8022 case SIGNAL_TYPE_HDMI_TYPE_A:
8023 return DRM_MODE_CONNECTOR_HDMIA;
8024 case SIGNAL_TYPE_EDP:
8025 return DRM_MODE_CONNECTOR_eDP;
8026 case SIGNAL_TYPE_LVDS:
8027 return DRM_MODE_CONNECTOR_LVDS;
8028 case SIGNAL_TYPE_RGB:
8029 return DRM_MODE_CONNECTOR_VGA;
8030 case SIGNAL_TYPE_DISPLAY_PORT:
8031 case SIGNAL_TYPE_DISPLAY_PORT_MST:
8032 return DRM_MODE_CONNECTOR_DisplayPort;
8033 case SIGNAL_TYPE_DVI_DUAL_LINK:
8034 case SIGNAL_TYPE_DVI_SINGLE_LINK:
8035 return DRM_MODE_CONNECTOR_DVID;
8036 case SIGNAL_TYPE_VIRTUAL:
8037 return DRM_MODE_CONNECTOR_VIRTUAL;
8038
8039 default:
8040 return DRM_MODE_CONNECTOR_Unknown;
8041 }
8042}
8043
2b4c1c05
DV
8044static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
8045{
62afb4ad
JRS
8046 struct drm_encoder *encoder;
8047
8048 /* There is only one encoder per connector */
8049 drm_connector_for_each_possible_encoder(connector, encoder)
8050 return encoder;
8051
8052 return NULL;
2b4c1c05
DV
8053}
8054
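/* Cache the connector's preferred probed mode as the encoder's native mode. */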
e7b07cee
HW
8055static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
8056{
e7b07cee
HW
8057 struct drm_encoder *encoder;
8058 struct amdgpu_encoder *amdgpu_encoder;
8059
2b4c1c05 8060 encoder = amdgpu_dm_connector_to_encoder(connector);
e7b07cee
HW
8061
8062 if (encoder == NULL)
8063 return;
8064
8065 amdgpu_encoder = to_amdgpu_encoder(encoder);
8066
8067 amdgpu_encoder->native_mode.clock = 0;
8068
8069 if (!list_empty(&connector->probed_modes)) {
8070 struct drm_display_mode *preferred_mode = NULL;
b830ebc9 8071
e7b07cee 8072 list_for_each_entry(preferred_mode,
b830ebc9
HW
8073 &connector->probed_modes,
8074 head) {
8075 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
8076 amdgpu_encoder->native_mode = *preferred_mode;
8077
e7b07cee
HW
8078 break;
8079 }
8080
8081 }
8082}
8083
3ee6b26b
AD
8084static struct drm_display_mode *
8085amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
8086 char *name,
8087 int hdisplay, int vdisplay)
e7b07cee
HW
8088{
8089 struct drm_device *dev = encoder->dev;
8090 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8091 struct drm_display_mode *mode = NULL;
8092 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8093
8094 mode = drm_mode_duplicate(dev, native_mode);
8095
b830ebc9 8096 if (mode == NULL)
e7b07cee
HW
8097 return NULL;
8098
8099 mode->hdisplay = hdisplay;
8100 mode->vdisplay = vdisplay;
8101 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
090afc1e 8102 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
e7b07cee
HW
8103
8104 return mode;
8105
8106}
8107
8108static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
3ee6b26b 8109 struct drm_connector *connector)
e7b07cee
HW
8110{
8111 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8112 struct drm_display_mode *mode = NULL;
8113 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
c84dec2f
HW
8114 struct amdgpu_dm_connector *amdgpu_dm_connector =
8115 to_amdgpu_dm_connector(connector);
e7b07cee
HW
8116 int i;
8117 int n;
8118 struct mode_size {
8119 char name[DRM_DISPLAY_MODE_LEN];
8120 int w;
8121 int h;
b830ebc9 8122 } common_modes[] = {
e7b07cee
HW
8123 { "640x480", 640, 480},
8124 { "800x600", 800, 600},
8125 { "1024x768", 1024, 768},
8126 { "1280x720", 1280, 720},
8127 { "1280x800", 1280, 800},
8128 {"1280x1024", 1280, 1024},
8129 { "1440x900", 1440, 900},
8130 {"1680x1050", 1680, 1050},
8131 {"1600x1200", 1600, 1200},
8132 {"1920x1080", 1920, 1080},
8133 {"1920x1200", 1920, 1200}
8134 };
8135
b830ebc9 8136 n = ARRAY_SIZE(common_modes);
e7b07cee
HW
8137
8138 for (i = 0; i < n; i++) {
8139 struct drm_display_mode *curmode = NULL;
8140 bool mode_existed = false;
8141
8142 if (common_modes[i].w > native_mode->hdisplay ||
b830ebc9
HW
8143 common_modes[i].h > native_mode->vdisplay ||
8144 (common_modes[i].w == native_mode->hdisplay &&
8145 common_modes[i].h == native_mode->vdisplay))
8146 continue;
e7b07cee
HW
8147
8148 list_for_each_entry(curmode, &connector->probed_modes, head) {
8149 if (common_modes[i].w == curmode->hdisplay &&
b830ebc9 8150 common_modes[i].h == curmode->vdisplay) {
e7b07cee
HW
8151 mode_existed = true;
8152 break;
8153 }
8154 }
8155
8156 if (mode_existed)
8157 continue;
8158
8159 mode = amdgpu_dm_create_common_mode(encoder,
8160 common_modes[i].name, common_modes[i].w,
8161 common_modes[i].h);
588a7017
ZQ
8162 if (!mode)
8163 continue;
8164
e7b07cee 8165 drm_mode_probed_add(connector, mode);
c84dec2f 8166 amdgpu_dm_connector->num_modes++;
e7b07cee
HW
8167 }
8168}
8169
d77de788
SS
8170static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8171{
8172 struct drm_encoder *encoder;
8173 struct amdgpu_encoder *amdgpu_encoder;
8174 const struct drm_display_mode *native_mode;
8175
8176 if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8177 connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8178 return;
8179
8180 encoder = amdgpu_dm_connector_to_encoder(connector);
8181 if (!encoder)
8182 return;
8183
8184 amdgpu_encoder = to_amdgpu_encoder(encoder);
8185
8186 native_mode = &amdgpu_encoder->native_mode;
8187 if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8188 return;
8189
8190 drm_connector_set_panel_orientation_with_quirk(connector,
8191 DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8192 native_mode->hdisplay,
8193 native_mode->vdisplay);
8194}
8195
3ee6b26b
AD
8196static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8197 struct edid *edid)
e7b07cee 8198{
c84dec2f
HW
8199 struct amdgpu_dm_connector *amdgpu_dm_connector =
8200 to_amdgpu_dm_connector(connector);
e7b07cee
HW
8201
8202 if (edid) {
8203 /* empty probed_modes */
8204 INIT_LIST_HEAD(&connector->probed_modes);
c84dec2f 8205 amdgpu_dm_connector->num_modes =
e7b07cee
HW
8206 drm_add_edid_modes(connector, edid);
8207
f1e5e913
YMM
8208	/* Sort the probed modes before calling
8209	 * amdgpu_dm_get_native_mode(), since an EDID can have
8210	 * more than one preferred mode. Modes later in the
8211	 * probed list may have a higher preferred resolution:
8212	 * for example, 3840x2160 in the base EDID preferred
8213	 * timing and 4096x2160 in a DID extension block later
8214	 * in the same EDID.
8215	 */
8216 drm_mode_sort(&connector->probed_modes);
e7b07cee 8217 amdgpu_dm_get_native_mode(connector);
f9b4f20c
SW
8218
8219 /* Freesync capabilities are reset by calling
8220 * drm_add_edid_modes() and need to be
8221 * restored here.
8222 */
8223 amdgpu_dm_update_freesync_caps(connector, edid);
d77de788
SS
8224
8225 amdgpu_set_panel_orientation(connector);
a8d8d3dc 8226 } else {
c84dec2f 8227 amdgpu_dm_connector->num_modes = 0;
a8d8d3dc 8228 }
e7b07cee
HW
8229}
8230
a85ba005
NC
8231static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8232 struct drm_display_mode *mode)
8233{
8234 struct drm_display_mode *m;
8235
8236 list_for_each_entry (m, &aconnector->base.probed_modes, head) {
8237 if (drm_mode_equal(m, mode))
8238 return true;
8239 }
8240
8241 return false;
8242}
8243
8244static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8245{
8246 const struct drm_display_mode *m;
8247 struct drm_display_mode *new_mode;
8248 uint i;
8249 uint32_t new_modes_count = 0;
8250
8251 /* Standard FPS values
8252 *
12cdff6b
SC
8253 * 23.976 - TV/NTSC
8254 * 24 - Cinema
8255 * 25 - TV/PAL
8256 * 29.97 - TV/NTSC
8257 * 30 - TV/NTSC
8258 * 48 - Cinema HFR
8259 * 50 - TV/PAL
8260 * 60 - Commonly used
8261 * 48,72,96,120 - Multiples of 24
a85ba005 8262 */
9ce5ed6e
CIK
8263 static const uint32_t common_rates[] = {
8264 23976, 24000, 25000, 29970, 30000,
12cdff6b 8265 48000, 50000, 60000, 72000, 96000, 120000
9ce5ed6e 8266 };
a85ba005
NC
8267
8268 /*
8269	 * Find the mode with the highest refresh rate at the same resolution
8270	 * as the preferred mode. Some monitors report a preferred mode whose
8271	 * refresh rate is lower than the highest one they support.
8272 */
8273
8274 m = get_highest_refresh_rate_mode(aconnector, true);
8275 if (!m)
8276 return 0;
8277
8278 for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8279 uint64_t target_vtotal, target_vtotal_diff;
8280 uint64_t num, den;
8281
8282 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8283 continue;
8284
8285 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8286 common_rates[i] > aconnector->max_vfreq * 1000)
8287 continue;
8288
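		/*
		 * Stretch vtotal so that pixel_clock / (htotal * vtotal)
		 * hits the target rate. Worked example: 1080p with
		 * clock = 148500 kHz and htotal = 2200 targeting
		 * 60000 mHz gives 148500000000 / (60000 * 2200) = 1125,
		 * the standard CTA-861 vtotal, so target_vtotal_diff
		 * would be 0 there.
		 */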
8289 num = (unsigned long long)m->clock * 1000 * 1000;
8290 den = common_rates[i] * (unsigned long long)m->htotal;
8291 target_vtotal = div_u64(num, den);
8292 target_vtotal_diff = target_vtotal - m->vtotal;
8293
8294 /* Check for illegal modes */
8295 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8296 m->vsync_end + target_vtotal_diff < m->vsync_start ||
8297 m->vtotal + target_vtotal_diff < m->vsync_end)
8298 continue;
8299
8300 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8301 if (!new_mode)
8302 goto out;
8303
8304 new_mode->vtotal += (u16)target_vtotal_diff;
8305 new_mode->vsync_start += (u16)target_vtotal_diff;
8306 new_mode->vsync_end += (u16)target_vtotal_diff;
8307 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8308 new_mode->type |= DRM_MODE_TYPE_DRIVER;
8309
8310 if (!is_duplicate_mode(aconnector, new_mode)) {
8311 drm_mode_probed_add(&aconnector->base, new_mode);
8312 new_modes_count += 1;
8313 } else
8314 drm_mode_destroy(aconnector->base.dev, new_mode);
8315 }
8316 out:
8317 return new_modes_count;
8318}
8319
8320static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8321 struct edid *edid)
8322{
8323 struct amdgpu_dm_connector *amdgpu_dm_connector =
8324 to_amdgpu_dm_connector(connector);
8325
de05abe6 8326 if (!edid)
a85ba005 8327 return;
fe8858bb 8328
a85ba005
NC
8329 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8330 amdgpu_dm_connector->num_modes +=
8331 add_fs_modes(amdgpu_dm_connector);
8332}
8333
7578ecda 8334static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
e7b07cee 8335{
c84dec2f
HW
8336 struct amdgpu_dm_connector *amdgpu_dm_connector =
8337 to_amdgpu_dm_connector(connector);
e7b07cee 8338 struct drm_encoder *encoder;
c84dec2f 8339 struct edid *edid = amdgpu_dm_connector->edid;
e7b07cee 8340
2b4c1c05 8341 encoder = amdgpu_dm_connector_to_encoder(connector);
3e332d3a 8342
5c0e6840 8343 if (!drm_edid_is_valid(edid)) {
1b369d3c
ML
8344 amdgpu_dm_connector->num_modes =
8345 drm_add_modes_noedid(connector, 640, 480);
85ee15d6
ML
8346 } else {
8347 amdgpu_dm_connector_ddc_get_modes(connector, edid);
8348 amdgpu_dm_connector_add_common_modes(encoder, connector);
a85ba005 8349 amdgpu_dm_connector_add_freesync_modes(connector, edid);
85ee15d6 8350 }
3e332d3a 8351 amdgpu_dm_fbc_init(connector);
5099114b 8352
c84dec2f 8353 return amdgpu_dm_connector->num_modes;
e7b07cee
HW
8354}
8355
3ee6b26b
AD
8356void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8357 struct amdgpu_dm_connector *aconnector,
8358 int connector_type,
8359 struct dc_link *link,
8360 int link_index)
e7b07cee 8361{
1348969a 8362 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
e7b07cee 8363
f04bee34
NK
8364 /*
8365 * Some of the properties below require access to state, like bpc.
8366 * Allocate some default initial connector state with our reset helper.
8367 */
8368 if (aconnector->base.funcs->reset)
8369 aconnector->base.funcs->reset(&aconnector->base);
8370
e7b07cee
HW
8371 aconnector->connector_id = link_index;
8372 aconnector->dc_link = link;
8373 aconnector->base.interlace_allowed = false;
8374 aconnector->base.doublescan_allowed = false;
8375 aconnector->base.stereo_allowed = false;
8376 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8377 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6ce8f316 8378 aconnector->audio_inst = -1;
e7b07cee
HW
8379 mutex_init(&aconnector->hpd_lock);
8380
1f6010a9
DF
8381 /*
8382	 * Configure HPD hot plug support: connector->polled defaults to 0,
b830ebc9
HW
8383	 * which means HPD hot plug is not supported.
8384	 */
e7b07cee
HW
8385 switch (connector_type) {
8386 case DRM_MODE_CONNECTOR_HDMIA:
8387 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
e7baae1c 8388 aconnector->base.ycbcr_420_allowed =
9ea59d5a 8389			link->link_enc->features.hdmi_ycbcr420_supported;
e7b07cee
HW
8390 break;
8391 case DRM_MODE_CONNECTOR_DisplayPort:
8392 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
d715c9a2 8393 link->link_enc = link_enc_cfg_get_link_enc(link);
7b201d53 8394 ASSERT(link->link_enc);
f6e03f80
JS
8395 if (link->link_enc)
8396 aconnector->base.ycbcr_420_allowed =
9ea59d5a 8397				link->link_enc->features.dp_ycbcr420_supported;
e7b07cee
HW
8398 break;
8399 case DRM_MODE_CONNECTOR_DVID:
8400 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8401 break;
8402 default:
8403 break;
8404 }
8405
8406 drm_object_attach_property(&aconnector->base.base,
8407 dm->ddev->mode_config.scaling_mode_property,
8408 DRM_MODE_SCALE_NONE);
8409
8410 drm_object_attach_property(&aconnector->base.base,
8411 adev->mode_info.underscan_property,
8412 UNDERSCAN_OFF);
8413 drm_object_attach_property(&aconnector->base.base,
8414 adev->mode_info.underscan_hborder_property,
8415 0);
8416 drm_object_attach_property(&aconnector->base.base,
8417 adev->mode_info.underscan_vborder_property,
8418 0);
1825fd34 8419
8c61b31e
JFZ
8420 if (!aconnector->mst_port)
8421 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
1825fd34 8422
4a8ca46b
RL
8423 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
8424 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8425 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
e7b07cee 8426
c1ee92f9 8427 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
5cb32419 8428 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
c1ee92f9
DF
8429 drm_object_attach_property(&aconnector->base.base,
8430 adev->mode_info.abm_level_property, 0);
8431 }
bb47de73
NK
8432
8433 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7fad8da1
NK
8434 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8435 connector_type == DRM_MODE_CONNECTOR_eDP) {
e057b52c 8436 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
88694af9 8437
8c61b31e
JFZ
8438 if (!aconnector->mst_port)
8439 drm_connector_attach_vrr_capable_property(&aconnector->base);
8440
0c8620d6 8441#ifdef CONFIG_DRM_AMD_DC_HDCP
e22bb562 8442 if (adev->dm.hdcp_workqueue)
53e108aa 8443 drm_connector_attach_content_protection_property(&aconnector->base, true);
0c8620d6 8444#endif
bb47de73 8445 }
e7b07cee
HW
8446}
8447
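/* Repack the i2c_msgs into a DC i2c_command and submit it on the link's DDC channel. */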
7578ecda
AD
8448static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8449 struct i2c_msg *msgs, int num)
e7b07cee
HW
8450{
8451 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8452 struct ddc_service *ddc_service = i2c->ddc_service;
8453 struct i2c_command cmd;
8454 int i;
8455 int result = -EIO;
8456
b830ebc9 8457 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
e7b07cee
HW
8458
8459 if (!cmd.payloads)
8460 return result;
8461
8462 cmd.number_of_payloads = num;
8463 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8464 cmd.speed = 100;
8465
8466 for (i = 0; i < num; i++) {
8467 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8468 cmd.payloads[i].address = msgs[i].addr;
8469 cmd.payloads[i].length = msgs[i].len;
8470 cmd.payloads[i].data = msgs[i].buf;
8471 }
8472
c85e6e54
DF
8473 if (dc_submit_i2c(
8474 ddc_service->ctx->dc,
8475 ddc_service->ddc_pin->hw_info.ddc_channel,
e7b07cee
HW
8476 &cmd))
8477 result = num;
8478
8479 kfree(cmd.payloads);
8480 return result;
8481}
8482
7578ecda 8483static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
e7b07cee
HW
8484{
8485 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8486}
8487
8488static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8489 .master_xfer = amdgpu_dm_i2c_xfer,
8490 .functionality = amdgpu_dm_i2c_func,
8491};
8492
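/* Allocate an i2c adapter wrapping @ddc_service, using amdgpu_dm_i2c_algo for transfers. */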
3ee6b26b
AD
8493static struct amdgpu_i2c_adapter *
8494create_i2c(struct ddc_service *ddc_service,
8495 int link_index,
8496 int *res)
e7b07cee
HW
8497{
8498 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8499 struct amdgpu_i2c_adapter *i2c;
8500
b830ebc9 8501 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
2a55f096
ES
8502 if (!i2c)
8503 return NULL;
e7b07cee
HW
8504 i2c->base.owner = THIS_MODULE;
8505 i2c->base.class = I2C_CLASS_DDC;
8506 i2c->base.dev.parent = &adev->pdev->dev;
8507 i2c->base.algo = &amdgpu_dm_i2c_algo;
b830ebc9 8508 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
e7b07cee
HW
8509 i2c_set_adapdata(&i2c->base, i2c);
8510 i2c->ddc_service = ddc_service;
f6e03f80
JS
8511 if (i2c->ddc_service->ddc_pin)
8512 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
e7b07cee
HW
8513
8514 return i2c;
8515}
8516
89fc8d4e 8517
1f6010a9
DF
8518/*
8519 * Note: this function assumes that dc_link_detect() was called for the
b830ebc9
HW
8520 * dc_link which will be represented by this aconnector.
8521 */
7578ecda
AD
8522static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8523 struct amdgpu_dm_connector *aconnector,
8524 uint32_t link_index,
8525 struct amdgpu_encoder *aencoder)
e7b07cee
HW
8526{
8527 int res = 0;
8528 int connector_type;
8529 struct dc *dc = dm->dc;
8530 struct dc_link *link = dc_get_link_at_index(dc, link_index);
8531 struct amdgpu_i2c_adapter *i2c;
9a227d26
TSD
8532
8533 link->priv = aconnector;
e7b07cee 8534
f1ad2f5e 8535 DRM_DEBUG_DRIVER("%s()\n", __func__);
e7b07cee
HW
8536
8537 i2c = create_i2c(link->ddc, link->link_index, &res);
2a55f096
ES
8538 if (!i2c) {
8539 DRM_ERROR("Failed to create i2c adapter data\n");
8540 return -ENOMEM;
8541 }
8542
e7b07cee
HW
8543 aconnector->i2c = i2c;
8544 res = i2c_add_adapter(&i2c->base);
8545
8546 if (res) {
8547 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8548 goto out_free;
8549 }
8550
8551 connector_type = to_drm_connector_type(link->connector_signal);
8552
17165de2 8553 res = drm_connector_init_with_ddc(
e7b07cee
HW
8554 dm->ddev,
8555 &aconnector->base,
8556 &amdgpu_dm_connector_funcs,
17165de2
AP
8557 connector_type,
8558 &i2c->base);
e7b07cee
HW
8559
8560 if (res) {
8561 DRM_ERROR("connector_init failed\n");
8562 aconnector->connector_id = -1;
8563 goto out_free;
8564 }
8565
8566 drm_connector_helper_add(
8567 &aconnector->base,
8568 &amdgpu_dm_connector_helper_funcs);
8569
8570 amdgpu_dm_connector_init_helper(
8571 dm,
8572 aconnector,
8573 connector_type,
8574 link,
8575 link_index);
8576
cde4c44d 8577 drm_connector_attach_encoder(
e7b07cee
HW
8578 &aconnector->base, &aencoder->base);
8579
e7b07cee
HW
8580 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8581 || connector_type == DRM_MODE_CONNECTOR_eDP)
7daec99f 8582 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
e7b07cee 8583
e7b07cee
HW
8584out_free:
8585 if (res) {
8586 kfree(i2c);
8587 aconnector->i2c = NULL;
8588 }
8589 return res;
8590}
8591
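/*
 * Build a possible_crtcs bitmask with one bit per enabled CRTC,
 * e.g. num_crtc == 4 -> 0xf (the encoder may drive CRTCs 0-3).
 */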
8592int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8593{
8594 switch (adev->mode_info.num_crtc) {
8595 case 1:
8596 return 0x1;
8597 case 2:
8598 return 0x3;
8599 case 3:
8600 return 0x7;
8601 case 4:
8602 return 0xf;
8603 case 5:
8604 return 0x1f;
8605 case 6:
8606 default:
8607 return 0x3f;
8608 }
8609}
8610
7578ecda
AD
8611static int amdgpu_dm_encoder_init(struct drm_device *dev,
8612 struct amdgpu_encoder *aencoder,
8613 uint32_t link_index)
e7b07cee 8614{
1348969a 8615 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
8616
8617 int res = drm_encoder_init(dev,
8618 &aencoder->base,
8619 &amdgpu_dm_encoder_funcs,
8620 DRM_MODE_ENCODER_TMDS,
8621 NULL);
8622
8623 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8624
8625 if (!res)
8626 aencoder->encoder_id = link_index;
8627 else
8628 aencoder->encoder_id = -1;
8629
8630 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8631
8632 return res;
8633}
8634
3ee6b26b
AD
8635static void manage_dm_interrupts(struct amdgpu_device *adev,
8636 struct amdgpu_crtc *acrtc,
8637 bool enable)
e7b07cee
HW
8638{
8639 /*
8fe684e9
NK
8640 * We have no guarantee that the frontend index maps to the same
8641 * backend index - some even map to more than one.
8642 *
8643 * TODO: Use a different interrupt or check DC itself for the mapping.
e7b07cee
HW
8644 */
8645 int irq_type =
734dd01d 8646 amdgpu_display_crtc_idx_to_irq_type(
e7b07cee
HW
8647 adev,
8648 acrtc->crtc_id);
8649
8650 if (enable) {
8651 drm_crtc_vblank_on(&acrtc->base);
8652 amdgpu_irq_get(
8653 adev,
8654 &adev->pageflip_irq,
8655 irq_type);
86bc2219
WL
8656#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8657 amdgpu_irq_get(
8658 adev,
8659 &adev->vline0_irq,
8660 irq_type);
8661#endif
e7b07cee 8662 } else {
86bc2219
WL
8663#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8664 amdgpu_irq_put(
8665 adev,
8666 &adev->vline0_irq,
8667 irq_type);
8668#endif
e7b07cee
HW
8669 amdgpu_irq_put(
8670 adev,
8671 &adev->pageflip_irq,
8672 irq_type);
8673 drm_crtc_vblank_off(&acrtc->base);
8674 }
8675}
8676
8fe684e9
NK
8677static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8678 struct amdgpu_crtc *acrtc)
8679{
8680 int irq_type =
8681 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8682
8683 /**
8685	 * This reads the current state for the IRQ and forcibly reapplies
8685 * the setting to hardware.
8686 */
8687 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8688}
8689
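/* Return true if scaling or underscan settings differ between the two connector states. */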
3ee6b26b
AD
8690static bool
8691is_scaling_state_different(const struct dm_connector_state *dm_state,
8692 const struct dm_connector_state *old_dm_state)
e7b07cee
HW
8693{
8694 if (dm_state->scaling != old_dm_state->scaling)
8695 return true;
8696 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8697 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8698 return true;
8699 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8700 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8701 return true;
b830ebc9
HW
8702 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8703 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8704 return true;
e7b07cee
HW
8705 return false;
8706}
8707
0c8620d6
BL
8708#ifdef CONFIG_DRM_AMD_DC_HDCP
8709static bool is_content_protection_different(struct drm_connector_state *state,
8710 const struct drm_connector_state *old_state,
8711 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8712{
8713 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
97f6c917 8714 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
0c8620d6 8715
31c0ed90 8716 /* Handle: Type0/1 change */
53e108aa
BL
8717 if (old_state->hdcp_content_type != state->hdcp_content_type &&
8718 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8719 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8720 return true;
8721 }
8722
31c0ed90
BL
8723	/* CP is being re-enabled; ignore this.
8724 *
8725 * Handles: ENABLED -> DESIRED
8726 */
0c8620d6
BL
8727 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8728 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8729 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8730 return false;
8731 }
8732
31c0ed90
BL
8733	/* S3 resume case: old state will always be 0 (UNDESIRED) and the restored state will be ENABLED.
8734 *
8735 * Handles: UNDESIRED -> ENABLED
8736 */
0c8620d6
BL
8737 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8738 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8739 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8740
0d9a947b
QZ
8741 /* Stream removed and re-enabled
8742 *
8743 * Can sometimes overlap with the HPD case,
8744 * thus set update_hdcp to false to avoid
8745 * setting HDCP multiple times.
8746 *
8747 * Handles: DESIRED -> DESIRED (Special case)
8748 */
8749 if (!(old_state->crtc && old_state->crtc->enabled) &&
8750 state->crtc && state->crtc->enabled &&
8751 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8752 dm_con_state->update_hdcp = false;
8753 return true;
8754 }
8755
8756 /* Hot-plug, headless s3, dpms
8757 *
8758 * Only start HDCP if the display is connected/enabled.
8759 * update_hdcp flag will be set to false until the next
8760 * HPD comes in.
31c0ed90
BL
8761 *
8762 * Handles: DESIRED -> DESIRED (Special case)
0c8620d6 8763 */
97f6c917
BL
8764 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8765 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8766 dm_con_state->update_hdcp = false;
0c8620d6 8767 return true;
97f6c917 8768 }
0c8620d6 8769
31c0ed90
BL
8770 /*
8771 * Handles: UNDESIRED -> UNDESIRED
8772 * DESIRED -> DESIRED
8773 * ENABLED -> ENABLED
8774 */
0c8620d6
BL
8775 if (old_state->content_protection == state->content_protection)
8776 return false;
8777
31c0ed90
BL
8778 /*
8779 * Handles: UNDESIRED -> DESIRED
8780 * DESIRED -> UNDESIRED
8781 * ENABLED -> UNDESIRED
8782 */
97f6c917 8783 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
0c8620d6
BL
8784 return true;
8785
31c0ed90
BL
8786 /*
8787 * Handles: DESIRED -> ENABLED
8788 */
0c8620d6
BL
8789 return false;
8790}
8791
0c8620d6 8792#endif
3ee6b26b
AD
8793static void remove_stream(struct amdgpu_device *adev,
8794 struct amdgpu_crtc *acrtc,
8795 struct dc_stream_state *stream)
e7b07cee
HW
8796{
8797 /* this is the update mode case */
e7b07cee
HW
8798
8799 acrtc->otg_inst = -1;
8800 acrtc->enabled = false;
8801}
8802
7578ecda
AD
8803static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8804 struct dc_cursor_position *position)
2a8f6ccb 8805{
f4c2cc43 8806 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2a8f6ccb
HW
8807 int x, y;
8808 int xorigin = 0, yorigin = 0;
8809
e371e19c 8810 if (!crtc || !plane->state->fb)
2a8f6ccb 8811 return 0;
2a8f6ccb
HW
8812
8813 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8814 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8815 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8816 __func__,
8817 plane->state->crtc_w,
8818 plane->state->crtc_h);
8819 return -EINVAL;
8820 }
8821
8822 x = plane->state->crtc_x;
8823 y = plane->state->crtc_y;
c14a005c 8824
e371e19c
NK
8825 if (x <= -amdgpu_crtc->max_cursor_width ||
8826 y <= -amdgpu_crtc->max_cursor_height)
8827 return 0;
8828
2a8f6ccb
HW
8829 if (x < 0) {
8830 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8831 x = 0;
8832 }
8833 if (y < 0) {
8834 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8835 y = 0;
8836 }
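	/*
	 * Example: crtc_x = -10 yields x = 0 and x_hotspot = 10 (clamped
	 * to max_cursor_width - 1), i.e. the clipped-off portion is
	 * expressed through the hotspot rather than a negative position.
	 */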
8837 position->enable = true;
d243b6ff 8838 position->translate_by_source = true;
2a8f6ccb
HW
8839 position->x = x;
8840 position->y = y;
8841 position->x_hotspot = xorigin;
8842 position->y_hotspot = yorigin;
8843
8844 return 0;
8845}
8846
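/* Program, move or disable the hardware cursor for the CRTC behind @plane. */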
3ee6b26b
AD
8847static void handle_cursor_update(struct drm_plane *plane,
8848 struct drm_plane_state *old_plane_state)
e7b07cee 8849{
1348969a 8850 struct amdgpu_device *adev = drm_to_adev(plane->dev);
2a8f6ccb
HW
8851 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8852 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8853 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8854 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8855 uint64_t address = afb ? afb->address : 0;
6a30a929 8856 struct dc_cursor_position position = {0};
2a8f6ccb
HW
8857 struct dc_cursor_attributes attributes;
8858 int ret;
8859
e7b07cee
HW
8860 if (!plane->state->fb && !old_plane_state->fb)
8861 return;
8862
cb2318b7 8863 DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
4711c033
LT
8864 __func__,
8865 amdgpu_crtc->crtc_id,
8866 plane->state->crtc_w,
8867 plane->state->crtc_h);
2a8f6ccb
HW
8868
8869 ret = get_cursor_position(plane, crtc, &position);
8870 if (ret)
8871 return;
8872
8873 if (!position.enable) {
8874 /* turn off cursor */
674e78ac
NK
8875 if (crtc_state && crtc_state->stream) {
8876 mutex_lock(&adev->dm.dc_lock);
2a8f6ccb
HW
8877 dc_stream_set_cursor_position(crtc_state->stream,
8878 &position);
674e78ac
NK
8879 mutex_unlock(&adev->dm.dc_lock);
8880 }
2a8f6ccb 8881 return;
e7b07cee 8882 }
e7b07cee 8883
2a8f6ccb
HW
8884 amdgpu_crtc->cursor_width = plane->state->crtc_w;
8885 amdgpu_crtc->cursor_height = plane->state->crtc_h;
8886
c1cefe11 8887 memset(&attributes, 0, sizeof(attributes));
2a8f6ccb
HW
8888 attributes.address.high_part = upper_32_bits(address);
8889 attributes.address.low_part = lower_32_bits(address);
8890 attributes.width = plane->state->crtc_w;
8891 attributes.height = plane->state->crtc_h;
8892 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8893 attributes.rotation_angle = 0;
8894 attributes.attribute_flags.value = 0;
8895
03a66367 8896 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
2a8f6ccb 8897
886daac9 8898 if (crtc_state->stream) {
674e78ac 8899 mutex_lock(&adev->dm.dc_lock);
886daac9
JZ
8900 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8901 &attributes))
8902 DRM_ERROR("DC failed to set cursor attributes\n");
2a8f6ccb 8903
2a8f6ccb
HW
8904 if (!dc_stream_set_cursor_position(crtc_state->stream,
8905 &position))
8906 DRM_ERROR("DC failed to set cursor position\n");
674e78ac 8907 mutex_unlock(&adev->dm.dc_lock);
886daac9 8908 }
2a8f6ccb 8909}
e7b07cee
HW
8910
8911static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8912{
8913
8914 assert_spin_locked(&acrtc->base.dev->event_lock);
8915 WARN_ON(acrtc->event);
8916
8917 acrtc->event = acrtc->base.state->event;
8918
8919 /* Set the flip status */
8920 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8921
8922 /* Mark this event as consumed */
8923 acrtc->base.state->event = NULL;
8924
cb2318b7
VL
8925 DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8926 acrtc->crtc_id);
e7b07cee
HW
8927}
8928
bb47de73
NK
8929static void update_freesync_state_on_stream(
8930 struct amdgpu_display_manager *dm,
8931 struct dm_crtc_state *new_crtc_state,
180db303
NK
8932 struct dc_stream_state *new_stream,
8933 struct dc_plane_state *surface,
8934 u32 flip_timestamp_in_us)
bb47de73 8935{
09aef2c4 8936 struct mod_vrr_params vrr_params;
bb47de73 8937 struct dc_info_packet vrr_infopacket = {0};
09aef2c4 8938 struct amdgpu_device *adev = dm->adev;
585d450c 8939 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
09aef2c4 8940 unsigned long flags;
4cda3243 8941 bool pack_sdp_v1_3 = false;
bb47de73
NK
8942
8943 if (!new_stream)
8944 return;
8945
8946 /*
8947 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8948 * For now it's sufficient to just guard against these conditions.
8949 */
8950
8951 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8952 return;
8953
4a580877 8954 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
585d450c 8955 vrr_params = acrtc->dm_irq_params.vrr_params;
09aef2c4 8956
180db303
NK
8957 if (surface) {
8958 mod_freesync_handle_preflip(
8959 dm->freesync_module,
8960 surface,
8961 new_stream,
8962 flip_timestamp_in_us,
8963 &vrr_params);
09aef2c4
MK
8964
8965 if (adev->family < AMDGPU_FAMILY_AI &&
8966 amdgpu_dm_vrr_active(new_crtc_state)) {
8967 mod_freesync_handle_v_update(dm->freesync_module,
8968 new_stream, &vrr_params);
e63e2491
EB
8969
8970 /* Need to call this before the frame ends. */
8971 dc_stream_adjust_vmin_vmax(dm->dc,
8972 new_crtc_state->stream,
8973 &vrr_params.adjust);
09aef2c4 8974 }
180db303 8975 }
bb47de73
NK
8976
8977 mod_freesync_build_vrr_infopacket(
8978 dm->freesync_module,
8979 new_stream,
180db303 8980 &vrr_params,
ecd0136b
HT
8981 PACKET_TYPE_VRR,
8982 TRANSFER_FUNC_UNKNOWN,
4cda3243
MT
8983 &vrr_infopacket,
8984 pack_sdp_v1_3);
bb47de73 8985
8a48b44c 8986 new_crtc_state->freesync_timing_changed |=
585d450c 8987 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
180db303
NK
8988 &vrr_params.adjust,
8989 sizeof(vrr_params.adjust)) != 0);
bb47de73 8990
8a48b44c 8991 new_crtc_state->freesync_vrr_info_changed |=
bb47de73
NK
8992 (memcmp(&new_crtc_state->vrr_infopacket,
8993 &vrr_infopacket,
8994 sizeof(vrr_infopacket)) != 0);
8995
585d450c 8996 acrtc->dm_irq_params.vrr_params = vrr_params;
bb47de73
NK
8997 new_crtc_state->vrr_infopacket = vrr_infopacket;
8998
585d450c 8999 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
bb47de73
NK
9000 new_stream->vrr_infopacket = vrr_infopacket;
9001
9002 if (new_crtc_state->freesync_vrr_info_changed)
9003 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
9004 new_crtc_state->base.crtc->base.id,
9005 (int)new_crtc_state->base.vrr_enabled,
180db303 9006 (int)vrr_params.state);
09aef2c4 9007
4a580877 9008 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
bb47de73
NK
9009}
9010
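/*
 * Recompute the VRR parameters for the new CRTC state and mirror them
 * (plus the freesync config and active plane count) into dm_irq_params
 * for use by the interrupt handlers.
 */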
585d450c 9011static void update_stream_irq_parameters(
e854194c
MK
9012 struct amdgpu_display_manager *dm,
9013 struct dm_crtc_state *new_crtc_state)
9014{
9015 struct dc_stream_state *new_stream = new_crtc_state->stream;
09aef2c4 9016 struct mod_vrr_params vrr_params;
e854194c 9017 struct mod_freesync_config config = new_crtc_state->freesync_config;
09aef2c4 9018 struct amdgpu_device *adev = dm->adev;
585d450c 9019 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
09aef2c4 9020 unsigned long flags;
e854194c
MK
9021
9022 if (!new_stream)
9023 return;
9024
9025 /*
9026 * TODO: Determine why min/max totals and vrefresh can be 0 here.
9027 * For now it's sufficient to just guard against these conditions.
9028 */
9029 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
9030 return;
9031
4a580877 9032 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
585d450c 9033 vrr_params = acrtc->dm_irq_params.vrr_params;
09aef2c4 9034
e854194c
MK
9035 if (new_crtc_state->vrr_supported &&
9036 config.min_refresh_in_uhz &&
9037 config.max_refresh_in_uhz) {
a85ba005
NC
9038 /*
9039 * if freesync compatible mode was set, config.state will be set
9040 * in atomic check
9041 */
9042 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
9043 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
9044 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
9045 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
9046 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
9047 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
9048 vrr_params.state = VRR_STATE_ACTIVE_FIXED;
9049 } else {
9050 config.state = new_crtc_state->base.vrr_enabled ?
9051 VRR_STATE_ACTIVE_VARIABLE :
9052 VRR_STATE_INACTIVE;
9053 }
e854194c
MK
9054 } else {
9055 config.state = VRR_STATE_UNSUPPORTED;
9056 }
9057
9058 mod_freesync_build_vrr_params(dm->freesync_module,
9059 new_stream,
9060 &config, &vrr_params);
9061
9062 new_crtc_state->freesync_timing_changed |=
585d450c
AP
9063 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9064 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
e854194c 9065
585d450c
AP
9066 new_crtc_state->freesync_config = config;
9067 /* Copy state for access from DM IRQ handler */
9068 acrtc->dm_irq_params.freesync_config = config;
9069 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
9070 acrtc->dm_irq_params.vrr_params = vrr_params;
4a580877 9071 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
e854194c
MK
9072}
9073
66b0c973
MK
9074static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
9075 struct dm_crtc_state *new_state)
9076{
9077 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
9078 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
9079
9080 if (!old_vrr_active && new_vrr_active) {
9081 /* Transition VRR inactive -> active:
9082		 * While VRR is active, we must not disable the vblank irq, as a
9083		 * re-enable after a disable could compute bogus vblank/pflip
9084		 * timestamps if it happened inside the display front porch.
d2574c33
MK
9085 *
9086 * We also need vupdate irq for the actual core vblank handling
9087 * at end of vblank.
66b0c973 9088 */
d2574c33 9089 dm_set_vupdate_irq(new_state->base.crtc, true);
66b0c973
MK
9090 drm_crtc_vblank_get(new_state->base.crtc);
9091 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
9092 __func__, new_state->base.crtc->base.id);
9093 } else if (old_vrr_active && !new_vrr_active) {
9094 /* Transition VRR active -> inactive:
9095 * Allow vblank irq disable again for fixed refresh rate.
9096 */
d2574c33 9097 dm_set_vupdate_irq(new_state->base.crtc, false);
66b0c973
MK
9098 drm_crtc_vblank_put(new_state->base.crtc);
9099 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
9100 __func__, new_state->base.crtc->base.id);
9101 }
9102}
9103
8ad27806
NK
9104static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
9105{
9106 struct drm_plane *plane;
5760dcb9 9107 struct drm_plane_state *old_plane_state;
8ad27806
NK
9108 int i;
9109
9110 /*
9111 * TODO: Make this per-stream so we don't issue redundant updates for
9112 * commits with multiple streams.
9113 */
5760dcb9 9114 for_each_old_plane_in_state(state, plane, old_plane_state, i)
8ad27806
NK
9115 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9116 handle_cursor_update(plane, old_plane_state);
9117}
9118
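/*
 * Build one dc_surface_update bundle covering every plane on @pcrtc,
 * throttle flips against the target vblank, and commit the bundle to
 * DC under the dc_lock, handling PSR entry/exit along the way.
 */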
3be5262e 9119static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
eb3dc897 9120 struct dc_state *dc_state,
3ee6b26b
AD
9121 struct drm_device *dev,
9122 struct amdgpu_display_manager *dm,
9123 struct drm_crtc *pcrtc,
420cd472 9124 bool wait_for_vblank)
e7b07cee 9125{
efc8278e 9126 uint32_t i;
8a48b44c 9127 uint64_t timestamp_ns;
e7b07cee 9128 struct drm_plane *plane;
0bc9706d 9129 struct drm_plane_state *old_plane_state, *new_plane_state;
e7b07cee 9130 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
0bc9706d
LSL
9131 struct drm_crtc_state *new_pcrtc_state =
9132 drm_atomic_get_new_crtc_state(state, pcrtc);
9133 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
44d09c6a
HW
9134 struct dm_crtc_state *dm_old_crtc_state =
9135 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
74aa7bd4 9136 int planes_count = 0, vpos, hpos;
570c91d5 9137 long r;
e7b07cee 9138 unsigned long flags;
8a48b44c 9139 struct amdgpu_bo *abo;
fdd1fe57
MK
9140 uint32_t target_vblank, last_flip_vblank;
9141 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
74aa7bd4 9142 bool pflip_present = false;
bc7f670e
DF
9143 struct {
9144 struct dc_surface_update surface_updates[MAX_SURFACES];
9145 struct dc_plane_info plane_infos[MAX_SURFACES];
9146 struct dc_scaling_info scaling_infos[MAX_SURFACES];
74aa7bd4 9147 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
bc7f670e 9148 struct dc_stream_update stream_update;
74aa7bd4 9149 } *bundle;
bc7f670e 9150
74aa7bd4 9151 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8a48b44c 9152
74aa7bd4
DF
9153 if (!bundle) {
9154 dm_error("Failed to allocate update bundle\n");
4b510503
NK
9155 goto cleanup;
9156 }
e7b07cee 9157
8ad27806
NK
9158 /*
9159 * Disable the cursor first if we're disabling all the planes.
9160 * It'll remain on the screen after the planes are re-enabled
9161 * if we don't.
9162 */
9163 if (acrtc_state->active_planes == 0)
9164 amdgpu_dm_commit_cursors(state);
9165
e7b07cee 9166 /* update planes when needed */
efc8278e 9167 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
0bc9706d 9168 struct drm_crtc *crtc = new_plane_state->crtc;
f5ba60fe 9169 struct drm_crtc_state *new_crtc_state;
0bc9706d 9170 struct drm_framebuffer *fb = new_plane_state->fb;
6eed95b0 9171 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
34bafd27 9172 bool plane_needs_flip;
c7af5f77 9173 struct dc_plane_state *dc_plane;
54d76575 9174 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
e7b07cee 9175
80c218d5
NK
9176 /* Cursor plane is handled after stream updates */
9177 if (plane->type == DRM_PLANE_TYPE_CURSOR)
e7b07cee 9178 continue;
e7b07cee 9179
f5ba60fe
DD
9180 if (!fb || !crtc || pcrtc != crtc)
9181 continue;
9182
9183 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9184 if (!new_crtc_state->active)
e7b07cee
HW
9185 continue;
9186
bc7f670e 9187 dc_plane = dm_new_plane_state->dc_state;
e7b07cee 9188
74aa7bd4 9189 bundle->surface_updates[planes_count].surface = dc_plane;
bc7f670e 9190 if (new_pcrtc_state->color_mgmt_changed) {
74aa7bd4
DF
9191 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9192 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
44efb784 9193 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
bc7f670e 9194 }
8a48b44c 9195
4375d625 9196 fill_dc_scaling_info(dm->adev, new_plane_state,
695af5f9 9197 &bundle->scaling_infos[planes_count]);
8a48b44c 9198
695af5f9
NK
9199 bundle->surface_updates[planes_count].scaling_info =
9200 &bundle->scaling_infos[planes_count];
8a48b44c 9201
f5031000 9202 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8a48b44c 9203
f5031000 9204 pflip_present = pflip_present || plane_needs_flip;
8a48b44c 9205
f5031000
DF
9206 if (!plane_needs_flip) {
9207 planes_count += 1;
9208 continue;
9209 }
8a48b44c 9210
2fac0f53
CK
9211 abo = gem_to_amdgpu_bo(fb->obj[0]);
9212
f8308898
AG
9213 /*
9214		 * Wait for all fences on this FB. Use a bounded wait to avoid
9215		 * deadlock during GPU reset, when this fence will not signal
9216		 * but we still hold the reservation lock for the BO.
9217 */
7bc80a54
CK
9218 r = dma_resv_wait_timeout(abo->tbo.base.resv,
9219 DMA_RESV_USAGE_WRITE, false,
d3fae3b3 9220 msecs_to_jiffies(5000));
f8308898 9221 if (unlikely(r <= 0))
ed8a5fb2 9222 DRM_ERROR("Waiting for fences timed out!");
2fac0f53 9223
695af5f9 9224 fill_dc_plane_info_and_addr(
8ce5d842 9225 dm->adev, new_plane_state,
6eed95b0 9226 afb->tiling_flags,
695af5f9 9227 &bundle->plane_infos[planes_count],
87b7ebc2 9228 &bundle->flip_addrs[planes_count].address,
6eed95b0 9229 afb->tmz_surface, false);
87b7ebc2 9230
9f07550b 9231 drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
87b7ebc2
RS
9232 new_plane_state->plane->index,
9233 bundle->plane_infos[planes_count].dcc.enable);
695af5f9
NK
9234
9235 bundle->surface_updates[planes_count].plane_info =
9236 &bundle->plane_infos[planes_count];
8a48b44c 9237
caff0e66
NK
9238 /*
9239 * Only allow immediate flips for fast updates that don't
9240 * change FB pitch, DCC state, rotation or mirroing.
9241 */
f5031000 9242 bundle->flip_addrs[planes_count].flip_immediate =
4d85f45c 9243 crtc->state->async_flip &&
caff0e66 9244 acrtc_state->update_type == UPDATE_TYPE_FAST;
8a48b44c 9245
f5031000
DF
9246 timestamp_ns = ktime_get_ns();
9247 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9248 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9249 bundle->surface_updates[planes_count].surface = dc_plane;
8a48b44c 9250
f5031000
DF
9251 if (!bundle->surface_updates[planes_count].surface) {
9252 DRM_ERROR("No surface for CRTC: id=%d\n",
9253 acrtc_attach->crtc_id);
9254 continue;
bc7f670e
DF
9255 }
9256
f5031000
DF
9257 if (plane == pcrtc->primary)
9258 update_freesync_state_on_stream(
9259 dm,
9260 acrtc_state,
9261 acrtc_state->stream,
9262 dc_plane,
9263 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
bc7f670e 9264
9f07550b 9265 drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
f5031000
DF
9266 __func__,
9267 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9268 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
bc7f670e
DF
9269
9270 planes_count += 1;
9271
8a48b44c
DF
9272 }
9273
74aa7bd4 9274 if (pflip_present) {
634092b1
MK
9275 if (!vrr_active) {
9276 /* Use old throttling in non-vrr fixed refresh rate mode
9277 * to keep flip scheduling based on target vblank counts
9278 * working in a backwards compatible way, e.g., for
9279 * clients using the GLX_OML_sync_control extension or
9280 * DRI3/Present extension with defined target_msc.
9281 */
e3eff4b5 9282 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
634092b1
MK
9283 }
9284 else {
9285 /* For variable refresh rate mode only:
9286 * Get vblank of last completed flip to avoid > 1 vrr
9287 * flips per video frame by use of throttling, but allow
9288 * flip programming anywhere in the possibly large
9289 * variable vrr vblank interval for fine-grained flip
9290 * timing control and more opportunity to avoid stutter
9291 * on late submission of flips.
9292 */
9293 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
5d1c59c4 9294 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
634092b1
MK
9295 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9296 }
9297
fdd1fe57 9298 target_vblank = last_flip_vblank + wait_for_vblank;
8a48b44c
DF
9299
9300 /*
9301 * Wait until we're out of the vertical blank period before the one
9302 * targeted by the flip
9303 */
9304 while ((acrtc_attach->enabled &&
9305 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9306 0, &vpos, &hpos, NULL,
9307 NULL, &pcrtc->hwmode)
9308 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9309 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9310 (int)(target_vblank -
e3eff4b5 9311 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8a48b44c
DF
9312 usleep_range(1000, 1100);
9313 }
9314
8fe684e9
NK
9315 /**
9316 * Prepare the flip event for the pageflip interrupt to handle.
9317 *
9318 * This only works in the case where we've already turned on the
9319 * appropriate hardware blocks (eg. HUBP) so in the transition case
9320 * from 0 -> n planes we have to skip a hardware generated event
9321 * and rely on sending it from software.
9322 */
9323 if (acrtc_attach->base.state->event &&
035f5496
AP
9324 acrtc_state->active_planes > 0 &&
9325 !acrtc_state->force_dpms_off) {
8a48b44c
DF
9326 drm_crtc_vblank_get(pcrtc);
9327
9328 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9329
9330 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9331 prepare_flip_isr(acrtc_attach);
9332
9333 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9334 }
9335
9336 if (acrtc_state->stream) {
8a48b44c 9337 if (acrtc_state->freesync_vrr_info_changed)
74aa7bd4 9338 bundle->stream_update.vrr_infopacket =
8a48b44c 9339 &acrtc_state->stream->vrr_infopacket;
e7b07cee 9340 }
e7b07cee
HW
9341 }
9342
bc92c065 9343 /* Update the planes if changed or disable if we don't have any. */
ed9656fb
ES
9344 if ((planes_count || acrtc_state->active_planes == 0) &&
9345 acrtc_state->stream) {
58aa1c50
NK
9346 /*
9347 * If PSR or idle optimizations are enabled then flush out
9348 * any pending work before hardware programming.
9349 */
06dd1888
NK
9350 if (dm->vblank_control_workqueue)
9351 flush_workqueue(dm->vblank_control_workqueue);
58aa1c50 9352
b6e881c9 9353 bundle->stream_update.stream = acrtc_state->stream;
bc7f670e 9354 if (new_pcrtc_state->mode_changed) {
74aa7bd4
DF
9355 bundle->stream_update.src = acrtc_state->stream->src;
9356 bundle->stream_update.dst = acrtc_state->stream->dst;
e7b07cee
HW
9357 }
9358
cf020d49
NK
9359 if (new_pcrtc_state->color_mgmt_changed) {
9360 /*
9361 * TODO: This isn't fully correct since we've actually
9362 * already modified the stream in place.
9363 */
9364 bundle->stream_update.gamut_remap =
9365 &acrtc_state->stream->gamut_remap_matrix;
9366 bundle->stream_update.output_csc_transform =
9367 &acrtc_state->stream->csc_color_matrix;
9368 bundle->stream_update.out_transfer_func =
9369 acrtc_state->stream->out_transfer_func;
9370 }
bc7f670e 9371
8a48b44c 9372 acrtc_state->stream->abm_level = acrtc_state->abm_level;
bc7f670e 9373 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
74aa7bd4 9374 bundle->stream_update.abm_level = &acrtc_state->abm_level;
44d09c6a 9375
e63e2491
EB
9376 /*
9377 * If FreeSync state on the stream has changed then we need to
9378 * re-adjust the min/max bounds now that DC doesn't handle this
9379 * as part of commit.
9380 */
a85ba005 9381 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
e63e2491
EB
9382 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9383 dc_stream_adjust_vmin_vmax(
9384 dm->dc, acrtc_state->stream,
585d450c 9385 &acrtc_attach->dm_irq_params.vrr_params.adjust);
e63e2491
EB
9386 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9387 }
bc7f670e 9388 mutex_lock(&dm->dc_lock);
8c322309 9389 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
d1ebfdd8 9390 acrtc_state->stream->link->psr_settings.psr_allow_active)
8c322309
RL
9391 amdgpu_dm_psr_disable(acrtc_state->stream);
9392
bc7f670e 9393 dc_commit_updates_for_stream(dm->dc,
74aa7bd4 9394 bundle->surface_updates,
bc7f670e
DF
9395 planes_count,
9396 acrtc_state->stream,
efc8278e
AJ
9397 &bundle->stream_update,
9398 dc_state);
8c322309 9399
8fe684e9
NK
9400 /**
9401 * Enable or disable the interrupts on the backend.
9402 *
9403 * Most pipes are put into power gating when unused.
9404 *
9405 * When power gating is enabled on a pipe we lose the
9406 * interrupt enablement state when power gating is disabled.
9407 *
9408 * So we need to update the IRQ control state in hardware
9409 * whenever the pipe turns on (since it could be previously
9410 * power gated) or off (since some pipes can't be power gated
9411 * on some ASICs).
9412 */
9413 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
1348969a
LT
9414 dm_update_pflip_irq_state(drm_to_adev(dev),
9415 acrtc_attach);
8fe684e9 9416
8c322309 9417 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
1cfbbdde 9418 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
d1ebfdd8 9419 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8c322309 9420 amdgpu_dm_link_setup_psr(acrtc_state->stream);
58aa1c50
NK
9421
9422 /* Decrement skip count when PSR is enabled and we're doing fast updates. */
9423 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9424 acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9425 struct amdgpu_dm_connector *aconn =
9426 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
1a365683
RL
9427
9428 if (aconn->psr_skip_count > 0)
9429 aconn->psr_skip_count--;
58aa1c50
NK
9430
9431 /* Allow PSR when skip count is 0. */
9432 acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9433 } else {
9434 acrtc_attach->dm_irq_params.allow_psr_entry = false;
8c322309
RL
9435 }
9436
bc7f670e 9437 mutex_unlock(&dm->dc_lock);
e7b07cee 9438 }
4b510503 9439
8ad27806
NK
9440 /*
9441 * Update cursor state *after* programming all the planes.
9442 * This avoids redundant programming in the case where we're going
9443 * to be disabling a single plane - those pipes are being disabled.
9444 */
9445 if (acrtc_state->active_planes)
9446 amdgpu_dm_commit_cursors(state);
80c218d5 9447
4b510503 9448cleanup:
74aa7bd4 9449 kfree(bundle);
e7b07cee
HW
9450}
9451
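/* Send audio ELD notifications for connectors whose CRTC routing changed. */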
6ce8f316
NK
9452static void amdgpu_dm_commit_audio(struct drm_device *dev,
9453 struct drm_atomic_state *state)
9454{
1348969a 9455 struct amdgpu_device *adev = drm_to_adev(dev);
6ce8f316
NK
9456 struct amdgpu_dm_connector *aconnector;
9457 struct drm_connector *connector;
9458 struct drm_connector_state *old_con_state, *new_con_state;
9459 struct drm_crtc_state *new_crtc_state;
9460 struct dm_crtc_state *new_dm_crtc_state;
9461 const struct dc_stream_status *status;
9462 int i, inst;
9463
9464 /* Notify device removals. */
9465 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9466 if (old_con_state->crtc != new_con_state->crtc) {
9467 /* CRTC changes require notification. */
9468 goto notify;
9469 }
9470
9471 if (!new_con_state->crtc)
9472 continue;
9473
9474 new_crtc_state = drm_atomic_get_new_crtc_state(
9475 state, new_con_state->crtc);
9476
9477 if (!new_crtc_state)
9478 continue;
9479
9480 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9481 continue;
9482
9483 notify:
9484 aconnector = to_amdgpu_dm_connector(connector);
9485
9486 mutex_lock(&adev->dm.audio_lock);
9487 inst = aconnector->audio_inst;
9488 aconnector->audio_inst = -1;
9489 mutex_unlock(&adev->dm.audio_lock);
9490
9491 amdgpu_dm_audio_eld_notify(adev, inst);
9492 }
9493
9494 /* Notify audio device additions. */
9495 for_each_new_connector_in_state(state, connector, new_con_state, i) {
9496 if (!new_con_state->crtc)
9497 continue;
9498
9499 new_crtc_state = drm_atomic_get_new_crtc_state(
9500 state, new_con_state->crtc);
9501
9502 if (!new_crtc_state)
9503 continue;
9504
9505 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9506 continue;
9507
9508 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9509 if (!new_dm_crtc_state->stream)
9510 continue;
9511
9512 status = dc_stream_get_status(new_dm_crtc_state->stream);
9513 if (!status)
9514 continue;
9515
9516 aconnector = to_amdgpu_dm_connector(connector);
9517
9518 mutex_lock(&adev->dm.audio_lock);
9519 inst = status->audio_inst;
9520 aconnector->audio_inst = inst;
9521 mutex_unlock(&adev->dm.audio_lock);
9522
9523 amdgpu_dm_audio_eld_notify(adev, inst);
9524 }
9525}
9526
1f6010a9 9527/*
27b3f4fc
LSL
9528 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9529 * @crtc_state: the DRM CRTC state
9530 * @stream_state: the DC stream state.
9531 *
9532 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
9533 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9534 */
9535static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9536 struct dc_stream_state *stream_state)
9537{
b9952f93 9538 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
27b3f4fc 9539}
e7b07cee 9540
b8592b48
LL
9541/**
9542 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9543 * @state: The atomic state to commit
9544 *
9545 * This will tell DC to commit the constructed DC state from atomic_check,
9546 * programming the hardware. Any failures here implies a hardware failure, since
9547 * atomic check should have filtered anything non-kosher.
9548 */
7578ecda 9549static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
e7b07cee
HW
9550{
9551 struct drm_device *dev = state->dev;
1348969a 9552 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
9553 struct amdgpu_display_manager *dm = &adev->dm;
9554 struct dm_atomic_state *dm_state;
eb3dc897 9555 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
e7b07cee 9556 uint32_t i, j;
5cc6dcbd 9557 struct drm_crtc *crtc;
0bc9706d 9558 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
e7b07cee
HW
9559 unsigned long flags;
9560 bool wait_for_vblank = true;
9561 struct drm_connector *connector;
c2cea706 9562 struct drm_connector_state *old_con_state, *new_con_state;
54d76575 9563 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
fe2a1965 9564 int crtc_disable_count = 0;
6ee90e88 9565 bool mode_set_reset_required = false;
e7b07cee 9566
e8a98235
RS
9567 trace_amdgpu_dm_atomic_commit_tail_begin(state);
9568
e7b07cee
HW
9569 drm_atomic_helper_update_legacy_modeset_state(dev, state);
9570
eb3dc897
NK
9571 dm_state = dm_atomic_get_new_state(state);
9572 if (dm_state && dm_state->context) {
9573 dc_state = dm_state->context;
9574 } else {
9575 /* No state changes, retain current state. */
813d20dc 9576 dc_state_temp = dc_create_state(dm->dc);
eb3dc897
NK
9577 ASSERT(dc_state_temp);
9578 dc_state = dc_state_temp;
9579 dc_resource_state_copy_construct_current(dm->dc, dc_state);
9580 }
e7b07cee 9581
6d90a208
AP
9582 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9583 new_crtc_state, i) {
9584 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9585
9586 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9587
9588 if (old_crtc_state->active &&
9589 (!new_crtc_state->active ||
9590 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9591 manage_dm_interrupts(adev, acrtc, false);
9592 dc_stream_release(dm_old_crtc_state->stream);
9593 }
9594 }
9595
8976f73b
RS
9596 drm_atomic_helper_calc_timestamping_constants(state);
9597
e7b07cee 9598 /* update changed items */
0bc9706d 9599 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
e7b07cee 9600 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 9601
54d76575
LSL
9602 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9603 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
e7b07cee 9604
9f07550b 9605 drm_dbg_state(state->dev,
e7b07cee
HW
9606 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9607 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
9608 "connectors_changed:%d\n",
9609 acrtc->crtc_id,
0bc9706d
LSL
9610 new_crtc_state->enable,
9611 new_crtc_state->active,
9612 new_crtc_state->planes_changed,
9613 new_crtc_state->mode_changed,
9614 new_crtc_state->active_changed,
9615 new_crtc_state->connectors_changed);
e7b07cee 9616
5c68c652
VL
9617 /* Disable cursor if disabling crtc */
9618 if (old_crtc_state->active && !new_crtc_state->active) {
9619 struct dc_cursor_position position;
9620
9621 memset(&position, 0, sizeof(position));
9622 mutex_lock(&dm->dc_lock);
9623 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9624 mutex_unlock(&dm->dc_lock);
9625 }
9626
27b3f4fc
LSL
9627 /* Copy all transient state flags into dc state */
9628 if (dm_new_crtc_state->stream) {
9629 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9630 dm_new_crtc_state->stream);
9631 }
9632
e7b07cee
HW
9633 /* handles headless hotplug case, updating new_state and
9634 * aconnector as needed
9635 */
9636
54d76575 9637 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
e7b07cee 9638
4711c033 9639 DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 9640
54d76575 9641 if (!dm_new_crtc_state->stream) {
e7b07cee 9642 /*
b830ebc9
HW
9643 * this could happen because of issues with
9644 * userspace notifications delivery.
9645 * In this case userspace tries to set mode on
1f6010a9
DF
9646 * display which is disconnected in fact.
9647 * dc_sink is NULL in this case on aconnector.
b830ebc9
HW
9648 * We expect reset mode will come soon.
9649 *
9650 * This can also happen when unplug is done
9651 * during resume sequence ended
9652 *
9653 * In this case, we want to pretend we still
9654 * have a sink to keep the pipe running so that
9655 * hw state is consistent with the sw state
9656 */
f1ad2f5e 9657 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
e7b07cee
HW
9658 __func__, acrtc->base.base.id);
9659 continue;
9660 }
9661
54d76575
LSL
9662 if (dm_old_crtc_state->stream)
9663 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
e7b07cee 9664
97028037
LP
9665 pm_runtime_get_noresume(dev->dev);
9666
e7b07cee 9667 acrtc->enabled = true;
0bc9706d
LSL
9668 acrtc->hw_mode = new_crtc_state->mode;
9669 crtc->hwmode = new_crtc_state->mode;
6ee90e88 9670 mode_set_reset_required = true;
0bc9706d 9671 } else if (modereset_required(new_crtc_state)) {
4711c033 9672 DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 9673 /* i.e. reset mode */
6ee90e88 9674 if (dm_old_crtc_state->stream)
54d76575 9675 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
a85ba005 9676
6ee90e88 9677 mode_set_reset_required = true;
e7b07cee
HW
9678 }
9679 } /* for_each_crtc_in_state() */

	if (dc_state) {
		/* If there is a mode set or reset, disable eDP PSR */
		if (mode_set_reset_required) {
			if (dm->vblank_control_workqueue)
				flush_workqueue(dm->vblank_control_workqueue);

			amdgpu_dm_psr_disable_all(dm);
		}

		dm_enable_per_frame_crtc_master_sync(dc_state);
		mutex_lock(&dm->dc_lock);
		WARN_ON(!dc_commit_state(dm->dc, dc_state));

		/* Allow idle optimization when vblank count is 0 for display off */
		if (dm->active_vblank_irq_count == 0)
			dc_allow_idle_optimizations(dm->dc, true);
		mutex_unlock(&dm->dc_lock);
	}

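	/* Record which hardware timing generator (OTG) DC assigned to each stream. */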
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream != NULL) {
			const struct dc_stream_status *status =
					dc_stream_get_status(dm_new_crtc_state->stream);

			if (!status)
				status = dc_stream_get_status_from_state(dc_state,
									 dm_new_crtc_state->stream);
			if (!status)
				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
			else
				acrtc->otg_inst = status->primary_otg_inst;
		}
	}
#ifdef CONFIG_DRM_AMD_DC_HDCP
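	/*
	 * HDCP state handling: if a protected stream went away, drop the
	 * display back to DESIRED so protection is renegotiated when the
	 * stream returns; otherwise propagate any content protection change.
	 */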
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

		new_crtc_state = NULL;

		if (acrtc)
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
			dm_new_con_state->update_hdcp = true;
			continue;
		}

		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
			hdcp_update_display(
				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
				new_con_state->hdcp_content_type,
				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
	}
#endif

	/* Handle connector state changes */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct dc_surface_update dummy_updates[MAX_SURFACES];
		struct dc_stream_update stream_update;
		struct dc_info_packet hdr_packet;
		struct dc_stream_status *status = NULL;
		bool abm_changed, hdr_changed, scaling_changed;

		memset(&dummy_updates, 0, sizeof(dummy_updates));
		memset(&stream_update, 0, sizeof(stream_update));

		if (acrtc) {
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
		}

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		scaling_changed = is_scaling_state_different(dm_new_con_state,
							     dm_old_con_state);

		abm_changed = dm_new_crtc_state->abm_level !=
			      dm_old_crtc_state->abm_level;

		hdr_changed =
			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);

		if (!scaling_changed && !abm_changed && !hdr_changed)
			continue;

		stream_update.stream = dm_new_crtc_state->stream;
		if (scaling_changed) {
			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
					dm_new_con_state, dm_new_crtc_state->stream);

			stream_update.src = dm_new_crtc_state->stream->src;
			stream_update.dst = dm_new_crtc_state->stream->dst;
		}

		if (abm_changed) {
			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;

			stream_update.abm_level = &dm_new_crtc_state->abm_level;
		}

		if (hdr_changed) {
			fill_hdr_info_packet(new_con_state, &hdr_packet);
			stream_update.hdr_static_metadata = &hdr_packet;
		}

		status = dc_stream_get_status(dm_new_crtc_state->stream);

		if (WARN_ON(!status))
			continue;

		WARN_ON(!status->plane_count);

		/*
		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
		 * Here we create an empty update on each plane.
		 * To fix this, DC should permit updating only stream properties.
		 */
		for (j = 0; j < status->plane_count; j++)
			dummy_updates[j].surface = status->plane_states[0];

		mutex_lock(&dm->dc_lock);
		dc_commit_updates_for_stream(dm->dc,
					     dummy_updates,
					     status->plane_count,
					     dm_new_crtc_state->stream,
					     &stream_update,
					     dc_state);
		mutex_unlock(&dm->dc_lock);
	}

	/* Count number of newly disabled CRTCs for dropping PM refs later. */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		if (old_crtc_state->active && !new_crtc_state->active)
			crtc_disable_count++;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		/* For freesync config update on crtc state and params for irq */
		update_stream_irq_parameters(dm, dm_new_crtc_state);

		/* Handle vrr on->off / off->on transitions */
		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
						dm_new_crtc_state);
	}

	/*
	 * Enable interrupts for CRTCs that are newly enabled or went through
	 * a modeset. It was intentionally deferred until after the front end
	 * state was modified to wait until the OTG was on and so the IRQ
	 * handlers didn't access stale or invalid state.
	 */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
#ifdef CONFIG_DEBUG_FS
		bool configure_crc = false;
		enum amdgpu_dm_pipe_crc_source cur_crc_src;
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
		struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
#endif
		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
		cur_crc_src = acrtc->dm_irq_params.crc_src;
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
#endif
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (new_crtc_state->active &&
		    (!old_crtc_state->active ||
		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
			dc_stream_retain(dm_new_crtc_state->stream);
			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
			manage_dm_interrupts(adev, acrtc, true);

#ifdef CONFIG_DEBUG_FS
			/*
			 * Frontend may have changed so reapply the CRC capture
			 * settings for the stream.
			 */
			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
				configure_crc = true;
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
				if (amdgpu_dm_crc_window_is_activated(crtc)) {
					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
					acrtc->dm_irq_params.crc_window.update_win = true;
					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
					spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
					crc_rd_wrk->crtc = crtc;
					spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
				}
#endif
			}

			if (configure_crc)
				if (amdgpu_dm_crtc_configure_crc_source(
					crtc, dm_new_crtc_state, cur_crc_src))
					DRM_DEBUG_DRIVER("Failed to configure crc source");
#endif
		}
	}

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
		if (new_crtc_state->async_flip)
			wait_for_vblank = false;

	/* update planes when needed per crtc */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream)
			amdgpu_dm_commit_planes(state, dc_state, dev,
						dm, crtc, wait_for_vblank);
	}

	/* Update audio instances for each connector. */
	amdgpu_dm_commit_audio(dev, state);

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
	/* restore the backlight level */
	for (i = 0; i < dm->num_of_edps; i++) {
		if (dm->backlight_dev[i] &&
		    (dm->actual_brightness[i] != dm->brightness[i]))
			amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
	}
#endif
	/*
	 * send vblank event on all events not handled in flip and
	 * mark consumed event for drm_atomic_helper_commit_hw_done
	 */
	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {

		if (new_crtc_state->event)
			drm_send_event_locked(dev, &new_crtc_state->event->base);

		new_crtc_state->event = NULL;
	}
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	/* Signal HW programming completion */
	drm_atomic_helper_commit_hw_done(state);

	if (wait_for_vblank)
		drm_atomic_helper_wait_for_flip_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	/* return the stolen vga memory back to VRAM */
	if (!adev->mman.keep_stolen_vga_memory)
		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);

	/*
	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
	 * so we can put the GPU into runtime suspend if we're not driving any
	 * displays anymore
	 */
	for (i = 0; i < crtc_disable_count; i++)
		pm_runtime_put_autosuspend(dev->dev);
	pm_runtime_mark_last_busy(dev->dev);

	if (dc_state_temp)
		dc_release_state(dc_state_temp);
}


static int dm_force_atomic_commit(struct drm_connector *connector)
{
	int ret = 0;
	struct drm_device *ddev = connector->dev;
	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
	struct drm_plane *plane = disconnected_acrtc->base.primary;
	struct drm_connector_state *conn_state;
	struct drm_crtc_state *crtc_state;
	struct drm_plane_state *plane_state;

	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ddev->mode_config.acquire_ctx;

	/* Construct an atomic state to restore previous display setting */

	/* Attach connectors to drm_atomic_state */
	conn_state = drm_atomic_get_connector_state(state, connector);

	ret = PTR_ERR_OR_ZERO(conn_state);
	if (ret)
		goto out;

	/* Attach crtc to drm_atomic_state */
	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);

	ret = PTR_ERR_OR_ZERO(crtc_state);
	if (ret)
		goto out;

	/* force a restore */
	crtc_state->mode_changed = true;

	/* Attach plane to drm_atomic_state */
	plane_state = drm_atomic_get_plane_state(state, plane);

	ret = PTR_ERR_OR_ZERO(plane_state);
	if (ret)
		goto out;

	/* Call commit internally with the state we just constructed */
	ret = drm_atomic_commit(state);

out:
	drm_atomic_state_put(state);
	if (ret)
		DRM_ERROR("Restoring old state failed with %i\n", ret);

	return ret;
}

/*
 * This function handles all cases when set mode does not come upon hotplug.
 * This includes when a display is unplugged then plugged back into the
 * same port and when running without usermode desktop manager support.
 */
void dm_restore_drm_connector_state(struct drm_device *dev,
				    struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct amdgpu_crtc *disconnected_acrtc;
	struct dm_crtc_state *acrtc_state;

	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
		return;

	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
	if (!disconnected_acrtc)
		return;

	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
	if (!acrtc_state->stream)
		return;

	/*
	 * If the previous sink is not released and different from the current,
	 * we deduce we are in a state where we can not rely on usermode call
	 * to turn on the display, so we do it here
	 */
	if (acrtc_state->stream->sink != aconnector->dc_sink)
		dm_force_atomic_commit(&aconnector->base);
}

/*
 * Grabs all modesetting locks to serialize against any blocking commits,
 * and waits for completion of all non-blocking commits.
 */
static int do_aquire_global_lock(struct drm_device *dev,
				 struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_commit *commit;
	long ret;

	/*
	 * Adding all modeset locks to acquire_ctx will
	 * ensure that when the framework releases them the
	 * extra locks we are taking here will also be released
	 */
	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
	if (ret)
		return ret;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
				struct drm_crtc_commit, commit_entry);
		if (commit)
			drm_crtc_commit_get(commit);
		spin_unlock(&crtc->commit_lock);

		if (!commit)
			continue;

		/*
		 * Make sure all pending HW programming has completed and
		 * page flips are done
		 */
		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);

		if (ret > 0)
			ret = wait_for_completion_interruptible_timeout(
					&commit->flip_done, 10*HZ);

		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
				  crtc->base.id, crtc->name);

		drm_crtc_commit_put(commit);
	}

	return ret < 0 ? ret : 0;
}

static void get_freesync_config_for_crtc(
	struct dm_crtc_state *new_crtc_state,
	struct dm_connector_state *new_con_state)
{
	struct mod_freesync_config config = {0};
	struct amdgpu_dm_connector *aconnector =
			to_amdgpu_dm_connector(new_con_state->base.connector);
	struct drm_display_mode *mode = &new_crtc_state->base.mode;
	int vrefresh = drm_mode_vrefresh(mode);
	bool fs_vid_mode = false;

	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
					vrefresh >= aconnector->min_vfreq &&
					vrefresh <= aconnector->max_vfreq;

	if (new_crtc_state->vrr_supported) {
		new_crtc_state->stream->ignore_msa_timing_param = true;
		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;

		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
		config.vsif_supported = true;
		config.btr = true;

		if (fs_vid_mode) {
			config.state = VRR_STATE_ACTIVE_FIXED;
			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
			goto out;
		} else if (new_crtc_state->base.vrr_enabled) {
			config.state = VRR_STATE_ACTIVE_VARIABLE;
		} else {
			config.state = VRR_STATE_INACTIVE;
		}
	}
out:
	new_crtc_state->freesync_config = config;
}

static void reset_freesync_config_for_crtc(
	struct dm_crtc_state *new_crtc_state)
{
	new_crtc_state->vrr_supported = false;

	memset(&new_crtc_state->vrr_infopacket, 0,
	       sizeof(new_crtc_state->vrr_infopacket));
}
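
/*
 * A mode switch qualifies as "timing unchanged" for freesync purposes when
 * everything matches except the vertical total and vsync position, i.e. the
 * two modes differ only in vertical front porch. Such a switch can be
 * absorbed by the variable-refresh hardware without a full modeset.
 */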
static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state)
{
	const struct drm_display_mode *old_mode, *new_mode;

	if (!old_crtc_state || !new_crtc_state)
		return false;

	old_mode = &old_crtc_state->mode;
	new_mode = &new_crtc_state->mode;

	if (old_mode->clock == new_mode->clock &&
	    old_mode->hdisplay == new_mode->hdisplay &&
	    old_mode->vdisplay == new_mode->vdisplay &&
	    old_mode->htotal == new_mode->htotal &&
	    old_mode->vtotal != new_mode->vtotal &&
	    old_mode->hsync_start == new_mode->hsync_start &&
	    old_mode->vsync_start != new_mode->vsync_start &&
	    old_mode->hsync_end == new_mode->hsync_end &&
	    old_mode->vsync_end != new_mode->vsync_end &&
	    old_mode->hskew == new_mode->hskew &&
	    old_mode->vscan == new_mode->vscan &&
	    (old_mode->vsync_end - old_mode->vsync_start) ==
	    (new_mode->vsync_end - new_mode->vsync_start))
		return true;

	return false;
}
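
/*
 * The pixel clock is in kHz, so the fixed refresh rate computed below comes
 * out in micro-Hz: clock * 1000 * 1000000 / (htotal * vtotal). For example,
 * a 148500 kHz mode with 2200 x 1125 total pixels yields 60000000 uHz (60 Hz).
 */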
static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
	uint64_t num, den, res;
	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;

	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;

	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
	den = (unsigned long long)new_crtc_state->mode.htotal *
	      (unsigned long long)new_crtc_state->mode.vtotal;

	res = div_u64(num, den);
	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
}

static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
				struct drm_atomic_state *state,
				struct drm_crtc *crtc,
				struct drm_crtc_state *old_crtc_state,
				struct drm_crtc_state *new_crtc_state,
				bool enable,
				bool *lock_and_validation_needed)
{
	struct dm_atomic_state *dm_state = NULL;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	struct dc_stream_state *new_stream;
	int ret = 0;

	/*
	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
	 * update changed items
	 */
	struct amdgpu_crtc *acrtc = NULL;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;

	new_stream = NULL;

	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
	acrtc = to_amdgpu_crtc(crtc);
	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);

	/* TODO This hack should go away */
	if (aconnector && enable) {
		/* Make sure fake sink is created in plug-in scenario */
		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
									&aconnector->base);
		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
									&aconnector->base);

		if (IS_ERR(drm_new_conn_state)) {
			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
			goto fail;
		}

		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			goto skip_modeset;

		new_stream = create_validate_stream_for_sink(aconnector,
							     &new_crtc_state->mode,
							     dm_new_conn_state,
							     dm_old_crtc_state->stream);

		/*
		 * we can have no stream on ACTION_SET if a display
		 * was disconnected during S3, in this case it is not an
		 * error, the OS will be updated after detection, and
		 * will do the right thing on next atomic commit
		 */

		if (!new_stream) {
			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
					__func__, acrtc->base.base.id);
			ret = -ENOMEM;
			goto fail;
		}

		/*
		 * TODO: Check VSDB bits to decide whether this should
		 * be enabled or not.
		 */
		new_stream->triggered_crtc_reset.enabled =
			dm->force_timing_sync;

		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

		ret = fill_hdr_info_packet(drm_new_conn_state,
					   &new_stream->hdr_static_metadata);
		if (ret)
			goto fail;

		/*
		 * If we already removed the old stream from the context
		 * (and set the new stream to NULL) then we can't reuse
		 * the old stream even if the stream and scaling are unchanged.
		 * We'll hit the BUG_ON and black screen.
		 *
		 * TODO: Refactor this function to allow this check to work
		 * in all conditions.
		 */
		if (dm_new_crtc_state->stream &&
		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
			goto skip_modeset;

		if (dm_new_crtc_state->stream &&
		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
			new_crtc_state->mode_changed = false;
			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
					 new_crtc_state->mode_changed);
		}
	}

	/* mode_changed flag may get updated above, need to check again */
	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
		goto skip_modeset;

	drm_dbg_state(state->dev,
		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
		"planes_changed:%d, mode_changed:%d, active_changed:%d, "
		"connectors_changed:%d\n",
		acrtc->crtc_id,
		new_crtc_state->enable,
		new_crtc_state->active,
		new_crtc_state->planes_changed,
		new_crtc_state->mode_changed,
		new_crtc_state->active_changed,
		new_crtc_state->connectors_changed);

	/* Remove stream for any changed/disabled CRTC */
	if (!enable) {

		if (!dm_old_crtc_state->stream)
			goto skip_modeset;

		if (dm_new_crtc_state->stream &&
		    is_timing_unchanged_for_freesync(new_crtc_state,
						     old_crtc_state)) {
			new_crtc_state->mode_changed = false;
			DRM_DEBUG_DRIVER(
				"Mode change not required for front porch change, "
				"setting mode_changed to %d",
				new_crtc_state->mode_changed);

			set_freesync_fixed_config(dm_new_crtc_state);

			goto skip_modeset;
		} else if (aconnector &&
			   is_freesync_video_mode(&new_crtc_state->mode,
						  aconnector)) {
			struct drm_display_mode *high_mode;

			high_mode = get_highest_refresh_rate_mode(aconnector, false);
			if (!drm_mode_equal(&new_crtc_state->mode, high_mode))
				set_freesync_fixed_config(dm_new_crtc_state);
		}

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
				 crtc->base.id);

		/* i.e. reset mode */
		if (dc_remove_stream_from_ctx(
				dm->dc,
				dm_state->context,
				dm_old_crtc_state->stream) != DC_OK) {
			ret = -EINVAL;
			goto fail;
		}

		dc_stream_release(dm_old_crtc_state->stream);
		dm_new_crtc_state->stream = NULL;

		reset_freesync_config_for_crtc(dm_new_crtc_state);

		*lock_and_validation_needed = true;

	} else { /* Add stream for any updated/enabled CRTC */
		/*
		 * Quick fix to prevent a NULL pointer on new_stream when
		 * MST connectors are added but not found in the existing
		 * crtc_state in the chained mode.
		 * TODO: need to dig out the root cause of that
		 */
		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
			goto skip_modeset;

		if (modereset_required(new_crtc_state))
			goto skip_modeset;

		if (modeset_required(new_crtc_state, new_stream,
				     dm_old_crtc_state->stream)) {

			WARN_ON(dm_new_crtc_state->stream);

			ret = dm_atomic_get_state(state, &dm_state);
			if (ret)
				goto fail;

			dm_new_crtc_state->stream = new_stream;

			dc_stream_retain(new_stream);

			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
					 crtc->base.id);

			if (dc_add_stream_to_ctx(
					dm->dc,
					dm_state->context,
					dm_new_crtc_state->stream) != DC_OK) {
				ret = -EINVAL;
				goto fail;
			}

			*lock_and_validation_needed = true;
		}
	}

skip_modeset:
	/* Release extra reference */
	if (new_stream)
		dc_stream_release(new_stream);

	/*
	 * We want to do dc stream updates that do not require a
	 * full modeset below.
	 */
	if (!(enable && aconnector && new_crtc_state->active))
		return 0;
	/*
	 * Given above conditions, the dc state cannot be NULL because:
	 * 1. We're in the process of enabling CRTCs (just been added
	 *    to the dc context, or already is on the context)
	 * 2. Has a valid connector attached, and
	 * 3. Is currently active and enabled.
	 * => The dc stream state currently exists.
	 */
	BUG_ON(dm_new_crtc_state->stream == NULL);

	/* Scaling or underscan settings */
	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
	    drm_atomic_crtc_needs_modeset(new_crtc_state))
		update_stream_scaling_settings(
			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);

	/* ABM settings */
	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

	/*
	 * Color management settings. We also update color properties
	 * when a modeset is needed, to ensure it gets reprogrammed.
	 */
	if (dm_new_crtc_state->base.color_mgmt_changed ||
	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
		if (ret)
			goto fail;
	}

	/* Update Freesync settings. */
	get_freesync_config_for_crtc(dm_new_crtc_state,
				     dm_new_conn_state);

	return ret;

fail:
	if (new_stream)
		dc_stream_release(new_stream);
	return ret;
}

static bool should_reset_plane(struct drm_atomic_state *state,
			       struct drm_plane *plane,
			       struct drm_plane_state *old_plane_state,
			       struct drm_plane_state *new_plane_state)
{
	struct drm_plane *other;
	struct drm_plane_state *old_other_state, *new_other_state;
	struct drm_crtc_state *new_crtc_state;
	int i;

	/*
	 * TODO: Remove this hack once the checks below are sufficient
	 * to determine when we need to reset all the planes on
	 * the stream.
	 */
	if (state->allow_modeset)
		return true;

	/* Exit early if we know that we're adding or removing the plane. */
	if (old_plane_state->crtc != new_plane_state->crtc)
		return true;

	/* old crtc == new_crtc == NULL, plane not in context. */
	if (!new_plane_state->crtc)
		return false;

	new_crtc_state =
		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);

	if (!new_crtc_state)
		return true;

	/* CRTC Degamma changes currently require us to recreate planes. */
	if (new_crtc_state->color_mgmt_changed)
		return true;

	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
		return true;

	/*
	 * If there are any new primary or overlay planes being added or
	 * removed then the z-order can potentially change. To ensure
	 * correct z-order and pipe acquisition the current DC architecture
	 * requires us to remove and recreate all existing planes.
	 *
	 * TODO: Come up with a more elegant solution for this.
	 */
	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
		struct amdgpu_framebuffer *old_afb, *new_afb;

		if (other->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (old_other_state->crtc != new_plane_state->crtc &&
		    new_other_state->crtc != new_plane_state->crtc)
			continue;

		if (old_other_state->crtc != new_other_state->crtc)
			return true;

		/* Src/dst size and scaling updates. */
		if (old_other_state->src_w != new_other_state->src_w ||
		    old_other_state->src_h != new_other_state->src_h ||
		    old_other_state->crtc_w != new_other_state->crtc_w ||
		    old_other_state->crtc_h != new_other_state->crtc_h)
			return true;

		/* Rotation / mirroring updates. */
		if (old_other_state->rotation != new_other_state->rotation)
			return true;

		/* Blending updates. */
		if (old_other_state->pixel_blend_mode !=
		    new_other_state->pixel_blend_mode)
			return true;

		/* Alpha updates. */
		if (old_other_state->alpha != new_other_state->alpha)
			return true;

		/* Colorspace changes. */
		if (old_other_state->color_range != new_other_state->color_range ||
		    old_other_state->color_encoding != new_other_state->color_encoding)
			return true;

		/* Framebuffer checks fall at the end. */
		if (!old_other_state->fb || !new_other_state->fb)
			continue;

		/* Pixel format changes can require bandwidth updates. */
		if (old_other_state->fb->format != new_other_state->fb->format)
			return true;

		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;

		/* Tiling and DCC changes also require bandwidth updates. */
		if (old_afb->tiling_flags != new_afb->tiling_flags ||
		    old_afb->base.modifier != new_afb->base.modifier)
			return true;
	}

	return false;
}

static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
			      struct drm_plane_state *new_plane_state,
			      struct drm_framebuffer *fb)
{
	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
	unsigned int pitch;
	bool linear;

	if (fb->width > new_acrtc->max_cursor_width ||
	    fb->height > new_acrtc->max_cursor_height) {
		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
				 new_plane_state->fb->width,
				 new_plane_state->fb->height);
		return -EINVAL;
	}
	if (new_plane_state->src_w != fb->width << 16 ||
	    new_plane_state->src_h != fb->height << 16) {
		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
		return -EINVAL;
	}

	/* Pitch in pixels */
	pitch = fb->pitches[0] / fb->format->cpp[0];

	if (fb->width != pitch) {
		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
				 fb->width, pitch);
		return -EINVAL;
	}

	switch (pitch) {
	case 64:
	case 128:
	case 256:
		/* FB pitch is supported by cursor plane */
		break;
	default:
		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
		return -EINVAL;
	}

	/*
	 * Core DRM takes care of checking FB modifiers, so we only need to
	 * check tiling flags when the FB doesn't have a modifier.
	 */
	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
		if (adev->family < AMDGPU_FAMILY_AI) {
			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
				 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
		} else {
			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
		}
		if (!linear) {
			DRM_DEBUG_ATOMIC("Cursor FB not linear");
			return -EINVAL;
		}
	}

	return 0;
}

static int dm_update_plane_state(struct dc *dc,
				 struct drm_atomic_state *state,
				 struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state,
				 struct drm_plane_state *new_plane_state,
				 bool enable,
				 bool *lock_and_validation_needed)
{
	struct dm_atomic_state *dm_state = NULL;
	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
	struct amdgpu_crtc *new_acrtc;
	bool needs_reset;
	int ret = 0;

	new_plane_crtc = new_plane_state->crtc;
	old_plane_crtc = old_plane_state->crtc;
	dm_new_plane_state = to_dm_plane_state(new_plane_state);
	dm_old_plane_state = to_dm_plane_state(old_plane_state);

	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
		if (!enable || !new_plane_crtc ||
		    drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		new_acrtc = to_amdgpu_crtc(new_plane_crtc);

		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
			return -EINVAL;
		}

		if (new_plane_state->fb) {
			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
						 new_plane_state->fb);
			if (ret)
				return ret;
		}

		return 0;
	}

	needs_reset = should_reset_plane(state, plane, old_plane_state,
					 new_plane_state);

	/* Remove any changed/removed planes */
	if (!enable) {
		if (!needs_reset)
			return 0;

		if (!old_plane_crtc)
			return 0;

		old_crtc_state = drm_atomic_get_old_crtc_state(
				state, old_plane_crtc);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (!dm_old_crtc_state->stream)
			return 0;

		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, old_plane_crtc->base.id);

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			return ret;

		if (!dc_remove_plane_from_context(
				dc,
				dm_old_crtc_state->stream,
				dm_old_plane_state->dc_state,
				dm_state->context)) {
			return -EINVAL;
		}

		dc_plane_state_release(dm_old_plane_state->dc_state);
		dm_new_plane_state->dc_state = NULL;

		*lock_and_validation_needed = true;

	} else { /* Add new planes */
		struct dc_plane_state *dc_new_plane_state;

		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		if (!new_plane_crtc)
			return 0;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (!dm_new_crtc_state->stream)
			return 0;

		if (!needs_reset)
			return 0;

		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
		if (ret)
			return ret;

		WARN_ON(dm_new_plane_state->dc_state);

		dc_new_plane_state = dc_create_plane_state(dc);
		if (!dc_new_plane_state)
			return -ENOMEM;

		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, new_plane_crtc->base.id);

		ret = fill_dc_plane_attributes(
			drm_to_adev(new_plane_crtc->dev),
			dc_new_plane_state,
			new_plane_state,
			new_crtc_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		/*
		 * Any atomic check errors that occur after this will
		 * not need a release. The plane state will be attached
		 * to the stream, and therefore part of the atomic
		 * state. It'll be released when the atomic state is
		 * cleaned.
		 */
		if (!dc_add_plane_to_context(
				dc,
				dm_new_crtc_state->stream,
				dc_new_plane_state,
				dm_state->context)) {
			dc_plane_state_release(dc_new_plane_state);
			return -EINVAL;
		}

		dm_new_plane_state->dc_state = dc_new_plane_state;

		dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);

		/*
		 * Tell DC to do a full surface update every time there
		 * is a plane change. Inefficient, but works for now.
		 */
		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;

		*lock_and_validation_needed = true;
	}

	return ret;
}
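
/*
 * DRM plane source coordinates are 16.16 fixed point, hence the >> 16 in
 * dm_get_oriented_plane_size() below to recover whole pixels. For 90/270
 * degree rotations the source width and height are swapped before any
 * scaling math is done on them.
 */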
static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
				       int *src_w, int *src_h)
{
	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
	case DRM_MODE_ROTATE_90:
	case DRM_MODE_ROTATE_270:
		*src_w = plane_state->src_h >> 16;
		*src_h = plane_state->src_w >> 16;
		break;
	case DRM_MODE_ROTATE_0:
	case DRM_MODE_ROTATE_180:
	default:
		*src_w = plane_state->src_w >> 16;
		*src_h = plane_state->src_h >> 16;
		break;
	}
}
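
/*
 * Note: the scale factors compared in dm_check_crtc_cursor() below are
 * integer thousandths (crtc size * 1000 / source size), so the cursor is
 * considered to match an underlying plane when their scaling ratios agree
 * to three decimal places.
 */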
static int dm_check_crtc_cursor(struct drm_atomic_state *state,
				struct drm_crtc *crtc,
				struct drm_crtc_state *new_crtc_state)
{
	struct drm_plane *cursor = crtc->cursor, *underlying;
	struct drm_plane_state *new_cursor_state, *new_underlying_state;
	int i;
	int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
	int cursor_src_w, cursor_src_h;
	int underlying_src_w, underlying_src_h;

	/*
	 * On DCE and DCN there is no dedicated hardware cursor plane. We get a
	 * cursor per pipe but it's going to inherit the scaling and
	 * positioning from the underlying pipe. Check the cursor plane's
	 * blending properties match the underlying planes'.
	 */

	new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
	if (!new_cursor_state || !new_cursor_state->fb)
		return 0;

	dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
	cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
	cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;

	for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
		/* Narrow down to non-cursor planes on the same CRTC as the cursor */
		if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
			continue;

		/* Ignore disabled planes */
		if (!new_underlying_state->fb)
			continue;

		dm_get_oriented_plane_size(new_underlying_state,
					   &underlying_src_w, &underlying_src_h);
		underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
		underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;

		if (cursor_scale_w != underlying_scale_w ||
		    cursor_scale_h != underlying_scale_h) {
			drm_dbg_atomic(crtc->dev,
				       "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
				       cursor->base.id, cursor->name, underlying->base.id, underlying->name);
			return -EINVAL;
		}

		/* If this plane covers the whole CRTC, no need to check planes underneath */
		if (new_underlying_state->crtc_x <= 0 &&
		    new_underlying_state->crtc_y <= 0 &&
		    new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
		    new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
			break;
	}

	return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_state *conn_state, *old_conn_state;
	struct amdgpu_dm_connector *aconnector = NULL;
	int i;

	for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
		if (!conn_state->crtc)
			conn_state = old_conn_state;

		if (conn_state->crtc != crtc)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);
		if (!aconnector->port || !aconnector->mst_port)
			aconnector = NULL;
		else
			break;
	}

	if (!aconnector)
		return 0;

	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
}
#endif

/**
 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
 * @dev: The DRM device
 * @state: The atomic state to commit
 *
 * Validate that the given atomic state is programmable by DC into hardware.
 * This involves constructing a &struct dc_state reflecting the new hardware
 * state we wish to commit, then querying DC to see if it is programmable. It's
 * important not to modify the existing DC state. Otherwise, atomic_check
 * may unexpectedly commit hardware changes.
 *
 * When validating the DC state, it's important that the right locks are
 * acquired. For a full update, which removes/adds/updates streams on one
 * CRTC while flipping on another CRTC, acquiring the global lock guarantees
 * that any such full update commit will wait for completion of any outstanding
 * flip using DRM's synchronization events.
 *
 * Note that DM adds the affected connectors for all CRTCs in state, when that
 * might not seem necessary. This is because DC stream creation requires the
 * DC sink, which is tied to the DRM connector state. Cleaning this up should
 * be possible but non-trivial - a possible TODO item.
 *
 * Return: Negative error code if validation failed, 0 otherwise.
 */
static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_atomic_state *dm_state = NULL;
	struct dc *dc = adev->dm.dc;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	enum dc_status status;
	int ret, i;
	bool lock_and_validation_needed = false;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	struct dsc_mst_fairness_vars vars[MAX_PIPES];
	struct drm_dp_mst_topology_state *mst_state;
	struct drm_dp_mst_topology_mgr *mgr;
#endif

	trace_amdgpu_dm_atomic_check_begin(state);

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret) {
		DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
		goto fail;
	}

	/* Check connector changes */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);

		/* Skip connectors that are disabled or part of modeset already. */
		if (!old_con_state->crtc && !new_con_state->crtc)
			continue;

		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
		if (IS_ERR(new_crtc_state)) {
			DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
			ret = PTR_ERR(new_crtc_state);
			goto fail;
		}

		if (dm_old_con_state->abm_level !=
		    dm_new_con_state->abm_level)
			new_crtc_state->connectors_changed = true;
	}

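	/*
	 * Note: DSC bandwidth on a shared MST link is computed for the
	 * topology as a whole, so a modeset on one MST CRTC pulls the other
	 * CRTCs on the same link into this atomic check as well.
	 */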
#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (dc_resource_is_dsc_encoding_supported(dc)) {
		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
				ret = add_affected_mst_dsc_crtcs(state, crtc);
				if (ret) {
					DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
					goto fail;
				}
			}
		}
		pre_validate_dsc(state, &dm_state, vars);
	}
#endif
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->color_mgmt_changed &&
		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
		    dm_old_crtc_state->dsc_force_changed == false)
			continue;

		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
		if (ret) {
			DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
			goto fail;
		}

		if (!new_crtc_state->enable)
			continue;

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret) {
			DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
			goto fail;
		}

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret) {
			DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
			goto fail;
		}

		if (dm_old_crtc_state->dsc_force_changed)
			new_crtc_state->mode_changed = true;
	}

	/*
	 * Add all primary and overlay planes on the CRTC to the state
	 * whenever a plane is enabled to maintain correct z-ordering
	 * and to enable fast surface updates.
	 */
	drm_for_each_crtc(crtc, dev) {
		bool modified = false;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			if (new_plane_state->crtc == crtc ||
			    old_plane_state->crtc == crtc) {
				modified = true;
				break;
			}
		}

		if (!modified)
			continue;

		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			new_plane_state =
				drm_atomic_get_plane_state(state, plane);

			if (IS_ERR(new_plane_state)) {
				ret = PTR_ERR(new_plane_state);
				DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
				goto fail;
			}
		}
	}

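	/*
	 * Note the ordering of the passes below: DC requires removals to be
	 * processed before additions. Planes are removed and CRTCs disabled
	 * first (enable == false), then CRTCs are enabled and planes added
	 * back (enable == true), so the intermediate dc_state never holds
	 * conflicting pipe assignments.
	 */
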
	/* Remove existing planes if they are modified */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    false,
					    &lock_and_validation_needed);
		if (ret) {
			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
			goto fail;
		}
	}

	/* Disable all crtcs which require disable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   false,
					   &lock_and_validation_needed);
		if (ret) {
			DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
			goto fail;
		}
	}

	/* Enable all crtcs which require enable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   true,
					   &lock_and_validation_needed);
		if (ret) {
			DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
			goto fail;
		}
	}

	/* Add new/modified planes */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    true,
					    &lock_and_validation_needed);
		if (ret) {
			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
			goto fail;
		}
	}

b349f76e
ES
11114 /* Run this here since we want to validate the streams we created */
11115 ret = drm_atomic_helper_check_planes(dev, state);
11116 if (ret) {
11117 DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
b349f76e 11118 goto fail;
68ca1c3e 11119 }
62f55537 11120
11121 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11122 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
11123 if (dm_new_crtc_state->mpo_requested)
11124 DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
11125 }
11126
11127 /* Check cursor-plane scaling: DC draws the cursor with the underlying plane's pipe, so their scaling must match */
11128 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11129 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
11130 if (ret) {
11131 DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
12f4849a 11132 goto fail;
68ca1c3e 11133 }
11134 }
11135
11136 if (state->legacy_cursor_update) {
11137 /*
11138 * This is a fast cursor update coming from the plane update
11139 * helper, check if it can be done asynchronously for better
11140 * performance.
11141 */
11142 state->async_update =
11143 !drm_atomic_helper_async_check(dev, state);
11144
11145 /*
11146 * Skip the remaining global validation if this is an async
11147 * update. Cursor updates can be done without affecting
11148 * state or bandwidth calcs and this avoids the performance
11149 * penalty of locking the private state object and
11150 * allocating a new dc_state.
11151 */
11152 if (state->async_update)
11153 return 0;
11154 }
11155
ebdd27e1 11156 /* Check scaling and underscan changes */
1f6010a9 11157 /* TODO: Scaling-changes validation was removed due to the inability to commit
11158 * a new stream into context w/o causing a full reset. Need to
11159 * decide how to handle.
11160 */
c2cea706 11161 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11162 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11163 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11164 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
11165
11166 /* Skip any modesets/resets */
11167 if (!acrtc || drm_atomic_crtc_needs_modeset(
11168 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
11169 continue;
11170
b830ebc9 11171 /* Skip anything that is not a scaling or underscan change */
54d76575 11172 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
11173 continue;
11174
11175 lock_and_validation_needed = true;
11176 }
11177
11178#if defined(CONFIG_DRM_AMD_DC_DCN)
11179 /* set the slot info for each mst_state based on the link encoding format */
11180 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11181 struct amdgpu_dm_connector *aconnector;
11182 struct drm_connector *connector;
11183 struct drm_connector_list_iter iter;
11184 u8 link_coding_cap;
11185
11186 if (!mgr->mst_state)
11187 continue;
11188
11189 drm_connector_list_iter_begin(dev, &iter);
11190 drm_for_each_connector_iter(connector, &iter) {
11191 int id = connector->index;
11192
11193 if (id == mst_state->mgr->conn_base_id) {
11194 aconnector = to_amdgpu_dm_connector(connector);
11195 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11196 drm_dp_mst_update_slots(mst_state, link_coding_cap);
11197
11198 break;
11199 }
11200 }
11201 drm_connector_list_iter_end(&iter);
11202
11203 }
11204#endif
11205 /*
11206 * Streams and planes are reset when there are changes that affect
11207 * bandwidth. Anything that affects bandwidth needs to go through
11208 * DC global validation to ensure that the configuration can be applied
11209 * to hardware.
11210 *
11211 * We currently have to stall out here in atomic_check for outstanding
11212 * commits to finish in this case, because our IRQ handlers reference
11213 * DRM state directly; without the stall we can end up disabling
11214 * interrupts too early.
11215 *
11216 * TODO: Remove this stall and drop DM state private objects.
a87fa993 11217 */
f6d7c7fa 11218 if (lock_and_validation_needed) {
eb3dc897 11219 ret = dm_atomic_get_state(state, &dm_state);
11220 if (ret) {
11221 DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
eb3dc897 11222 goto fail;
68ca1c3e 11223 }
11224
11225 ret = do_aquire_global_lock(dev, state);
11226 if (ret) {
11227 DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
e7b07cee 11228 goto fail;
68ca1c3e 11229 }
1dc90497 11230
d9fe1a4c 11231#if defined(CONFIG_DRM_AMD_DC_DCN)
11232 if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
11233 DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
8c20a1ed 11234 goto fail;
68ca1c3e 11235 }
8c20a1ed 11236
6513104b 11237 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11238 if (ret) {
11239 DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
29b9ba74 11240 goto fail;
68ca1c3e 11241 }
d9fe1a4c 11242#endif
29b9ba74 11243
11244 /*
11245 * Perform validation of MST topology in the state:
11246 * We need to perform MST atomic check before calling
11247 * dc_validate_global_state(); otherwise there is a chance
11248 * of getting stuck in an infinite loop and eventually hanging.
11249 */
11250 ret = drm_dp_mst_atomic_check(state);
11251 if (ret) {
11252 DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
ded58c7b 11253 goto fail;
68ca1c3e 11254 }
85fb8bb9 11255 status = dc_validate_global_state(dc, dm_state->context, true);
74a16675 11256 if (status != DC_OK) {
68ca1c3e 11257 DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
74a16675 11258 dc_status_to_str(status), status);
11259 ret = -EINVAL;
11260 goto fail;
11261 }
bd200d19 11262 } else {
674e78ac 11263 /*
11264 * The commit is a fast update. Fast updates shouldn't change
11265 * the DC context or affect global validation, and they can have
11266 * their commit work done in parallel with other commits that do
11267 * not touch the same resource. If we have a new DC context as part of
11268 * the DM atomic state from validation we need to free it and
11269 * retain the existing one instead.
11270 *
11271 * Furthermore, since the DM atomic state only contains the DC
11272 * context and can safely be annulled, we can free the state
11273 * and clear the associated private object now to free
11274 * some memory and avoid a possible use-after-free later.
674e78ac 11275 */
bd200d19 11276
11277 for (i = 0; i < state->num_private_objs; i++) {
11278 struct drm_private_obj *obj = state->private_objs[i].ptr;
bd200d19 11279
11280 if (obj->funcs == adev->dm.atomic_obj.funcs) {
11281 int j = state->num_private_objs-1;
bd200d19 11282
11283 dm_atomic_destroy_state(obj,
11284 state->private_objs[i].state);
11285
11286 /* If i is not at the end of the array then the
11287 * last element needs to be moved to where i was
11288 * before the array can safely be truncated.
11289 */
11290 if (i != j)
11291 state->private_objs[i] =
11292 state->private_objs[j];
bd200d19 11293
11294 state->private_objs[j].ptr = NULL;
11295 state->private_objs[j].state = NULL;
11296 state->private_objs[j].old_state = NULL;
11297 state->private_objs[j].new_state = NULL;
11298
11299 state->num_private_objs = j;
11300 break;
11301 }
bd200d19 11302 }
11303 }
11304
11305 /* Store the overall update type for use later in atomic commit. */
11306 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11307 struct dm_crtc_state *dm_new_crtc_state =
11308 to_dm_crtc_state(new_crtc_state);
11309
11310 dm_new_crtc_state->update_type = lock_and_validation_needed ?
11311 UPDATE_TYPE_FULL :
11312 UPDATE_TYPE_FAST;
11313 }
11314
11315 /* Must be success; any failure should have jumped to fail above */
11316 WARN_ON(ret);
11317
11318 trace_amdgpu_dm_atomic_check_finish(state, ret);
11319
11320 return ret;
11321
11322fail:
11323 if (ret == -EDEADLK)
01e28f9c 11324 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
e7b07cee 11325 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
01e28f9c 11326 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
e7b07cee 11327 else
01e28f9c 11328 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
e7b07cee 11329
11330 trace_amdgpu_dm_atomic_check_finish(state, ret);
11331
11332 return ret;
11333}
11334
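/*
 * Check the sink's DOWN_STREAM_PORT_COUNT DPCD register for the
 * DP_MSA_TIMING_PAR_IGNORED bit. A sink that can ignore the MSA timing
 * parameters can follow source-driven timing changes, so only then is it
 * worth parsing the EDID range descriptor for freesync support (see
 * amdgpu_dm_update_freesync_caps() below).
 */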
11335static bool is_dp_capable_without_timing_msa(struct dc *dc,
11336 struct amdgpu_dm_connector *amdgpu_dm_connector)
11337{
11338 uint8_t dpcd_data;
11339 bool capable = false;
11340
c84dec2f 11341 if (amdgpu_dm_connector->dc_link &&
11342 dm_helpers_dp_read_dpcd(
11343 NULL,
c84dec2f 11344 amdgpu_dm_connector->dc_link,
11345 DP_DOWN_STREAM_PORT_COUNT,
11346 &dpcd_data,
11347 sizeof(dpcd_data))) {
11348 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
11349 }
11350
11351 return capable;
11352}
f9b4f20c 11353
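/*
 * Send one chunk (at most DMUB_EDID_CEA_DATA_CHUNK_BYTES) of a CEA
 * extension block to DMUB with a DMUB_CMD__EDID_CEA command and parse
 * the reply: an ACK for intermediate chunks, or the AMD VSDB contents
 * once the firmware has seen the whole block.
 */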
11354static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11355 unsigned int offset,
11356 unsigned int total_length,
11357 uint8_t *data,
11358 unsigned int length,
11359 struct amdgpu_hdmi_vsdb_info *vsdb)
11360{
11361 bool res;
11362 union dmub_rb_cmd cmd;
11363 struct dmub_cmd_send_edid_cea *input;
11364 struct dmub_cmd_edid_cea_output *output;
11365
11366 if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11367 return false;
11368
11369 memset(&cmd, 0, sizeof(cmd));
11370
11371 input = &cmd.edid_cea.data.input;
11372
11373 cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11374 cmd.edid_cea.header.sub_type = 0;
11375 cmd.edid_cea.header.payload_bytes =
11376 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11377 input->offset = offset;
11378 input->length = length;
eb9e59eb 11379 input->cea_total_length = total_length;
11380 memcpy(input->payload, data, length);
11381
11382 res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11383 if (!res) {
11384 DRM_ERROR("EDID CEA parser failed\n");
11385 return false;
11386 }
11387
11388 output = &cmd.edid_cea.data.output;
11389
11390 if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11391 if (!output->ack.success) {
11392 DRM_ERROR("EDID CEA ack failed at offset %d\n",
11393 output->ack.offset);
11394 }
11395 } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11396 if (!output->amd_vsdb.vsdb_found)
11397 return false;
11398
11399 vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11400 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11401 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11402 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11403 } else {
b76a8062 11404 DRM_WARN("Unknown EDID CEA parser results\n");
11405 return false;
11406 }
11407
11408 return true;
11409}
11410
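/*
 * Legacy DMCU-based CEA parsing: stream the extension block to the DMCU
 * in 8-byte chunks, polling for an ACK after each chunk, and read back
 * the AMD VSDB (freesync) information once the last chunk is consumed.
 */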
11411static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11412 uint8_t *edid_ext, int len,
11413 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11414{
11415 int i;
11416
11417 /* send extension block to DMCU for parsing */
11418 for (i = 0; i < len; i += 8) {
11419 bool res;
11420 int offset;
11421
11422 /* send 8 bytes at a time */
46db138d 11423 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11424 return false;
11425
11426 if (i + 8 == len) {
11427 /* EDID block fully sent; expect the parsed result */
11428 int version, min_rate, max_rate;
11429
46db138d 11430 res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11431 if (res) {
11432 /* amd vsdb found */
11433 vsdb_info->freesync_supported = 1;
11434 vsdb_info->amd_vsdb_version = version;
11435 vsdb_info->min_refresh_rate_hz = min_rate;
11436 vsdb_info->max_refresh_rate_hz = max_rate;
11437 return true;
11438 }
11439 /* not amd vsdb */
11440 return false;
11441 }
11442
11443 /* check for ack */
46db138d 11444 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11445 if (!res)
11446 return false;
11447 }
11448
11449 return false;
11450}
11451
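/*
 * DMUB-based CEA parsing: same 8-byte chunking as the DMCU path, but
 * each chunk is a synchronous DMUB transaction and the VSDB result is
 * filled in by dm_edid_parser_send_cea() as the replies come back.
 */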
11452static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11453 uint8_t *edid_ext, int len,
11454 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11455{
11456 int i;
11457
11458 /* send extension block to DMUB for parsing */
11459 for (i = 0; i < len; i += 8) {
11460 /* send 8 bytes at a time */
11461 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11462 return false;
11463 }
11464
11465 return vsdb_info->freesync_supported;
11466}
11467
11468static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11469 uint8_t *edid_ext, int len,
11470 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11471{
11472 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11473
11474 if (adev->dm.dmub_srv)
11475 return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11476 else
11477 return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11478}
11479
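/*
 * Locate the CEA extension block in the EDID (mirroring
 * drm_find_cea_extension()) and hand it to the firmware parser. Returns
 * the extension index when a valid AMD VSDB was found, -ENODEV otherwise.
 */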
7c7dd774 11480static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11481 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11482{
11483 uint8_t *edid_ext = NULL;
11484 int i;
11485 bool valid_vsdb_found = false;
11486
11487 /*----- drm_find_cea_extension() -----*/
11488 /* No EDID or EDID extensions */
11489 if (edid == NULL || edid->extensions == 0)
7c7dd774 11490 return -ENODEV;
11491
11492 /* Find CEA extension */
11493 for (i = 0; i < edid->extensions; i++) {
11494 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11495 if (edid_ext[0] == CEA_EXT)
11496 break;
11497 }
11498
11499 if (i == edid->extensions)
7c7dd774 11500 return -ENODEV;
11501
11502 /*----- cea_db_offsets() -----*/
11503 if (edid_ext[0] != CEA_EXT)
7c7dd774 11504 return -ENODEV;
11505
11506 valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11507
11508 return valid_vsdb_found ? i : -ENODEV;
11509}
11510
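/*
 * Update the connector's freesync state from a freshly parsed EDID:
 * DP/eDP sinks are probed via the DPCD and the EDID range descriptor,
 * HDMI sinks via the AMD VSDB in the CEA extension block. A refresh-rate
 * window wider than 10 Hz is treated as freesync-capable, and the result
 * is mirrored into the DRM vrr_capable property.
 */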
11511void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11512 struct edid *edid)
e7b07cee 11513{
eb0709ba 11514 int i = 0;
11515 struct detailed_timing *timing;
11516 struct detailed_non_pixel *data;
11517 struct detailed_data_monitor_range *range;
11518 struct amdgpu_dm_connector *amdgpu_dm_connector =
11519 to_amdgpu_dm_connector(connector);
bb47de73 11520 struct dm_connector_state *dm_con_state = NULL;
9ad54467 11521 struct dc_sink *sink;
11522
11523 struct drm_device *dev = connector->dev;
1348969a 11524 struct amdgpu_device *adev = drm_to_adev(dev);
bb47de73 11525 bool freesync_capable = false;
f9b4f20c 11526 struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
b830ebc9 11527
11528 if (!connector->state) {
11529 DRM_ERROR("%s - Connector has no state", __func__);
bb47de73 11530 goto update;
11531 }
11532
11533 sink = amdgpu_dm_connector->dc_sink ?
11534 amdgpu_dm_connector->dc_sink :
11535 amdgpu_dm_connector->dc_em_sink;
11536
11537 if (!edid || !sink) {
11538 dm_con_state = to_dm_connector_state(connector->state);
11539
11540 amdgpu_dm_connector->min_vfreq = 0;
11541 amdgpu_dm_connector->max_vfreq = 0;
11542 amdgpu_dm_connector->pixel_clock_mhz = 0;
11543 connector->display_info.monitor_range.min_vfreq = 0;
11544 connector->display_info.monitor_range.max_vfreq = 0;
11545 freesync_capable = false;
98e6436d 11546
bb47de73 11547 goto update;
11548 }
11549
11550 dm_con_state = to_dm_connector_state(connector->state);
11551
e7b07cee 11552 if (!adev->dm.freesync_module)
bb47de73 11553 goto update;
11554
11556 if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11557 || sink->sink_signal == SIGNAL_TYPE_EDP) {
11558 bool edid_check_required = false;
11559
11560 if (edid) {
11561 edid_check_required = is_dp_capable_without_timing_msa(
11562 adev->dm.dc,
c84dec2f 11563 amdgpu_dm_connector);
e7b07cee 11564 }
e7b07cee 11565
11566 if (edid_check_required && (edid->version > 1 ||
11567 (edid->version == 1 && edid->revision > 1))) {
11568 for (i = 0; i < 4; i++) {
e7b07cee 11569
11570 timing = &edid->detailed_timings[i];
11571 data = &timing->data.other_data;
11572 range = &data->data.range;
11573 /*
11574 * Check if monitor has continuous frequency mode
11575 */
11576 if (data->type != EDID_DETAIL_MONITOR_RANGE)
11577 continue;
11578 /*
11579 * Check for the range-limits-only flag. If flags == 1 then
11580 * no additional timing information is provided.
11581 * Default GTF, GTF secondary curve and CVT are not
11582 * supported.
11583 */
11584 if (range->flags != 1)
11585 continue;
a0ffc3fd 11586
11587 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11588 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11589 amdgpu_dm_connector->pixel_clock_mhz =
11590 range->pixel_clock_mhz * 10;
a0ffc3fd 11591
11592 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11593 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
e7b07cee 11594
11595 break;
11596 }
98e6436d 11597
11598 if (amdgpu_dm_connector->max_vfreq -
11599 amdgpu_dm_connector->min_vfreq > 10) {
98e6436d 11600
11601 freesync_capable = true;
11602 }
11603 }
9b2fdc33 11604 } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11605 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11606 if (i >= 0 && vsdb_info.freesync_supported) {
11607 timing = &edid->detailed_timings[i];
11608 data = &timing->data.other_data;
11609
11610 amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11611 amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11612 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11613 freesync_capable = true;
11614
11615 connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11616 connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11617 }
11618 }
11619
11620update:
11621 if (dm_con_state)
11622 dm_con_state->freesync_capable = freesync_capable;
11623
11624 if (connector->vrr_capable_property)
11625 drm_connector_set_vrr_capable_property(connector,
11626 freesync_capable);
11627}
11628
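/*
 * Propagate the force_timing_sync setting (toggled via debugfs) to every
 * stream in the current DC state and retrigger CRTC synchronization, so
 * the change takes effect without a full modeset.
 */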
11629void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11630{
1348969a 11631 struct amdgpu_device *adev = drm_to_adev(dev);
11632 struct dc *dc = adev->dm.dc;
11633 int i;
11634
11635 mutex_lock(&adev->dm.dc_lock);
11636 if (dc->current_state) {
11637 for (i = 0; i < dc->current_state->stream_count; ++i)
11638 dc->current_state->streams[i]
11639 ->triggered_crtc_reset.enabled =
11640 adev->dm.force_timing_sync;
11641
11642 dm_enable_per_frame_crtc_master_sync(dc->current_state);
11643 dc_trigger_sync(dc, dc->current_state);
11644 }
11645 mutex_unlock(&adev->dm.dc_lock);
11646}
11647
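/*
 * Register access helpers handed to DC. Both go through the CGS layer
 * and feed the amdgpu_dc_wreg/amdgpu_dc_rreg tracepoints; with
 * DM_CHECK_ADDR_0 defined, accesses to register offset 0 are rejected as
 * likely bugs, and reads are refused while a DMUB register-offload
 * gather is in progress.
 */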
11648void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11649 uint32_t value, const char *func_name)
11650{
11651#ifdef DM_CHECK_ADDR_0
11652 if (address == 0) {
11653 DC_ERR("invalid register write. address = 0\n");
11654 return;
11655 }
11656#endif
11657 cgs_write_register(ctx->cgs_device, address, value);
11658 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11659}
11660
11661uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11662 const char *func_name)
11663{
11664 uint32_t value;
11665#ifdef DM_CHECK_ADDR_0
11666 if (address == 0) {
11667 DC_ERR("invalid register read; address = 0\n");
11668 return 0;
11669 }
11670#endif
11671
11672 if (ctx->dmub_srv &&
11673 ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11674 !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11675 ASSERT(false);
11676 return 0;
11677 }
11678
11679 value = cgs_read_register(ctx->cgs_device, address);
11680
11681 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11682
11683 return value;
11684}
81927e28 11685
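/*
 * Translate the async DMUB notification into the synchronous return
 * convention used by the callers: for AUX transfers, success yields the
 * reply length and *operation_result carries an AUX_RET_* code; for
 * SET_CONFIG, success yields 0 and *operation_result carries the
 * sideband status. -1 signals that the access itself failed.
 */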
11686static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
11687 struct dc_context *ctx,
11688 uint8_t status_type,
11689 uint32_t *operation_result)
11690{
11691 struct amdgpu_device *adev = ctx->driver_context;
11692 int return_status = -1;
11693 struct dmub_notification *p_notify = adev->dm.dmub_notify;
11694
11695 if (is_cmd_aux) {
11696 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11697 return_status = p_notify->aux_reply.length;
11698 *operation_result = p_notify->result;
11699 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
11700 *operation_result = AUX_RET_ERROR_TIMEOUT;
11701 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
11702 *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
11703 } else {
11704 *operation_result = AUX_RET_ERROR_UNKNOWN;
11705 }
11706 } else {
11707 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11708 return_status = 0;
11709 *operation_result = p_notify->sc_status;
11710 } else {
11711 *operation_result = SET_CONFIG_UNKNOWN_ERROR;
11712 }
11713 }
11714
11715 return return_status;
11716}
11717
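/*
 * Run a DMUB-offloaded AUX or SET_CONFIG access with a synchronous
 * calling convention: kick off the async request, wait up to 10 seconds
 * on the dmub_aux_transfer_done completion (signalled from the DMUB
 * notification path), then translate the notification into a return
 * value and copy any AUX reply data back into the caller's payload.
 */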
11718int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
11719 unsigned int link_index, void *cmd_payload, void *operation_result)
11720{
11721 struct amdgpu_device *adev = ctx->driver_context;
11722 int ret = 0;
11723
11724 if (is_cmd_aux) {
11725 dc_process_dmub_aux_transfer_async(ctx->dc,
11726 link_index, (struct aux_payload *)cmd_payload);
11727 } else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
11728 (struct set_config_cmd_payload *)cmd_payload,
11729 adev->dm.dmub_notify)) {
11730 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11731 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11732 (uint32_t *)operation_result);
11733 }
11734
9e3a50d2 11735 ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
81927e28 11736 if (ret == 0) {
9e3a50d2 11737 DRM_ERROR("wait_for_completion_timeout timed out!");
11738 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11739 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
11740 (uint32_t *)operation_result);
81927e28 11741 }
81927e28 11742
11743 if (is_cmd_aux) {
11744 if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11745 struct aux_payload *payload = (struct aux_payload *)cmd_payload;
81927e28 11746
11747 payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
11748 if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11749 payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
11750 memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11751 adev->dm.dmub_notify->aux_reply.length);
11752 }
11753 }
11754 }
11755
11756 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11757 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11758 (uint32_t *)operation_result);
81927e28 11759}
11760
11761/*
11762 * Check whether seamless boot is supported.
11763 *
11764 * So far we only support seamless boot on CHIP_VANGOGH.
11765 * If everything goes well, we may consider expanding
11766 * seamless boot to other ASICs.
11767 */
11768bool check_seamless_boot_capability(struct amdgpu_device *adev)
11769{
11770 switch (adev->asic_type) {
11771 case CHIP_VANGOGH:
11772 if (!adev->mman.keep_stolen_vga_memory)
11773 return true;
11774 break;
11775 default:
11776 break;
11777 }
11778
11779 return false;
11780}
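/*
 * A minimal usage sketch (the flag name here reflects the DC init data
 * and is an assumption of this note): during amdgpu_dm_init() the driver
 * can do
 *
 *	if (check_seamless_boot_capability(adev))
 *		init_data.flags.power_down_display_on_boot = false;
 *
 * so that DC keeps the VBIOS/GOP-programmed timing alive instead of
 * powering the display down during boot.
 */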