/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc_link_dp.h"
#include "link_enc_cfg.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/display/drm_hdcp_helper.h>
#endif
#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include "i2caux_interface.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>
#include <linux/dmi.h>

#include <drm/display/drm_dp_mst_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>

#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "soc15_common.h"
#include "vega10_ip_offset.h"

#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
#define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
#define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);

#define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB);
#define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);

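/* Map the dongle type reported in a link's DPCD caps to the DRM subconnector type. */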
static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

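/* Refresh the DP subconnector property from the current sink; no-op for non-DP connectors. */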
static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static void handle_hpd_rx_irq(void *param);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state);
/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
	}
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

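/*
 * Look up the amdgpu_crtc driving the given OTG instance; warns and falls
 * back to CRTC 0 for an invalid (-1) instance, returns NULL if none matches.
 */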
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (WARN_ON(otg_inst == -1))
		return adev->mode_info.crtcs[0];

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

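/* Check whether VRR is active from the per-CRTC IRQ parameters; used by the interrupt handlers. */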
static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

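/* Decide whether the VRR state change between commits requires DC to re-adjust stream timing. */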
static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
					      struct dm_crtc_state *new_state)
{
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
		return true;
	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
		return true;
	else
		return false;
}

HW
371/**
372 * dm_pflip_high_irq() - Handle pageflip interrupt
373 * @interrupt_params: ignored
374 *
375 * Handles the pageflip interrupt by notifying all interested parties
376 * that the pageflip has been completed.
377 */
4562236b
HW
378static void dm_pflip_high_irq(void *interrupt_params)
379{
4562236b
HW
380 struct amdgpu_crtc *amdgpu_crtc;
381 struct common_irq_params *irq_params = interrupt_params;
382 struct amdgpu_device *adev = irq_params->adev;
383 unsigned long flags;
71bbe51a 384 struct drm_pending_vblank_event *e;
71bbe51a
MK
385 uint32_t vpos, hpos, v_blank_start, v_blank_end;
386 bool vrr_active;
4562236b
HW
387
388 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
389
390 /* IRQ could occur when in initial stage */
1f6010a9 391 /* TODO work and BO cleanup */
4562236b 392 if (amdgpu_crtc == NULL) {
cb2318b7 393 DC_LOG_PFLIP("CRTC is null, returning.\n");
4562236b
HW
394 return;
395 }
396
4a580877 397 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
4562236b
HW
398
399 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
cb2318b7 400 DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
4562236b
HW
401 amdgpu_crtc->pflip_status,
402 AMDGPU_FLIP_SUBMITTED,
403 amdgpu_crtc->crtc_id,
404 amdgpu_crtc);
4a580877 405 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
4562236b
HW
406 return;
407 }
408
71bbe51a
MK
409 /* page flip completed. */
410 e = amdgpu_crtc->event;
411 amdgpu_crtc->event = NULL;
4562236b 412
bcd74374 413 WARN_ON(!e);
1159898a 414
585d450c 415 vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
71bbe51a
MK
416
417 /* Fixed refresh rate, or VRR scanout position outside front-porch? */
418 if (!vrr_active ||
585d450c 419 !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
71bbe51a
MK
420 &v_blank_end, &hpos, &vpos) ||
421 (vpos < v_blank_start)) {
422 /* Update to correct count and vblank timestamp if racing with
423 * vblank irq. This also updates to the correct vblank timestamp
424 * even in VRR mode, as scanout is past the front-porch atm.
425 */
426 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
1159898a 427
71bbe51a
MK
428 /* Wake up userspace by sending the pageflip event with proper
429 * count and timestamp of vblank of flip completion.
430 */
431 if (e) {
432 drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
433
434 /* Event sent, so done with vblank for this flip */
435 drm_crtc_vblank_put(&amdgpu_crtc->base);
436 }
437 } else if (e) {
438 /* VRR active and inside front-porch: vblank count and
439 * timestamp for pageflip event will only be up to date after
440 * drm_crtc_handle_vblank() has been executed from late vblank
441 * irq handler after start of back-porch (vline 0). We queue the
442 * pageflip event for send-out by drm_crtc_handle_vblank() with
443 * updated timestamp and count, once it runs after us.
444 *
445 * We need to open-code this instead of using the helper
446 * drm_crtc_arm_vblank_event(), as that helper would
447 * call drm_crtc_accurate_vblank_count(), which we must
448 * not call in VRR mode while we are in front-porch!
449 */
450
451 /* sequence will be replaced by real count during send-out. */
452 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
453 e->pipe = amdgpu_crtc->crtc_id;
454
4a580877 455 list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
71bbe51a
MK
456 e = NULL;
457 }
4562236b 458
fdd1fe57
MK
459 /* Keep track of vblank of this flip for flip throttling. We use the
460 * cooked hw counter, as that one incremented at start of this vblank
461 * of pageflip completion, so last_flip_vblank is the forbidden count
462 * for queueing new pageflips if vsync + VRR is enabled.
463 */
5d1c59c4 464 amdgpu_crtc->dm_irq_params.last_flip_vblank =
e3eff4b5 465 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
fdd1fe57 466
54f5499a 467 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
4a580877 468 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
4562236b 469
cb2318b7
VL
470 DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
471 amdgpu_crtc->crtc_id, amdgpu_crtc,
472 vrr_active, (int) !e);
4562236b
HW
473}
474
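/**
 * dm_vupdate_high_irq() - Handle VUPDATE interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Tracks the measured frame duration for refresh rate tracing and, in VRR
 * mode, handles the core vblank here after the end of the front porch so
 * that vblank timestamping gives valid results.
 */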
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct drm_device *drm_dev;
	struct drm_vblank_crtc *vblank;
	ktime_t frame_duration_ns, previous_timestamp;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
		drm_dev = acrtc->base.dev;
		vblank = &drm_dev->vblank[acrtc->base.index];
		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
		frame_duration_ns = vblank->time - previous_timestamp;

		if (frame_duration_ns > 0) {
			trace_amdgpu_refresh_rate_track(acrtc->base.index,
						frame_duration_ns,
						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
			atomic64_set(&irq_params->previous_timestamp, vblank->time);
		}

		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/**
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/**
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

	if (!acrtc)
		return;

	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */

/**
 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub AUX or SET_CONFIG command completion processing callback.
 * Copies dmub notification to DM which is to be read by the AUX command
 * issuing thread and also signals the event to wake up the thread.
 */
static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
					struct dmub_notification *notify)
{
	if (adev->dm.dmub_notify)
		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
		complete(&adev->dm.dmub_aux_transfer_done);
}

/**
 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub Hpd interrupt processing callback. Gets the display index through
 * the link index and calls helper to do the processing.
 */
static void dmub_hpd_callback(struct amdgpu_device *adev,
			      struct dmub_notification *notify)
{
	struct amdgpu_dm_connector *aconnector;
	struct amdgpu_dm_connector *hpd_aconnector = NULL;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct dc_link *link;
	uint8_t link_index = 0;
	struct drm_device *dev;

	if (adev == NULL)
		return;

	if (notify == NULL) {
		DRM_ERROR("DMUB HPD callback notification was NULL");
		return;
	}

	if (notify->link_index > adev->dm.dc->link_count) {
		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
		return;
	}

	link_index = notify->link_index;
	link = adev->dm.dc->links[link_index];
	dev = adev->dm.ddev;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (link && aconnector->dc_link == link) {
			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
			hpd_aconnector = aconnector;
			break;
		}
	}
	drm_connector_list_iter_end(&iter);

	if (hpd_aconnector) {
		if (notify->type == DMUB_NOTIFICATION_HPD)
			handle_hpd_irq_helper(hpd_aconnector);
		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
			handle_hpd_rx_irq(hpd_aconnector);
	}
}

/**
 * register_dmub_notify_callback - Sets callback for DMUB notify
 * @adev: amdgpu_device pointer
 * @type: Type of dmub notification
 * @callback: Dmub interrupt callback function
 * @dmub_int_thread_offload: offload indicator
 *
 * API to register a dmub callback handler for a dmub notification.
 * Also sets indicator whether callback processing is to be offloaded
 * to the dmub interrupt handling thread.
 * Return: true if successfully registered, false if there is existing registration
 */
static bool register_dmub_notify_callback(struct amdgpu_device *adev,
					  enum dmub_notification_type type,
					  dmub_notify_interrupt_callback_t callback,
					  bool dmub_int_thread_offload)
{
	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
		adev->dm.dmub_callback[type] = callback;
		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
	} else
		return false;

	return true;
}

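/* Deferred worker that dispatches an offloaded DMUB notification to its registered callback. */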
static void dm_handle_hpd_work(struct work_struct *work)
{
	struct dmub_hpd_work *dmub_hpd_wrk;

	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);

	if (!dmub_hpd_wrk->dmub_notify) {
		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
		return;
	}

	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
		dmub_hpd_wrk->dmub_notify);
	}

	kfree(dmub_hpd_wrk->dmub_notify);
	kfree(dmub_hpd_wrk);
}

#define DMUB_TRACE_MAX_READ 64
/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox Interrupt
 * event handler.
 */
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
	struct dmub_notification notify;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dmcub_trace_buf_entry entry = { 0 };
	uint32_t count = 0;
	struct dmub_hpd_work *dmub_hpd_wrk;
	struct dc_link *plink = NULL;

	if (dc_enable_dmub_notifications(adev->dm.dc) &&
	    irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {

		do {
			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
				DRM_ERROR("DM: notify type %d invalid!", notify.type);
				continue;
			}
			if (!dm->dmub_callback[notify.type]) {
				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
				continue;
			}
			if (dm->dmub_thread_offload[notify.type] == true) {
				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
				if (!dmub_hpd_wrk) {
					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
					return;
				}
				dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
				if (!dmub_hpd_wrk->dmub_notify) {
					kfree(dmub_hpd_wrk);
					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
					return;
				}
				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
				if (dmub_hpd_wrk->dmub_notify)
					memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
				dmub_hpd_wrk->adev = adev;
				if (notify.type == DMUB_NOTIFICATION_HPD) {
					plink = adev->dm.dc->links[notify.link_index];
					if (plink) {
						plink->hpd_status =
							notify.hpd_status == DP_HPD_PLUG;
					}
				}
				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
			} else {
				dm->dmub_callback[notify.type](adev, &notify);
			}
		} while (notify.pending_notification);
	}

	do {
		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
							 entry.param0, entry.param1);

			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
					 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
		} else
			break;

		count++;

	} while (count <= DMUB_TRACE_MAX_READ);

	if (count > DMUB_TRACE_MAX_READ)
		DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
}

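/* DM has no clock or power gating of its own to manage; these IP hooks are intentional stubs. */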
static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}
	}
}

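/* Audio component op: copy the ELD of the connector driving the given audio pin into the caller's buffer. */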
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

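/*
 * Load the DMUB firmware into its framebuffer windows and bring the DMCUB
 * hardware up. Returns 0 if DMUB is unsupported on the ASIC or was
 * initialized successfully, negative errno on failure.
 */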
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	/* Reset DMCUB if it was previously running - before we overwrite its memory. */
	status = dmub_srv_hw_reset(dmub_srv);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Error resetting DMUB HW: %d\n", status);

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	switch (adev->ip_versions[DCE_HWIP][0]) {
	case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
		hw_params.dpia_supported = true;
		hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
		break;
	default:
		break;
	}

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	if (!adev->dm.dc->ctx->dmub_srv)
		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

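/* On resume, skip re-initialization if DMUB is already running and just wait for auto-load. */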
static void dm_dmub_hw_resume(struct amdgpu_device *adev)
{
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	enum dmub_status status;
	bool init;

	if (!dmub_srv) {
		/* DMUB isn't supported on the ASIC. */
		return;
	}

	status = dmub_srv_is_hw_init(dmub_srv, &init);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("DMUB hardware init check failed: %d\n", status);

	if (status == DMUB_STATUS_OK && init) {
		/* Wait for firmware load to finish. */
		status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
		if (status != DMUB_STATUS_OK)
			DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
	} else {
		/* Perform the full hardware initialization. */
		dm_dmub_hw_init(adev);
	}
}

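/*
 * Fill a dc_phy_addr_space_config from the GMC aperture and GART settings so
 * DC can program the system aperture and page-table registers for display.
 */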
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	memset(pa_config, 0, sizeof(*pa_config));

	logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increase system aperture high address (add 1)
		 * to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;
}

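/*
 * Deferred vblank work: tracks how many CRTCs have vblank interrupts enabled,
 * toggles DC idle optimizations (MALL) accordingly, and gates PSR entry/exit
 * on the OS's vblank requirements.
 */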
static void vblank_control_worker(struct work_struct *work)
{
	struct vblank_control_work *vblank_work =
		container_of(work, struct vblank_control_work, work);
	struct amdgpu_display_manager *dm = vblank_work->dm;

	mutex_lock(&dm->dc_lock);

	if (vblank_work->enable)
		dm->active_vblank_irq_count++;
	else if (dm->active_vblank_irq_count)
		dm->active_vblank_irq_count--;

	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

	/*
	 * Control PSR based on vblank requirements from OS
	 *
	 * If panel supports PSR SU, there's no need to disable PSR when OS is
	 * submitting fast atomic commits (we infer this by whether the OS
	 * requests vblank events). Fast atomic commits will simply trigger a
	 * full-frame-update (FFU); a specific case of selective-update (SU)
	 * where the SU region is the full hactive*vactive region. See
	 * fill_dc_dirty_rects().
	 */
	if (vblank_work->stream && vblank_work->stream->link) {
		if (vblank_work->enable) {
			if (vblank_work->stream->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 &&
			    vblank_work->stream->link->psr_settings.psr_allow_active)
				amdgpu_dm_psr_disable(vblank_work->stream);
		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
			amdgpu_dm_psr_enable(vblank_work->stream);
		}
	}

	mutex_unlock(&dm->dc_lock);

	dc_stream_release(vblank_work->stream);

	kfree(vblank_work);
}

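/*
 * Offloaded HPD RX work: re-detects the sink and, outside of GPU reset,
 * handles automated DP test requests and link-loss recovery for the link.
 */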
static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
{
	struct hpd_rx_irq_offload_work *offload_work;
	struct amdgpu_dm_connector *aconnector;
	struct dc_link *dc_link;
	struct amdgpu_device *adev;
	enum dc_connection_type new_connection_type = dc_connection_none;
	unsigned long flags;

	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
	aconnector = offload_work->offload_wq->aconnector;

	if (!aconnector) {
		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
		goto skip;
	}

	adev = drm_to_adev(aconnector->base.dev);
	dc_link = aconnector->dc_link;

	mutex_lock(&aconnector->hpd_lock);
	if (!dc_link_detect_sink(dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");
	mutex_unlock(&aconnector->hpd_lock);

	if (new_connection_type == dc_connection_none)
		goto skip;

	if (amdgpu_in_reset(adev))
		goto skip;

	mutex_lock(&adev->dm.dc_lock);
	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
		dc_link_dp_handle_automated_test(dc_link);
	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
		dc_link_dp_handle_link_loss(dc_link);
		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
		offload_work->offload_wq->is_handling_link_loss = false;
		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
	}
	mutex_unlock(&adev->dm.dc_lock);

skip:
	kfree(offload_work);
}

static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
{
	int max_caps = dc->caps.max_links;
	int i = 0;
	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;

	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);

	if (!hpd_rx_offload_wq)
		return NULL;

	for (i = 0; i < max_caps; i++) {
		hpd_rx_offload_wq[i].wq =
			create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");

		if (hpd_rx_offload_wq[i].wq == NULL) {
			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
			return NULL;
		}

		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
	}

	return hpd_rx_offload_wq;
}

struct amdgpu_stutter_quirk {
	u16 chip_vendor;
	u16 chip_device;
	u16 subsys_vendor;
	u16 subsys_device;
	u8 revision;
};

static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
	{ 0, 0, 0, 0, 0 },
};

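/* Return true if this PCI device matches an entry in the stutter quirk list above. */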
static bool dm_should_disable_stutter(struct pci_dev *pdev)
{
	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;

	while (p && p->chip_device != 0) {
		if (pdev->vendor == p->chip_vendor &&
		    pdev->device == p->chip_device &&
		    pdev->subsystem_vendor == p->subsys_vendor &&
		    pdev->subsystem_device == p->subsys_device &&
		    pdev->revision == p->revision) {
			return true;
		}
		++p;
	}
	return false;
}

static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
		},
	},
	{}
};

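/* Set the AUX HPD-disconnect quirk flag if the system matches the DMI table above. */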
static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
{
	const struct dmi_system_id *dmi_id;

	dm->aux_hpd_discon_quirk = false;

	dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
	if (dmi_id) {
		dm->aux_hpd_discon_quirk = true;
		DRM_INFO("aux_hpd_discon_quirk attached\n");
	}
}

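/*
 * Create and initialize the display manager: IRQ handling, the DC instance
 * and its feature/debug flags, DMUB, and the optional freesync, HDCP and
 * DMUB-outbox support.
 */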
7578ecda 1440static int amdgpu_dm_init(struct amdgpu_device *adev)
4562236b
HW
1441{
1442 struct dc_init_data init_data;
52704fca
BL
1443#ifdef CONFIG_DRM_AMD_DC_HDCP
1444 struct dc_callback_init init_params;
1445#endif
743b9786 1446 int r;
52704fca 1447
4a580877 1448 adev->dm.ddev = adev_to_drm(adev);
4562236b
HW
1449 adev->dm.adev = adev;
1450
4562236b
HW
1451 /* Zero all the fields */
1452 memset(&init_data, 0, sizeof(init_data));
52704fca
BL
1453#ifdef CONFIG_DRM_AMD_DC_HDCP
1454 memset(&init_params, 0, sizeof(init_params));
1455#endif
4562236b 1456
674e78ac 1457 mutex_init(&adev->dm.dc_lock);
6ce8f316 1458 mutex_init(&adev->dm.audio_lock);
ea3b4242 1459 spin_lock_init(&adev->dm.vblank_lock);
674e78ac 1460
4562236b
HW
1461 if(amdgpu_dm_irq_init(adev)) {
1462 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1463 goto error;
1464 }
1465
1466 init_data.asic_id.chip_family = adev->family;
1467
2dc31ca1 1468 init_data.asic_id.pci_revision_id = adev->pdev->revision;
4562236b 1469 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
dae66a04 1470 init_data.asic_id.chip_id = adev->pdev->device;
4562236b 1471
770d13b1 1472 init_data.asic_id.vram_width = adev->gmc.vram_width;
4562236b
HW
1473 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1474 init_data.asic_id.atombios_base_address =
1475 adev->mode_info.atom_context->bios;
1476
1477 init_data.driver = adev;
1478
1479 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1480
1481 if (!adev->dm.cgs_device) {
1482 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1483 goto error;
1484 }
1485
1486 init_data.cgs_device = adev->dm.cgs_device;
1487
4562236b
HW
1488 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1489
fd546bc5
AD
1490 switch (adev->ip_versions[DCE_HWIP][0]) {
1491 case IP_VERSION(2, 1, 0):
1492 switch (adev->dm.dmcub_fw_version) {
1493 case 0: /* development */
1494 case 0x1: /* linux-firmware.git hash 6d9f399 */
1495 case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1496 init_data.flags.disable_dmcu = false;
1497 break;
1498 default:
1499 init_data.flags.disable_dmcu = true;
1500 }
1501 break;
1502 case IP_VERSION(2, 0, 3):
1503 init_data.flags.disable_dmcu = true;
1504 break;
1505 default:
1506 break;
1507 }
1508
60fb100b
AD
1509 switch (adev->asic_type) {
1510 case CHIP_CARRIZO:
1511 case CHIP_STONEY:
1ebcaebd
NK
1512 init_data.flags.gpu_vm_support = true;
1513 break;
60fb100b 1514 default:
1d789535 1515 switch (adev->ip_versions[DCE_HWIP][0]) {
559f591d
AD
1516 case IP_VERSION(1, 0, 0):
1517 case IP_VERSION(1, 0, 1):
a7f520bf
AD
1518 /* enable S/G on PCO and RV2 */
1519 if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1520 (adev->apu_flags & AMD_APU_IS_PICASSO))
1521 init_data.flags.gpu_vm_support = true;
1522 break;
fd546bc5 1523 case IP_VERSION(2, 1, 0):
c08182f2
AD
1524 case IP_VERSION(3, 0, 1):
1525 case IP_VERSION(3, 1, 2):
1526 case IP_VERSION(3, 1, 3):
b5b8ed44 1527 case IP_VERSION(3, 1, 5):
0fe382fb 1528 case IP_VERSION(3, 1, 6):
c08182f2
AD
1529 init_data.flags.gpu_vm_support = true;
1530 break;
c08182f2
AD
1531 default:
1532 break;
1533 }
60fb100b
AD
1534 break;
1535 }
6e227308 1536
a7f520bf
AD
1537 if (init_data.flags.gpu_vm_support)
1538 adev->mode_info.gpu_vm_support = true;
1539
04b94af4
AD
1540 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1541 init_data.flags.fbc_support = true;
1542
d99f38ae
AD
1543 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1544 init_data.flags.multi_mon_pp_mclk_switch = true;
1545
eaf56410
LL
1546 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1547 init_data.flags.disable_fractional_pwm = true;
a5148245
ZL
1548
1549 if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1550 init_data.flags.edp_no_power_sequencing = true;
eaf56410 1551
12320274
AP
1552 if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1553 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1554 if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1555 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
12320274 1556
7aba117a 1557 init_data.flags.seamless_boot_edp_requested = false;
78ad75f8 1558
1edf5ae1 1559 if (check_seamless_boot_capability(adev)) {
7aba117a 1560 init_data.flags.seamless_boot_edp_requested = true;
1edf5ae1
ZL
1561 init_data.flags.allow_seamless_boot_optimization = true;
1562 DRM_INFO("Seamless boot condition check passed\n");
1563 }
1564
a8201902
LM
1565 init_data.flags.enable_mipi_converter_optimization = true;
1566
e5028e9f
HW
1567 init_data.dcn_reg_offsets = adev->reg_offset[DCE_HWIP][0];
1568
0dd79532 1569 INIT_LIST_HEAD(&adev->dm.da_list);
57b9f338
FZ
1570
1571 retrieve_dmi_info(&adev->dm);
1572
4562236b
HW
1573 /* Display Core create. */
1574 adev->dm.dc = dc_create(&init_data);
1575
423788c7 1576 if (adev->dm.dc) {
76121231 1577 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
423788c7 1578 } else {
76121231 1579 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
423788c7
ES
1580 goto error;
1581 }
4562236b 1582
8a791dab
HW
1583 if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1584 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1585 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1586 }
1587
f99d8762
HW
1588 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1589 adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1590 if (dm_should_disable_stutter(adev->pdev))
1591 adev->dm.dc->debug.disable_stutter = true;
f99d8762 1592
1593 if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1594 adev->dm.dc->debug.disable_stutter = true;
1595
2665f63a 1596 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
8a791dab 1597 adev->dm.dc->debug.disable_dsc = true;
1598 adev->dm.dc->debug.disable_dsc_edp = true;
1599 }
1600
1601 if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1602 adev->dm.dc->debug.disable_clock_gate = true;
1603
1604 if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
1605 adev->dm.dc->debug.force_subvp_mclk_switch = true;
1606
1607 r = dm_dmub_hw_init(adev);
1608 if (r) {
1609 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1610 goto error;
1611 }
1612
1613 dc_hardware_init(adev->dm.dc);
1614
1615 adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1616 if (!adev->dm.hpd_rx_offload_wq) {
1617 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1618 goto error;
1619 }
1620
3ca001af 1621 if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1622 struct dc_phy_addr_space_config pa_config;
1623
0b08c54b 1624 mmhub_read_system_context(adev, &pa_config);
c0fb85ae 1625
1626 // Call the DC init_memory func
1627 dc_setup_system_context(adev->dm.dc, &pa_config);
1628 }
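/*
 * The pa_config read back above describes the physical address space
 * layout (system aperture, GART aperture and page table base) that DC
 * needs so DCN hardware can scan out of GART-mapped system memory.
 */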
c0fb85ae 1629
1630 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1631 if (!adev->dm.freesync_module) {
1632 DRM_ERROR(
1633 "amdgpu: failed to initialize freesync_module.\n");
1634 } else
f1ad2f5e 1635 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1636 adev->dm.freesync_module);
1637
1638 amdgpu_dm_init_color_mod();
1639
ea3b4242 1640 if (adev->dm.dc->caps.max_links > 0) {
1641 adev->dm.vblank_control_workqueue =
1642 create_singlethread_workqueue("dm_vblank_control_workqueue");
1643 if (!adev->dm.vblank_control_workqueue)
ea3b4242 1644 DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
ea3b4242 1645 }
ea3b4242 1646
52704fca 1647#ifdef CONFIG_DRM_AMD_DC_HDCP
c08182f2 1648 if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
e50dc171 1649 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
52704fca 1650
1651 if (!adev->dm.hdcp_workqueue)
1652 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1653 else
1654 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
52704fca 1655
1656 dc_init_callbacks(adev->dm.dc, &init_params);
1657 }
1658#endif
1659#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1660 adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
52704fca 1661#endif
11d526f1 1662 if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
1663 init_completion(&adev->dm.dmub_aux_transfer_done);
1664 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1665 if (!adev->dm.dmub_notify) {
1666 DRM_INFO("amdgpu: failed to allocate adev->dm.dmub_notify\n");
1667 goto error;
1668 }
1669
1670 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1671 if (!adev->dm.delayed_hpd_wq) {
1672 DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1673 goto error;
1674 }
1675
81927e28 1676 amdgpu_dm_outbox_init(adev);
1677 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1678 dmub_aux_setconfig_callback, false)) {
1679 DRM_ERROR("amdgpu: failed to register dmub aux callback\n");
1680 goto error;
1681 }
1682 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1683 DRM_ERROR("amdgpu: failed to register dmub hpd callback\n");
1684 goto error;
1685 }
1686 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1687 DRM_ERROR("amdgpu: failed to register dmub hpd irq callback\n");
1688 goto error;
1689 }
1690 }
1691
1692 if (amdgpu_dm_initialize_drm_device(adev)) {
1693 DRM_ERROR(
1694 "amdgpu: failed to initialize sw for display support.\n");
1695 goto error;
1696 }
1697
1698 /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
1699 * It is expected that DMUB will resend any pending notifications at this point, for
1700 * example HPD from DPIA.
1701 */
1702 if (dc_is_dmub_outbox_supported(adev->dm.dc))
1703 dc_enable_dmub_outbox(adev->dm.dc);
1704
1705 /* create fake encoders for MST */
1706 dm_dp_create_fake_mst_encoders(adev);
1707
1708 /* TODO: Add_display_info? */
1709
1710 /* TODO use dynamic cursor width */
1711 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1712 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
4562236b 1713
4a580877 1714 if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1715 DRM_ERROR(
1716 "amdgpu: failed to initialize vblank support.\n");
1717 goto error;
1718 }
1719
c0fb85ae 1720
f1ad2f5e 1721 DRM_DEBUG_DRIVER("KMS initialized.\n");
1722
1723 return 0;
1724error:
1725 amdgpu_dm_fini(adev);
1726
59d0f396 1727 return -EINVAL;
1728}
1729
1730static int amdgpu_dm_early_fini(void *handle)
1731{
1732 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1733
1734 amdgpu_dm_audio_fini(adev);
1735
1736 return 0;
1737}
1738
7578ecda 1739static void amdgpu_dm_fini(struct amdgpu_device *adev)
4562236b 1740{
1741 int i;
1742
1743 if (adev->dm.vblank_control_workqueue) {
1744 destroy_workqueue(adev->dm.vblank_control_workqueue);
1745 adev->dm.vblank_control_workqueue = NULL;
1746 }
09a5df6c 1747
1748 for (i = 0; i < adev->dm.display_indexes_num; i++) {
1749 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1750 }
1751
4562236b 1752 amdgpu_dm_destroy_drm_device(&adev->dm);
c8bdf2b6 1753
1754#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1755 if (adev->dm.crc_rd_wrk) {
1756 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1757 kfree(adev->dm.crc_rd_wrk);
1758 adev->dm.crc_rd_wrk = NULL;
1759 }
1760#endif
1761#ifdef CONFIG_DRM_AMD_DC_HDCP
1762 if (adev->dm.hdcp_workqueue) {
e96b1b29 1763 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1764 adev->dm.hdcp_workqueue = NULL;
1765 }
1766
1767 if (adev->dm.dc)
1768 dc_deinit_callbacks(adev->dm.dc);
1769#endif
51ba6912 1770
3beac533 1771 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
9a71c7d3 1772
1773 if (dc_enable_dmub_notifications(adev->dm.dc)) {
1774 kfree(adev->dm.dmub_notify);
1775 adev->dm.dmub_notify = NULL;
1776 destroy_workqueue(adev->dm.delayed_hpd_wq);
1777 adev->dm.delayed_hpd_wq = NULL;
1778 }
1779
1780 if (adev->dm.dmub_bo)
1781 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1782 &adev->dm.dmub_bo_gpu_addr,
1783 &adev->dm.dmub_bo_cpu_addr);
52704fca 1784
1785 if (adev->dm.hpd_rx_offload_wq) {
1786 for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1787 if (adev->dm.hpd_rx_offload_wq[i].wq) {
1788 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1789 adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1790 }
1791 }
1792
1793 kfree(adev->dm.hpd_rx_offload_wq);
1794 adev->dm.hpd_rx_offload_wq = NULL;
1795 }
1796
1797 /* DC Destroy TODO: Replace destroy DAL */
1798 if (adev->dm.dc)
1799 dc_destroy(&adev->dm.dc);
1800 /*
1801 * TODO: pageflip, vblank interrupt
1802 *
1803 * amdgpu_dm_irq_fini(adev);
1804 */
1805
1806 if (adev->dm.cgs_device) {
1807 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1808 adev->dm.cgs_device = NULL;
1809 }
1810 if (adev->dm.freesync_module) {
1811 mod_freesync_destroy(adev->dm.freesync_module);
1812 adev->dm.freesync_module = NULL;
1813 }
674e78ac 1814
6ce8f316 1815 mutex_destroy(&adev->dm.audio_lock);
1816 mutex_destroy(&adev->dm.dc_lock);
1817
1818 return;
1819}
1820
a94d5569 1821static int load_dmcu_fw(struct amdgpu_device *adev)
4562236b 1822{
a7669aff 1823 const char *fw_name_dmcu = NULL;
1824 int r;
1825 const struct dmcu_firmware_header_v1_0 *hdr;
1826
1827 switch (adev->asic_type) {
1828#if defined(CONFIG_DRM_AMD_DC_SI)
1829 case CHIP_TAHITI:
1830 case CHIP_PITCAIRN:
1831 case CHIP_VERDE:
1832 case CHIP_OLAND:
1833#endif
1834 case CHIP_BONAIRE:
1835 case CHIP_HAWAII:
1836 case CHIP_KAVERI:
1837 case CHIP_KABINI:
1838 case CHIP_MULLINS:
1839 case CHIP_TONGA:
1840 case CHIP_FIJI:
1841 case CHIP_CARRIZO:
1842 case CHIP_STONEY:
1843 case CHIP_POLARIS11:
1844 case CHIP_POLARIS10:
1845 case CHIP_POLARIS12:
1846 case CHIP_VEGAM:
1847 case CHIP_VEGA10:
1848 case CHIP_VEGA12:
1849 case CHIP_VEGA20:
1850 return 0;
1851 case CHIP_NAVI12:
1852 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1853 break;
a94d5569 1854 case CHIP_RAVEN:
1855 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1856 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1857 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1858 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1859 else
a7669aff 1860 return 0;
1861 break;
1862 default:
1d789535 1863 switch (adev->ip_versions[DCE_HWIP][0]) {
1864 case IP_VERSION(2, 0, 2):
1865 case IP_VERSION(2, 0, 3):
1866 case IP_VERSION(2, 0, 0):
1867 case IP_VERSION(2, 1, 0):
1868 case IP_VERSION(3, 0, 0):
1869 case IP_VERSION(3, 0, 2):
1870 case IP_VERSION(3, 0, 3):
1871 case IP_VERSION(3, 0, 1):
1872 case IP_VERSION(3, 1, 2):
1873 case IP_VERSION(3, 1, 3):
b5b8ed44 1874 case IP_VERSION(3, 1, 5):
de7cc1b4 1875 case IP_VERSION(3, 1, 6):
1876 case IP_VERSION(3, 2, 0):
1877 case IP_VERSION(3, 2, 1):
1878 return 0;
1879 default:
1880 break;
1881 }
a94d5569 1882 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
59d0f396 1883 return -EINVAL;
1884 }
1885
1886 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1887 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1888 return 0;
1889 }
1890
1891 r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1892 if (r == -ENOENT) {
1893 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1894 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1895 adev->dm.fw_dmcu = NULL;
1896 return 0;
1897 }
1898 if (r) {
1899 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1900 fw_name_dmcu);
1901 return r;
1902 }
1903
1904 r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1905 if (r) {
1906 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1907 fw_name_dmcu);
1908 release_firmware(adev->dm.fw_dmcu);
1909 adev->dm.fw_dmcu = NULL;
1910 return r;
1911 }
1912
1913 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1914 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1915 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1916 adev->firmware.fw_size +=
1917 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1918
1919 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1920 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1921 adev->firmware.fw_size +=
1922 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
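/*
 * The DMCU blob carries both ERAM code and interrupt vectors: the ERAM
 * portion is the total ucode size minus intv_size_bytes, and each piece
 * is page-aligned separately for PSP loading, as computed above.
 */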
1923
1924 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1925
1926 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1927
1928 return 0;
1929}
1930
1931static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1932{
1933 struct amdgpu_device *adev = ctx;
1934
1935 return dm_read_reg(adev->dm.dc->ctx, address);
1936}
1937
1938static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1939 uint32_t value)
1940{
1941 struct amdgpu_device *adev = ctx;
1942
1943 return dm_write_reg(adev->dm.dc->ctx, address, value);
1944}
1945
1946static int dm_dmub_sw_init(struct amdgpu_device *adev)
1947{
1948 struct dmub_srv_create_params create_params;
1949 struct dmub_srv_region_params region_params;
1950 struct dmub_srv_region_info region_info;
1951 struct dmub_srv_fb_params fb_params;
1952 struct dmub_srv_fb_info *fb_info;
1953 struct dmub_srv *dmub_srv;
1954 const struct dmcub_firmware_header_v1_0 *hdr;
1955 const char *fw_name_dmub;
1956 enum dmub_asic dmub_asic;
1957 enum dmub_status status;
1958 int r;
1959
1d789535 1960 switch (adev->ip_versions[DCE_HWIP][0]) {
c08182f2 1961 case IP_VERSION(2, 1, 0):
1962 dmub_asic = DMUB_ASIC_DCN21;
1963 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1964 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1965 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
743b9786 1966 break;
c08182f2 1967 case IP_VERSION(3, 0, 0):
1d789535 1968 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1969 dmub_asic = DMUB_ASIC_DCN30;
1970 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1971 } else {
1972 dmub_asic = DMUB_ASIC_DCN30;
1973 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1974 }
79037324 1975 break;
c08182f2 1976 case IP_VERSION(3, 0, 1):
1977 dmub_asic = DMUB_ASIC_DCN301;
1978 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1979 break;
c08182f2 1980 case IP_VERSION(3, 0, 2):
1981 dmub_asic = DMUB_ASIC_DCN302;
1982 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1983 break;
c08182f2 1984 case IP_VERSION(3, 0, 3):
1985 dmub_asic = DMUB_ASIC_DCN303;
1986 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1987 break;
1988 case IP_VERSION(3, 1, 2):
1989 case IP_VERSION(3, 1, 3):
3137f792 1990 dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1991 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1992 break;
1993 case IP_VERSION(3, 1, 5):
1994 dmub_asic = DMUB_ASIC_DCN315;
1995 fw_name_dmub = FIRMWARE_DCN_315_DMUB;
1996 break;
de7cc1b4 1997 case IP_VERSION(3, 1, 6):
868f4357 1998 dmub_asic = DMUB_ASIC_DCN316;
1999 fw_name_dmub = FIRMWARE_DCN316_DMUB;
2000 break;
2001 case IP_VERSION(3, 2, 0):
2002 dmub_asic = DMUB_ASIC_DCN32;
2003 fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
2004 break;
2005 case IP_VERSION(3, 2, 1):
2006 dmub_asic = DMUB_ASIC_DCN321;
2007 fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
2008 break;
2009 default:
2010 /* ASIC doesn't support DMUB. */
2011 return 0;
2012 }
2013
2014 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
2015 if (r) {
2016 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
2017 return 0;
2018 }
2019
2020 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
2021 if (r) {
2022 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
2023 return 0;
2024 }
2025
743b9786 2026 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
72a74a18 2027 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
743b9786 2028
2029 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
2030 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
2031 AMDGPU_UCODE_ID_DMCUB;
2032 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
2033 adev->dm.dmub_fw;
2034 adev->firmware.fw_size +=
2035 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
743b9786 2036
2037 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
2038 adev->dm.dmcub_fw_version);
2039 }
2040
743b9786 2041
2042 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
2043 dmub_srv = adev->dm.dmub_srv;
2044
2045 if (!dmub_srv) {
2046 DRM_ERROR("Failed to allocate DMUB service!\n");
2047 return -ENOMEM;
2048 }
2049
2050 memset(&create_params, 0, sizeof(create_params));
2051 create_params.user_ctx = adev;
2052 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
2053 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
2054 create_params.asic = dmub_asic;
2055
2056 /* Create the DMUB service. */
2057 status = dmub_srv_create(dmub_srv, &create_params);
2058 if (status != DMUB_STATUS_OK) {
2059 DRM_ERROR("Error creating DMUB service: %d\n", status);
2060 return -EINVAL;
2061 }
2062
2063 /* Calculate the size of all the regions for the DMUB service. */
2064 memset(&region_params, 0, sizeof(region_params));
2065
2066 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
2067 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
2068 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
2069 region_params.vbios_size = adev->bios_size;
0922b899 2070 region_params.fw_bss_data = region_params.bss_data_size ?
2071 adev->dm.dmub_fw->data +
2072 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
0922b899 2073 le32_to_cpu(hdr->inst_const_bytes) : NULL;
2074 region_params.fw_inst_const =
2075 adev->dm.dmub_fw->data +
2076 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2077 PSP_HEADER_BYTES;
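/*
 * Rough layout of the DMUB firmware image, as implied by the offsets above:
 * [PSP header][inst_const (code)][PSP footer][bss/data]
 * fw_inst_const skips the PSP header, and inst_const_size trims the PSP
 * header and footer off the reported inst_const_bytes.
 */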
2078
2079 status = dmub_srv_calc_region_info(dmub_srv, &region_params,
2080 &region_info);
2081
2082 if (status != DMUB_STATUS_OK) {
2083 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
2084 return -EINVAL;
2085 }
2086
2087 /*
2088 * Allocate a framebuffer based on the total size of all the regions.
2089 * TODO: Move this into GART.
2090 */
2091 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2092 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
2093 &adev->dm.dmub_bo_gpu_addr,
2094 &adev->dm.dmub_bo_cpu_addr);
2095 if (r)
2096 return r;
2097
2098 /* Rebase the regions on the framebuffer address. */
2099 memset(&fb_params, 0, sizeof(fb_params));
2100 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
2101 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
2102 fb_params.region_info = &region_info;
2103
2104 adev->dm.dmub_fb_info =
2105 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2106 fb_info = adev->dm.dmub_fb_info;
2107
2108 if (!fb_info) {
2109 DRM_ERROR(
2110 "Failed to allocate framebuffer info for DMUB service!\n");
2111 return -ENOMEM;
2112 }
2113
2114 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
2115 if (status != DMUB_STATUS_OK) {
2116 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2117 return -EINVAL;
2118 }
2119
2120 return 0;
2121}
2122
2123static int dm_sw_init(void *handle)
2124{
2125 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2126 int r;
2127
2128 r = dm_dmub_sw_init(adev);
2129 if (r)
2130 return r;
2131
2132 return load_dmcu_fw(adev);
2133}
2134
2135static int dm_sw_fini(void *handle)
2136{
2137 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2138
2139 kfree(adev->dm.dmub_fb_info);
2140 adev->dm.dmub_fb_info = NULL;
2141
2142 if (adev->dm.dmub_srv) {
2143 dmub_srv_destroy(adev->dm.dmub_srv);
2144 adev->dm.dmub_srv = NULL;
2145 }
2146
2147 release_firmware(adev->dm.dmub_fw);
2148 adev->dm.dmub_fw = NULL;
743b9786 2149
2150 release_firmware(adev->dm.fw_dmcu);
2151 adev->dm.fw_dmcu = NULL;
a94d5569 2152
2153 return 0;
2154}
2155
7abcf6b5 2156static int detect_mst_link_for_all_connectors(struct drm_device *dev)
4562236b 2157{
c84dec2f 2158 struct amdgpu_dm_connector *aconnector;
4562236b 2159 struct drm_connector *connector;
f8d2d39e 2160 struct drm_connector_list_iter iter;
7abcf6b5 2161 int ret = 0;
4562236b 2162
2163 drm_connector_list_iter_begin(dev, &iter);
2164 drm_for_each_connector_iter(connector, &iter) {
b349f76e 2165 aconnector = to_amdgpu_dm_connector(connector);
2166 if (aconnector->dc_link->type == dc_connection_mst_branch &&
2167 aconnector->mst_mgr.aux) {
f1ad2f5e 2168 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2169 aconnector,
2170 aconnector->base.base.id);
2171
2172 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2173 if (ret < 0) {
2174 DRM_ERROR("DM_MST: Failed to start MST\n");
2175 aconnector->dc_link->type =
2176 dc_connection_single;
2177 break;
7abcf6b5 2178 }
f8d2d39e 2179 }
4562236b 2180 }
f8d2d39e 2181 drm_connector_list_iter_end(&iter);
4562236b 2182
2183 return ret;
2184}
2185
2186static int dm_late_init(void *handle)
2187{
42e67c3b 2188 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7abcf6b5 2189
2190 struct dmcu_iram_parameters params;
2191 unsigned int linear_lut[16];
2192 int i;
17bdb4a8 2193 struct dmcu *dmcu = NULL;
bbf854dc 2194
2195 dmcu = adev->dm.dc->res_pool->dmcu;
2196
2197 for (i = 0; i < 16; i++)
2198 linear_lut[i] = 0xFFFF * i / 15;
2199
2200 params.set = 0;
75068994 2201 params.backlight_ramping_override = false;
2202 params.backlight_ramping_start = 0xCCCC;
2203 params.backlight_ramping_reduction = 0xCCCCCCCC;
2204 params.backlight_lut_array_size = 16;
2205 params.backlight_lut_array = linear_lut;
2206
2207 /* Min backlight level after ABM reduction; don't allow below 1%:
2208 * 0xFFFF x 0.01 = 0x28F
2209 */
2210 params.min_abm_backlight = 0x28F;
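/* Sanity check: 0xFFFF / 100 = 655 (rounded down) = 0x28F, i.e. 1% of full range. */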
5cb32419 2211 /* In the case where abm is implemented on dmcub,
2212 * dmcu object will be null.
2213 * ABM 2.4 and up are implemented on dmcub.
2214 */
2215 if (dmcu) {
2216 if (!dmcu_load_iram(dmcu, params))
2217 return -EINVAL;
2218 } else if (adev->dm.dc->ctx->dmub_srv) {
2219 struct dc_link *edp_links[MAX_NUM_EDP];
2220 int edp_num;
bbf854dc 2221
2222 get_edp_links(adev->dm.dc, edp_links, &edp_num);
2223 for (i = 0; i < edp_num; i++) {
2224 if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2225 return -EINVAL;
2226 }
2227 }
bbf854dc 2228
4a580877 2229 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2230}
2231
2232static void s3_handle_mst(struct drm_device *dev, bool suspend)
2233{
c84dec2f 2234 struct amdgpu_dm_connector *aconnector;
4562236b 2235 struct drm_connector *connector;
f8d2d39e 2236 struct drm_connector_list_iter iter;
2237 struct drm_dp_mst_topology_mgr *mgr;
2238 int ret;
2239 bool need_hotplug = false;
4562236b 2240
2241 drm_connector_list_iter_begin(dev, &iter);
2242 drm_for_each_connector_iter(connector, &iter) {
2243 aconnector = to_amdgpu_dm_connector(connector);
2244 if (aconnector->dc_link->type != dc_connection_mst_branch ||
2245 aconnector->mst_port)
2246 continue;
2247
2248 mgr = &aconnector->mst_mgr;
2249
2250 if (suspend) {
2251 drm_dp_mst_topology_mgr_suspend(mgr);
2252 } else {
6f85f738 2253 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
fe7553be 2254 if (ret < 0) {
2255 dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
2256 aconnector->dc_link);
2257 need_hotplug = true;
2258 }
2259 }
4562236b 2260 }
f8d2d39e 2261 drm_connector_list_iter_end(&iter);
2262
2263 if (need_hotplug)
2264 drm_kms_helper_hotplug_event(dev);
2265}
2266
2267static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2268{
2269 int ret = 0;
2270
2271 /* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
2272 * on the Windows driver dc implementation.
2273 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
2274 * should be passed to smu during boot up and resume from s3.
2275 * boot up: dc calculate dcn watermark clock settings within dc_create,
2276 * dcn20_resource_construct
2277 * then call pplib functions below to pass the settings to smu:
2278 * smu_set_watermarks_for_clock_ranges
2279 * smu_set_watermarks_table
2280 * navi10_set_watermarks_table
2281 * smu_write_watermarks_table
2282 *
2283 * For Renoir, clock settings of dcn watermark are also fixed values.
2284 * dc has implemented different flow for window driver:
2285 * dc_hardware_init / dc_set_power_state
2286 * dcn10_init_hw
2287 * notify_wm_ranges
2288 * set_wm_ranges
2289 * -- Linux
2290 * smu_set_watermarks_for_clock_ranges
2291 * renoir_set_watermarks_table
2292 * smu_write_watermarks_table
2293 *
2294 * For Linux,
2295 * dc_hardware_init -> amdgpu_dm_init
2296 * dc_set_power_state --> dm_resume
2297 *
2298 * therefore, this function applies to navi10/12/14 but not Renoir.
2299 */
1d789535 2301 switch (adev->ip_versions[DCE_HWIP][0]) {
2302 case IP_VERSION(2, 0, 2):
2303 case IP_VERSION(2, 0, 0):
2304 break;
2305 default:
2306 return 0;
2307 }
2308
13f5dbd6 2309 ret = amdgpu_dpm_write_watermarks_table(adev);
2310 if (ret) {
2311 DRM_ERROR("Failed to update WMTABLE!\n");
2312 return ret;
2313 }
2314
2315 return 0;
2316}
2317
2318/**
2319 * dm_hw_init() - Initialize DC device
28d687ea 2320 * @handle: The base driver device containing the amdgpu_dm device.
2321 *
2322 * Initialize the &struct amdgpu_display_manager device. This involves calling
2323 * the initializers of each DM component, then populating the struct with them.
2324 *
2325 * Although the function implies hardware initialization, both hardware and
2326 * software are initialized here. Splitting them out to their relevant init
2327 * hooks is a future TODO item.
2328 *
2329 * Some notable things that are initialized here:
2330 *
2331 * - Display Core, both software and hardware
2332 * - DC modules that we need (freesync and color management)
2333 * - DRM software states
2334 * - Interrupt sources and handlers
2335 * - Vblank support
2336 * - Debug FS entries, if enabled
2337 */
2338static int dm_hw_init(void *handle)
2339{
2340 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2341 /* Create DAL display manager */
2342 amdgpu_dm_init(adev);
2343 amdgpu_dm_hpd_init(adev);
2344
2345 return 0;
2346}
2347
2348/**
2349 * dm_hw_fini() - Teardown DC device
28d687ea 2350 * @handle: The base driver device containing the amdgpu_dm device.
2351 *
2352 * Teardown components within &struct amdgpu_display_manager that require
2353 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2354 * were loaded. Also flush IRQ workqueues and disable them.
2355 */
2356static int dm_hw_fini(void *handle)
2357{
2358 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2359
2360 amdgpu_dm_hpd_fini(adev);
2361
2362 amdgpu_dm_irq_fini(adev);
21de3396 2363 amdgpu_dm_fini(adev);
2364 return 0;
2365}
2366
2367
2368static int dm_enable_vblank(struct drm_crtc *crtc);
2369static void dm_disable_vblank(struct drm_crtc *crtc);
2370
2371static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2372 struct dc_state *state, bool enable)
2373{
2374 enum dc_irq_source irq_source;
2375 struct amdgpu_crtc *acrtc;
2376 int rc = -EBUSY;
2377 int i = 0;
2378
2379 for (i = 0; i < state->stream_count; i++) {
2380 acrtc = get_crtc_by_otg_inst(
2381 adev, state->stream_status[i].primary_otg_inst);
2382
2383 if (acrtc && state->stream_status[i].plane_count != 0) {
2384 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2385 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2386 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
2387 acrtc->crtc_id, enable ? "en" : "dis", rc);
2388 if (rc)
2389 DRM_WARN("Failed to %s pflip interrupts\n",
2390 enable ? "enable" : "disable");
2391
2392 if (enable) {
2393 rc = dm_enable_vblank(&acrtc->base);
2394 if (rc)
2395 DRM_WARN("Failed to enable vblank interrupts\n");
2396 } else {
2397 dm_disable_vblank(&acrtc->base);
2398 }
2399
2400 }
2401 }
2402
2403}
2404
dfd84d90 2405static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2406{
2407 struct dc_state *context = NULL;
2408 enum dc_status res = DC_ERROR_UNEXPECTED;
2409 int i;
2410 struct dc_stream_state *del_streams[MAX_PIPES];
2411 int del_streams_count = 0;
2412
2413 memset(del_streams, 0, sizeof(del_streams));
2414
2415 context = dc_create_state(dc);
2416 if (context == NULL)
2417 goto context_alloc_fail;
2418
2419 dc_resource_state_copy_construct_current(dc, context);
2420
2421 /* First remove from context all streams */
2422 for (i = 0; i < context->stream_count; i++) {
2423 struct dc_stream_state *stream = context->streams[i];
2424
2425 del_streams[del_streams_count++] = stream;
2426 }
2427
2428 /* Remove all planes for removed streams and then remove the streams */
2429 for (i = 0; i < del_streams_count; i++) {
2430 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2431 res = DC_FAIL_DETACH_SURFACES;
2432 goto fail;
2433 }
2434
2435 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2436 if (res != DC_OK)
2437 goto fail;
2438 }
2439
2440 res = dc_commit_state(dc, context);
2441
2442fail:
2443 dc_release_state(context);
2444
2445context_alloc_fail:
2446 return res;
2447}
2448
2449static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2450{
2451 int i;
2452
2453 if (dm->hpd_rx_offload_wq) {
2454 for (i = 0; i < dm->dc->caps.max_links; i++)
2455 flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2456 }
2457}
2458
2459static int dm_suspend(void *handle)
2460{
2461 struct amdgpu_device *adev = handle;
2462 struct amdgpu_display_manager *dm = &adev->dm;
2463 int ret = 0;
4562236b 2464
53b3f8f4 2465 if (amdgpu_in_reset(adev)) {
cdaae837 2466 mutex_lock(&dm->dc_lock);
98ab5f35 2467
98ab5f35 2468 dc_allow_idle_optimizations(adev->dm.dc, false);
98ab5f35 2469
2470 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2471
2472 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2473
2474 amdgpu_dm_commit_zero_streams(dm->dc);
2475
2476 amdgpu_dm_irq_suspend(adev);
2477
2478 hpd_rx_irq_work_suspend(dm);
2479
2480 return ret;
2481 }
4562236b 2482
d2f0b53b 2483 WARN_ON(adev->dm.cached_state);
4a580877 2484 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
d2f0b53b 2485
4a580877 2486 s3_handle_mst(adev_to_drm(adev), true);
4562236b 2487
2488 amdgpu_dm_irq_suspend(adev);
2489
2490 hpd_rx_irq_work_suspend(dm);
2491
32f5062d 2492 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
4562236b 2493
1c2075d4 2494 return 0;
2495}
2496
17ce8a69 2497struct amdgpu_dm_connector *
2498amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2499 struct drm_crtc *crtc)
2500{
2501 uint32_t i;
c2cea706 2502 struct drm_connector_state *new_con_state;
2503 struct drm_connector *connector;
2504 struct drm_crtc *crtc_from_state;
2505
2506 for_each_new_connector_in_state(state, connector, new_con_state, i) {
2507 crtc_from_state = new_con_state->crtc;
2508
2509 if (crtc_from_state == crtc)
c84dec2f 2510 return to_amdgpu_dm_connector(connector);
2511 }
2512
2513 return NULL;
2514}
2515
2516static void emulated_link_detect(struct dc_link *link)
2517{
2518 struct dc_sink_init_data sink_init_data = { 0 };
2519 struct display_sink_capability sink_caps = { 0 };
2520 enum dc_edid_status edid_status;
2521 struct dc_context *dc_ctx = link->ctx;
2522 struct dc_sink *sink = NULL;
2523 struct dc_sink *prev_sink = NULL;
2524
2525 link->type = dc_connection_none;
2526 prev_sink = link->local_sink;
2527
2528 if (prev_sink)
2529 dc_sink_release(prev_sink);
2530
2531 switch (link->connector_signal) {
2532 case SIGNAL_TYPE_HDMI_TYPE_A: {
2533 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2534 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2535 break;
2536 }
2537
2538 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2539 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2540 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2541 break;
2542 }
2543
2544 case SIGNAL_TYPE_DVI_DUAL_LINK: {
2545 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2546 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2547 break;
2548 }
2549
2550 case SIGNAL_TYPE_LVDS: {
2551 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2552 sink_caps.signal = SIGNAL_TYPE_LVDS;
2553 break;
2554 }
2555
2556 case SIGNAL_TYPE_EDP: {
2557 sink_caps.transaction_type =
2558 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2559 sink_caps.signal = SIGNAL_TYPE_EDP;
2560 break;
2561 }
2562
2563 case SIGNAL_TYPE_DISPLAY_PORT: {
2564 sink_caps.transaction_type =
2565 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2566 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2567 break;
2568 }
2569
2570 default:
2571 DC_ERROR("Invalid connector type! signal:%d\n",
2572 link->connector_signal);
2573 return;
2574 }
2575
2576 sink_init_data.link = link;
2577 sink_init_data.sink_signal = sink_caps.signal;
2578
2579 sink = dc_sink_create(&sink_init_data);
2580 if (!sink) {
2581 DC_ERROR("Failed to create sink!\n");
2582 return;
2583 }
2584
dcd5fb82 2585 /* dc_sink_create returns a new reference */
2586 link->local_sink = sink;
2587
2588 edid_status = dm_helpers_read_local_edid(
2589 link->ctx,
2590 link,
2591 sink);
2592
2593 if (edid_status != EDID_OK)
2594 DC_ERROR("Failed to read EDID\n");
2595
2596}
2597
2598static void dm_gpureset_commit_state(struct dc_state *dc_state,
2599 struct amdgpu_display_manager *dm)
2600{
2601 struct {
2602 struct dc_surface_update surface_updates[MAX_SURFACES];
2603 struct dc_plane_info plane_infos[MAX_SURFACES];
2604 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2605 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2606 struct dc_stream_update stream_update;
2607 } *bundle;
2608 int k, m;
2609
2610 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2611
2612 if (!bundle) {
2613 dm_error("Failed to allocate update bundle\n");
2614 goto cleanup;
2615 }
2616
2617 for (k = 0; k < dc_state->stream_count; k++) {
2618 bundle->stream_update.stream = dc_state->streams[k];
2619
2620 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2621 bundle->surface_updates[m].surface =
2622 dc_state->stream_status->plane_states[m];
2623 bundle->surface_updates[m].surface->force_full_update =
2624 true;
2625 }
2626 dc_commit_updates_for_stream(
2627 dm->dc, bundle->surface_updates,
2628 dc_state->stream_status->plane_count,
efc8278e 2629 dc_state->streams[k], &bundle->stream_update, dc_state);
2630 }
2631
2632cleanup:
2633 kfree(bundle);
2634
2635 return;
2636}
2637
2638static int dm_resume(void *handle)
2639{
2640 struct amdgpu_device *adev = handle;
4a580877 2641 struct drm_device *ddev = adev_to_drm(adev);
4562236b 2642 struct amdgpu_display_manager *dm = &adev->dm;
c84dec2f 2643 struct amdgpu_dm_connector *aconnector;
4562236b 2644 struct drm_connector *connector;
f8d2d39e 2645 struct drm_connector_list_iter iter;
4562236b 2646 struct drm_crtc *crtc;
c2cea706 2647 struct drm_crtc_state *new_crtc_state;
2648 struct dm_crtc_state *dm_new_crtc_state;
2649 struct drm_plane *plane;
2650 struct drm_plane_state *new_plane_state;
2651 struct dm_plane_state *dm_new_plane_state;
113b7a01 2652 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
fbbdadf2 2653 enum dc_connection_type new_connection_type = dc_connection_none;
2654 struct dc_state *dc_state;
2655 int i, r, j;
4562236b 2656
53b3f8f4 2657 if (amdgpu_in_reset(adev)) {
2658 dc_state = dm->cached_dc_state;
2659
2660 /*
2661 * The dc->current_state is backed up into dm->cached_dc_state
2662 * before we commit 0 streams.
2663 *
2664 * DC will clear link encoder assignments on the real state
2665 * but the changes won't propagate over to the copy we made
2666 * before the 0 streams commit.
2667 *
2668 * DC expects that link encoder assignments are *not* valid
32685b32
NK
2669 * when committing a state, so as a workaround we can copy
2670 * off of the current state.
2671 *
2672 * We lose the previous assignments, but we had already
2673 * commit 0 streams anyway.
6d63fcc2 2674 */
32685b32 2675 link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
6d63fcc2 2676
2677 r = dm_dmub_hw_init(adev);
2678 if (r)
2679 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2680
2681 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2682 dc_resume(dm->dc);
2683
2684 amdgpu_dm_irq_resume_early(adev);
2685
2686 for (i = 0; i < dc_state->stream_count; i++) {
2687 dc_state->streams[i]->mode_changed = true;
2688 for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2689 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2690 = 0xffffffff;
2691 }
2692 }
2693
2694 if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
2695 amdgpu_dm_outbox_init(adev);
2696 dc_enable_dmub_outbox(adev->dm.dc);
2697 }
2698
cdaae837 2699 WARN_ON(!dc_commit_state(dm->dc, dc_state));
4562236b 2700
2701 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2702
2703 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2704
2705 dc_release_state(dm->cached_dc_state);
2706 dm->cached_dc_state = NULL;
2707
2708 amdgpu_dm_irq_resume_late(adev);
2709
2710 mutex_unlock(&dm->dc_lock);
2711
2712 return 0;
2713 }
2714 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2715 dc_release_state(dm_state->context);
2716 dm_state->context = dc_create_state(dm->dc);
2717 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2718 dc_resource_state_construct(dm->dc, dm_state->context);
2719
8c7aea40 2720 /* Before powering on DC we need to re-initialize DMUB. */
79d6b935 2721 dm_dmub_hw_resume(adev);
8c7aea40 2722
2723 /* Re-enable outbox interrupts for DPIA. */
2724 if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
2725 amdgpu_dm_outbox_init(adev);
2726 dc_enable_dmub_outbox(adev->dm.dc);
2727 }
2728
2729 /* power on hardware */
2730 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2731
2732 /* program HPD filter */
2733 dc_resume(dm->dc);
2734
2735 /*
2736 * Early enable HPD Rx IRQ; this should be done before setting the mode,
2737 * as short-pulse interrupts are used for MST.
2738 */
2739 amdgpu_dm_irq_resume_early(adev);
2740
2741 /* On resume we need to rewrite the MSTM control bits to enable MST */
2742 s3_handle_mst(ddev, false);
2743
2744 /* Do detection */
2745 drm_connector_list_iter_begin(ddev, &iter);
2746 drm_for_each_connector_iter(connector, &iter) {
c84dec2f 2747 aconnector = to_amdgpu_dm_connector(connector);
2748
2749 /*
2750 * this is the case when traversing through already created
2751 * MST connectors, should be skipped
2752 */
2753 if (aconnector->dc_link &&
2754 aconnector->dc_link->type == dc_connection_mst_branch)
2755 continue;
2756
03ea364c 2757 mutex_lock(&aconnector->hpd_lock);
2758 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2759 DRM_ERROR("KMS: Failed to detect connector\n");
2760
15c735e7 2761 if (aconnector->base.force && new_connection_type == dc_connection_none) {
fbbdadf2 2762 emulated_link_detect(aconnector->dc_link);
2763 } else {
2764 mutex_lock(&dm->dc_lock);
fbbdadf2 2765 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2766 mutex_unlock(&dm->dc_lock);
2767 }
2768
2769 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2770 aconnector->fake_enable = false;
2771
2772 if (aconnector->dc_sink)
2773 dc_sink_release(aconnector->dc_sink);
2774 aconnector->dc_sink = NULL;
2775 amdgpu_dm_update_connector_after_detect(aconnector);
03ea364c 2776 mutex_unlock(&aconnector->hpd_lock);
4562236b 2777 }
f8d2d39e 2778 drm_connector_list_iter_end(&iter);
4562236b 2779
1f6010a9 2780 /* Force mode set in atomic commit */
a80aa93d 2781 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
c2cea706 2782 new_crtc_state->active_changed = true;
4f346e65 2783
2784 /*
2785 * atomic_check is expected to create the dc states. We need to release
2786 * them here, since they were duplicated as part of the suspend
2787 * procedure.
2788 */
a80aa93d 2789 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2790 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2791 if (dm_new_crtc_state->stream) {
2792 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2793 dc_stream_release(dm_new_crtc_state->stream);
2794 dm_new_crtc_state->stream = NULL;
2795 }
2796 }
2797
a80aa93d 2798 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2799 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2800 if (dm_new_plane_state->dc_state) {
2801 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2802 dc_plane_state_release(dm_new_plane_state->dc_state);
2803 dm_new_plane_state->dc_state = NULL;
2804 }
2805 }
2806
2d1af6a1 2807 drm_atomic_helper_resume(ddev, dm->cached_state);
4562236b 2808
a80aa93d 2809 dm->cached_state = NULL;
0a214e2f 2810
9faa4237 2811 amdgpu_dm_irq_resume_late(adev);
4562236b 2812
2813 amdgpu_dm_smu_write_watermarks_table(adev);
2814
2d1af6a1 2815 return 0;
2816}
2817
2818/**
2819 * DOC: DM Lifecycle
2820 *
2821 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2822 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2823 * the base driver's device list to be initialized and torn down accordingly.
2824 *
2825 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2826 */
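/*
 * For reference, a SoC init file typically registers this block roughly as
 * (sketch; the exact gating lives in per-SoC code such as soc15.c):
 *
 *   if (amdgpu_device_has_dc_support(adev))
 *       amdgpu_device_ip_block_add(adev, &dm_ip_block);
 */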
2827
2828static const struct amd_ip_funcs amdgpu_dm_funcs = {
2829 .name = "dm",
2830 .early_init = dm_early_init,
7abcf6b5 2831 .late_init = dm_late_init,
2832 .sw_init = dm_sw_init,
2833 .sw_fini = dm_sw_fini,
e9669fb7 2834 .early_fini = amdgpu_dm_early_fini,
2835 .hw_init = dm_hw_init,
2836 .hw_fini = dm_hw_fini,
2837 .suspend = dm_suspend,
2838 .resume = dm_resume,
2839 .is_idle = dm_is_idle,
2840 .wait_for_idle = dm_wait_for_idle,
2841 .check_soft_reset = dm_check_soft_reset,
2842 .soft_reset = dm_soft_reset,
2843 .set_clockgating_state = dm_set_clockgating_state,
2844 .set_powergating_state = dm_set_powergating_state,
2845};
2846
2847const struct amdgpu_ip_block_version dm_ip_block =
2848{
2849 .type = AMD_IP_BLOCK_TYPE_DCE,
2850 .major = 1,
2851 .minor = 0,
2852 .rev = 0,
2853 .funcs = &amdgpu_dm_funcs,
2854};
2855
ca3268c4 2856
2857/**
2858 * DOC: atomic
2859 *
2860 * *WIP*
2861 */
0a323b84 2862
b3663f70 2863static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
4d4772f6 2864 .fb_create = amdgpu_display_user_framebuffer_create,
dfbbfe3c 2865 .get_format_info = amd_get_format_info,
366c1baa 2866 .output_poll_changed = drm_fb_helper_output_poll_changed,
4562236b 2867 .atomic_check = amdgpu_dm_atomic_check,
0269764a 2868 .atomic_commit = drm_atomic_helper_commit,
2869};
2870
2871static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2872 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2873};
2874
2875static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2876{
d8791dc7 2877 u32 max_avg, min_cll, max, min, q, r;
2878 struct amdgpu_dm_backlight_caps *caps;
2879 struct amdgpu_display_manager *dm;
2880 struct drm_connector *conn_base;
2881 struct amdgpu_device *adev;
ec11fe37 2882 struct dc_link *link = NULL;
2883 static const u8 pre_computed_values[] = {
2884 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2885 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
7fd13bae 2886 int i;
2887
2888 if (!aconnector || !aconnector->dc_link)
2889 return;
2890
ec11fe37 2891 link = aconnector->dc_link;
2892 if (link->connector_signal != SIGNAL_TYPE_EDP)
2893 return;
2894
94562810 2895 conn_base = &aconnector->base;
1348969a 2896 adev = drm_to_adev(conn_base->dev);
94562810 2897 dm = &adev->dm;
2898 for (i = 0; i < dm->num_of_edps; i++) {
2899 if (link == dm->backlight_link[i])
2900 break;
2901 }
2902 if (i >= dm->num_of_edps)
2903 return;
2904 caps = &dm->backlight_caps[i];
2905 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2906 caps->aux_support = false;
d8791dc7 2907 max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall;
2908 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2909
d0ae0b64 2910 if (caps->ext_caps->bits.oled == 1 /*||
94562810 2911 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
d0ae0b64 2912 caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2913 caps->aux_support = true;
2914
2915 if (amdgpu_backlight == 0)
2916 caps->aux_support = false;
2917 else if (amdgpu_backlight == 1)
2918 caps->aux_support = true;
2919
2920 /* From the specification (CTA-861-G), for calculating the maximum
2921 * luminance we need to use:
2922 * Luminance = 50*2**(CV/32)
2923 * Where CV is a one-byte value.
2924 * Evaluating this expression directly would need floating-point precision;
2925 * to avoid that complexity, we take advantage of the fact that CV is divided
2926 * by a constant. From Euclid's division algorithm, we know that CV
2927 * can be written as: CV = 32*q + r. Next, we replace CV in the
2928 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2929 * need to pre-compute the value of r/32. For pre-computing the values
2930 * we just used the following Ruby line:
2931 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2932 * The results of the above expressions can be verified at
2933 * pre_computed_values.
2934 */
2935 q = max_avg >> 5;
2936 r = max_avg % 32;
2937 max = (1 << q) * pre_computed_values[r];
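/*
 * Worked example: max_avg = 98 gives q = 3 and r = 2, so
 * max = (1 << 3) * 52 = 416, close to the exact 50*2^(98/32) ~= 418.
 */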
2938
2939 // min luminance: maxLum * (CV/255)^2 / 100
2940 q = DIV_ROUND_CLOSEST(min_cll, 255);
2941 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2942
2943 caps->aux_max_input_signal = max;
2944 caps->aux_min_input_signal = min;
2945}
2946
2947void amdgpu_dm_update_connector_after_detect(
2948 struct amdgpu_dm_connector *aconnector)
2949{
2950 struct drm_connector *connector = &aconnector->base;
2951 struct drm_device *dev = connector->dev;
b73a22d3 2952 struct dc_sink *sink;
2953
2954 /* MST handled by drm_mst framework */
2955 if (aconnector->mst_mgr.mst_state == true)
2956 return;
2957
4562236b 2958 sink = aconnector->dc_link->local_sink;
2959 if (sink)
2960 dc_sink_retain(sink);
4562236b 2961
2962 /*
2963 * Edid mgmt connector gets first update only in mode_valid hook and then
2964 * the connector sink is set to either a fake or the physical sink, depending on link status.
2965 * Skip if already done during boot.
2966 */
2967 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2968 && aconnector->dc_em_sink) {
2969
2970 /*
2971 * For S3 resume with headless, use the em_sink to fake the stream
2972 * because connector->sink is set to NULL on resume
2973 */
2974 mutex_lock(&dev->mode_config.mutex);
2975
2976 if (sink) {
922aa1e1 2977 if (aconnector->dc_sink) {
98e6436d 2978 amdgpu_dm_update_freesync_caps(connector, NULL);
2979 /*
2980 * retain and release below are used to
2981 * bump up the refcount for the sink because the link doesn't point
2982 * to it anymore after disconnect, so on the next crtc-to-connector
2983 * reshuffle by UMD we would get an unwanted dc_sink release
2984 */
dcd5fb82 2985 dc_sink_release(aconnector->dc_sink);
922aa1e1 2986 }
4562236b 2987 aconnector->dc_sink = sink;
dcd5fb82 2988 dc_sink_retain(aconnector->dc_sink);
2989 amdgpu_dm_update_freesync_caps(connector,
2990 aconnector->edid);
4562236b 2991 } else {
98e6436d 2992 amdgpu_dm_update_freesync_caps(connector, NULL);
dcd5fb82 2993 if (!aconnector->dc_sink) {
4562236b 2994 aconnector->dc_sink = aconnector->dc_em_sink;
922aa1e1 2995 dc_sink_retain(aconnector->dc_sink);
dcd5fb82 2996 }
2997 }
2998
2999 mutex_unlock(&dev->mode_config.mutex);
3000
3001 if (sink)
3002 dc_sink_release(sink);
3003 return;
3004 }
3005
3006 /*
3007 * TODO: temporary guard to look for proper fix
3008 * if this sink is MST sink, we should not do anything
3009 */
3010 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
3011 dc_sink_release(sink);
4562236b 3012 return;
dcd5fb82 3013 }
3014
3015 if (aconnector->dc_sink == sink) {
3016 /*
3017 * We got a DP short pulse (Link Loss, DP CTS, etc...).
3018 * Do nothing!!
3019 */
f1ad2f5e 3020 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
4562236b 3021 aconnector->connector_id);
3022 if (sink)
3023 dc_sink_release(sink);
3024 return;
3025 }
3026
f1ad2f5e 3027 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
3028 aconnector->connector_id, aconnector->dc_sink, sink);
3029
3030 mutex_lock(&dev->mode_config.mutex);
3031
3032 /*
3033 * 1. Update status of the drm connector
3034 * 2. Send an event and let userspace tell us what to do
3035 */
4562236b 3036 if (sink) {
3037 /*
3038 * TODO: check if we still need the S3 mode update workaround.
3039 * If yes, put it here.
3040 */
c64b0d6b 3041 if (aconnector->dc_sink) {
98e6436d 3042 amdgpu_dm_update_freesync_caps(connector, NULL);
3043 dc_sink_release(aconnector->dc_sink);
3044 }
3045
3046 aconnector->dc_sink = sink;
dcd5fb82 3047 dc_sink_retain(aconnector->dc_sink);
900b3cb1 3048 if (sink->dc_edid.length == 0) {
4562236b 3049 aconnector->edid = NULL;
3050 if (aconnector->dc_link->aux_mode) {
3051 drm_dp_cec_unset_edid(
3052 &aconnector->dm_dp_aux.aux);
3053 }
900b3cb1 3054 } else {
4562236b 3055 aconnector->edid =
e6142dd5 3056 (struct edid *)sink->dc_edid.raw_edid;
4562236b 3057
3058 if (aconnector->dc_link->aux_mode)
3059 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
3060 aconnector->edid);
4562236b 3061 }
e6142dd5 3062
20543be9 3063 drm_connector_update_edid_property(connector, aconnector->edid);
98e6436d 3064 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
94562810 3065 update_connector_ext_caps(aconnector);
4562236b 3066 } else {
e86e8947 3067 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
98e6436d 3068 amdgpu_dm_update_freesync_caps(connector, NULL);
c555f023 3069 drm_connector_update_edid_property(connector, NULL);
4562236b 3070 aconnector->num_modes = 0;
dcd5fb82 3071 dc_sink_release(aconnector->dc_sink);
4562236b 3072 aconnector->dc_sink = NULL;
5326c452 3073 aconnector->edid = NULL;
3074#ifdef CONFIG_DRM_AMD_DC_HDCP
3075 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
3076 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3077 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3078#endif
3079 }
3080
3081 mutex_unlock(&dev->mode_config.mutex);
dcd5fb82 3082
3083 update_subconnector_property(aconnector);
3084
3085 if (sink)
3086 dc_sink_release(sink);
3087}
3088
e27c41d5 3089static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
4562236b 3090{
3091 struct drm_connector *connector = &aconnector->base;
3092 struct drm_device *dev = connector->dev;
fbbdadf2 3093 enum dc_connection_type new_connection_type = dc_connection_none;
1348969a 3094 struct amdgpu_device *adev = drm_to_adev(dev);
10a36226 3095#ifdef CONFIG_DRM_AMD_DC_HDCP
97f6c917 3096 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
10a36226 3097#endif
15c735e7 3098 bool ret = false;
4562236b 3099
3100 if (adev->dm.disable_hpd_irq)
3101 return;
3102
3103 /*
3104 * In case of failure or MST, there is no need to update the connector status
3105 * or notify the OS, since (in the MST case) MST handles this in its own context.
3106 */
3107 mutex_lock(&aconnector->hpd_lock);
2e0ac3d6 3108
0c8620d6 3109#ifdef CONFIG_DRM_AMD_DC_HDCP
97f6c917 3110 if (adev->dm.hdcp_workqueue) {
96a3b32e 3111 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3112 dm_con_state->update_hdcp = true;
3113 }
0c8620d6 3114#endif
3115 if (aconnector->fake_enable)
3116 aconnector->fake_enable = false;
3117
3118 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3119 DRM_ERROR("KMS: Failed to detect connector\n");
3120
3121 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3122 emulated_link_detect(aconnector->dc_link);
3123
3124 drm_modeset_lock_all(dev);
3125 dm_restore_drm_connector_state(dev, connector);
3126 drm_modeset_unlock_all(dev);
3127
3128 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
fc320a6f 3129 drm_kms_helper_connector_hotplug_event(connector);
3130 } else {
3131 mutex_lock(&adev->dm.dc_lock);
3132 ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
3133 mutex_unlock(&adev->dm.dc_lock);
3134 if (ret) {
3135 amdgpu_dm_update_connector_after_detect(aconnector);
fbbdadf2 3136
3137 drm_modeset_lock_all(dev);
3138 dm_restore_drm_connector_state(dev, connector);
3139 drm_modeset_unlock_all(dev);
4562236b 3140
3141 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3142 drm_kms_helper_connector_hotplug_event(connector);
3143 }
3144 }
3145 mutex_unlock(&aconnector->hpd_lock);
3146
3147}
3148
3149static void handle_hpd_irq(void *param)
3150{
3151 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3152
3153 handle_hpd_irq_helper(aconnector);
3154
3155}
3156
8e794421 3157static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3158{
3159 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3160 uint8_t dret;
3161 bool new_irq_handled = false;
3162 int dpcd_addr;
3163 int dpcd_bytes_to_read;
3164
3165 const int max_process_count = 30;
3166 int process_count = 0;
3167
3168 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3169
3170 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3171 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3172 /* DPCD 0x200 - 0x201 for downstream IRQ */
3173 dpcd_addr = DP_SINK_COUNT;
3174 } else {
3175 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3176 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
3177 dpcd_addr = DP_SINK_COUNT_ESI;
3178 }
3179
3180 dret = drm_dp_dpcd_read(
3181 &aconnector->dm_dp_aux.aux,
3182 dpcd_addr,
3183 esi,
3184 dpcd_bytes_to_read);
3185
3186 while (dret == dpcd_bytes_to_read &&
3187 process_count < max_process_count) {
3188 uint8_t retry;
3189 dret = 0;
3190
3191 process_count++;
3192
f1ad2f5e 3193 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3194 /* handle HPD short pulse irq */
3195 if (aconnector->mst_mgr.mst_state)
3196 drm_dp_mst_hpd_irq(
3197 &aconnector->mst_mgr,
3198 esi,
3199 &new_irq_handled);
3200
3201 if (new_irq_handled) {
3202 /* ACK at DPCD to notify down stream */
3203 const int ack_dpcd_bytes_to_write =
3204 dpcd_bytes_to_read - 1;
3205
3206 for (retry = 0; retry < 3; retry++) {
3207 uint8_t wret;
3208
3209 wret = drm_dp_dpcd_write(
3210 &aconnector->dm_dp_aux.aux,
3211 dpcd_addr + 1,
3212 &esi[1],
3213 ack_dpcd_bytes_to_write);
3214 if (wret == ack_dpcd_bytes_to_write)
3215 break;
3216 }
3217
1f6010a9 3218 /* check if there is new irq to be handled */
3219 dret = drm_dp_dpcd_read(
3220 &aconnector->dm_dp_aux.aux,
3221 dpcd_addr,
3222 esi,
3223 dpcd_bytes_to_read);
3224
3225 new_irq_handled = false;
d4a6e8a9 3226 } else {
4562236b 3227 break;
d4a6e8a9 3228 }
3229 }
3230
3231 if (process_count == max_process_count)
f1ad2f5e 3232 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3233}
3234
3235static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3236 union hpd_irq_data hpd_irq_data)
3237{
3238 struct hpd_rx_irq_offload_work *offload_work =
3239 kzalloc(sizeof(*offload_work), GFP_KERNEL);
3240
3241 if (!offload_work) {
3242 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3243 return;
3244 }
3245
3246 INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3247 offload_work->data = hpd_irq_data;
3248 offload_work->offload_wq = offload_wq;
3249
3250 queue_work(offload_wq->wq, &offload_work->work);
3251 DRM_DEBUG_KMS("queue work to handle hpd_rx offload work\n");
3252}
3253
static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
	bool result = false;
	enum dc_connection_type new_connection_type = dc_connection_none;
	struct amdgpu_device *adev = drm_to_adev(dev);
	union hpd_irq_data hpd_irq_data;
	bool link_loss = false;
	bool has_left_work = false;
	int idx = aconnector->base.index;
	struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];

	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));

	if (adev->dm.disable_hpd_irq)
		return;

	/*
	 * TODO: Temporarily take a mutex so the HPD interrupt cannot race
	 * with GPIO access; once an i2c helper is implemented, this mutex
	 * should be retired.
	 */
	mutex_lock(&aconnector->hpd_lock);

	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
						&link_loss, true, &has_left_work);

	if (!has_left_work)
		goto out;

	if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
		schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
		goto out;
	}

	if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
			hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
			dm_handle_mst_sideband_msg(aconnector);
			goto out;
		}

		if (link_loss) {
			bool skip = false;

			spin_lock(&offload_wq->offload_lock);
			skip = offload_wq->is_handling_link_loss;

			if (!skip)
				offload_wq->is_handling_link_loss = true;

			spin_unlock(&offload_wq->offload_lock);

			if (!skip)
				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);

			goto out;
		}
	}

out:
	if (result && !is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (!dc_link_detect_sink(dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(dc_link);

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_connector_hotplug_event(connector);
		} else {
			bool ret = false;

			mutex_lock(&adev->dm.dc_lock);
			ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX);
			mutex_unlock(&adev->dm.dc_lock);

			if (ret) {
				if (aconnector->fake_enable)
					aconnector->fake_enable = false;

				amdgpu_dm_update_connector_after_detect(aconnector);

				drm_modeset_lock_all(dev);
				dm_restore_drm_connector_state(dev, connector);
				drm_modeset_unlock_all(dev);

				drm_kms_helper_connector_hotplug_event(connector);
			}
		}
	}
#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
		if (adev->dm.hdcp_workqueue)
			hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
	}
#endif

	if (dc_link->type != dc_connection_mst_branch)
		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);

	mutex_unlock(&aconnector->hpd_lock);
}

static void register_hpd_handlers(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct amdgpu_dm_connector *aconnector;
	const struct dc_link *dc_link;
	struct dc_interrupt_params int_params = {0};

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {

		aconnector = to_amdgpu_dm_connector(connector);
		dc_link = aconnector->dc_link;

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_irq,
					(void *) aconnector);
		}

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {

			/* Also register for DP short pulse (hpd_rx). */
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_rx_irq,
					(void *) aconnector);

			if (adev->dm.hpd_rx_offload_wq)
				adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
					aconnector;
		}
	}
}

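/*
 * The per-ASIC register_irq_handlers() variants below all follow the same
 * pattern: register each IRQ source id with the base driver, translate it
 * to a DC irq_source, stash the (adev, irq_source) pair in the matching
 * common_irq_params slot, and hook up the DM high-irq handler.
 */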
#if defined(CONFIG_DRM_AMD_DC_SI)
/* Register IRQ sources and initialize IRQ callbacks */
static int dce60_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VBLANK interrupt */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i + 1, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
#endif

/* Register IRQ sources and initialize IRQ callbacks */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

	if (adev->family >= AMDGPU_FAMILY_AI)
		client_id = SOC15_IH_CLIENTID_DCE;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VBLANK interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use VUPDATE interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_vupdate_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}

/* Register IRQ sources and initialize IRQ callbacks */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	static const unsigned int vrtl_int_srcid[] = {
		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
	};
#endif

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VSTARTUP interrupt */
	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);

		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(
			adev, &int_params, dm_crtc_high_irq, c_irq_params);
	}

	/* Use otg vertical line interrupt */
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
				vrtl_int_srcid[i], &adev->vline0_irq);

		if (r) {
			DRM_ERROR("Failed to add vline0 irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);

		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
			break;
		}

		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
					- DC_IRQ_SOURCE_DC1_VLINE0];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
	}
#endif

	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
	 * to trigger at end of each vblank, regardless of state of the lock,
	 * matching DCE behaviour.
	 */
	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
			i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);

		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_vupdate_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
			&adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}

/* Register Outbox IRQ sources and initialize IRQ callbacks */
static int register_outbox_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r, i;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
			&adev->dmub_outbox_irq);
	if (r) {
		DRM_ERROR("Failed to add outbox irq id!\n");
		return r;
	}

	if (dc->ctx->dmub_srv) {
		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.dmub_outbox_params[0];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_dmub_outbox1_low_irq, c_irq_params);
	}

	return 0;
}

/*
 * Acquires the lock for the atomic state object and returns
 * the new atomic state.
 *
 * This should only be called during atomic check.
 */
int dm_atomic_get_state(struct drm_atomic_state *state,
			struct dm_atomic_state **dm_state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_state *priv_state;

	if (*dm_state)
		return 0;

	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	*dm_state = to_dm_atomic_state(priv_state);

	return 0;
}

static struct dm_atomic_state *
dm_atomic_get_new_state(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_obj *obj;
	struct drm_private_state *new_obj_state;
	int i;

	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
		if (obj->funcs == dm->atomic_obj.funcs)
			return to_dm_atomic_state(new_obj_state);
	}

	return NULL;
}

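/*
 * Private-object state duplication: the DC context is deep-copied with
 * dc_copy_state() so atomic check can freely mutate the candidate state
 * while the currently committed context stays untouched; the copy is
 * released again in dm_atomic_destroy_state() below.
 */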
static struct drm_private_state *
dm_atomic_duplicate_state(struct drm_private_obj *obj)
{
	struct dm_atomic_state *old_state, *new_state;

	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
	if (!new_state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);

	old_state = to_dm_atomic_state(obj->state);

	if (old_state && old_state->context)
		new_state->context = dc_copy_state(old_state->context);

	if (!new_state->context) {
		kfree(new_state);
		return NULL;
	}

	return &new_state->base;
}

static void dm_atomic_destroy_state(struct drm_private_obj *obj,
				    struct drm_private_state *state)
{
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);

	if (dm_state && dm_state->context)
		dc_release_state(dm_state->context);

	kfree(dm_state);
}

static struct drm_private_state_funcs dm_atomic_state_funcs = {
	.atomic_duplicate_state = dm_atomic_duplicate_state,
	.atomic_destroy_state = dm_atomic_destroy_state,
};

static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
{
	struct dm_atomic_state *state;
	int r;

	adev->mode_info.mode_config_initialized = true;

	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;

	adev_to_drm(adev)->mode_config.max_width = 16384;
	adev_to_drm(adev)->mode_config.max_height = 16384;

	adev_to_drm(adev)->mode_config.preferred_depth = 24;
	/* disable prefer shadow for now due to hibernation issues */
	adev_to_drm(adev)->mode_config.prefer_shadow = 0;
	/* indicates support for immediate flip */
	adev_to_drm(adev)->mode_config.async_page_flip = true;

	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	state->context = dc_create_state(adev->dm.dc);
	if (!state->context) {
		kfree(state);
		return -ENOMEM;
	}

	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);

	drm_atomic_private_obj_init(adev_to_drm(adev),
				    &adev->dm.atomic_obj,
				    &state->base,
				    &dm_atomic_state_funcs);

	r = amdgpu_display_modeset_create_props(adev);
	if (r) {
		dc_release_state(state->context);
		kfree(state);
		return r;
	}

	r = amdgpu_dm_audio_init(adev);
	if (r) {
		dc_release_state(state->context);
		kfree(state);
		return r;
	}

	return 0;
}

#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50

static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
					    int bl_idx)
{
#if defined(CONFIG_ACPI)
	struct amdgpu_dm_backlight_caps caps;

	memset(&caps, 0, sizeof(caps));

	if (dm->backlight_caps[bl_idx].caps_valid)
		return;

	amdgpu_acpi_get_backlight_caps(&caps);
	if (caps.caps_valid) {
		dm->backlight_caps[bl_idx].caps_valid = true;
		if (caps.aux_support)
			return;
		dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
		dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
	} else {
		dm->backlight_caps[bl_idx].min_input_signal =
				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
		dm->backlight_caps[bl_idx].max_input_signal =
				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
	}
#else
	if (dm->backlight_caps[bl_idx].aux_support)
		return;

	dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
	dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
#endif
}

static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
				unsigned *min, unsigned *max)
{
	if (!caps)
		return 0;

	if (caps->aux_support) {
		// Firmware limits are in nits, DC API wants millinits.
		*max = 1000 * caps->aux_max_input_signal;
		*min = 1000 * caps->aux_min_input_signal;
	} else {
		// Firmware limits are 8-bit, PWM control is 16-bit.
		*max = 0x101 * caps->max_input_signal;
		*min = 0x101 * caps->min_input_signal;
	}
	return 1;
}

static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
					uint32_t brightness)
{
	unsigned min, max;

	if (!get_brightness_range(caps, &min, &max))
		return brightness;

	// Rescale 0..255 to min..max
	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
				       AMDGPU_MAX_BL_LEVEL);
}
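
/*
 * Worked example for the PWM path: with firmware limits
 * min_input_signal = 12 and max_input_signal = 255, the range becomes
 * min = 0x101 * 12 = 3084 and max = 0x101 * 255 = 65535. A user
 * brightness of 128 then maps to
 * 3084 + DIV_ROUND_CLOSEST((65535 - 3084) * 128, 255) = 34432,
 * and convert_brightness_to_user() below performs the inverse mapping.
 */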

static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
				      uint32_t brightness)
{
	unsigned min, max;

	if (!get_brightness_range(caps, &min, &max))
		return brightness;

	if (brightness < min)
		return 0;
	// Rescale min..max to 0..255
	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
				 max - min);
}

static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
					  int bl_idx,
					  u32 user_brightness)
{
	struct amdgpu_dm_backlight_caps caps;
	struct dc_link *link;
	u32 brightness;
	bool rc;

	amdgpu_dm_update_backlight_caps(dm, bl_idx);
	caps = dm->backlight_caps[bl_idx];

	dm->brightness[bl_idx] = user_brightness;
	/* update scratch register */
	if (bl_idx == 0)
		amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
	brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
	link = (struct dc_link *)dm->backlight_link[bl_idx];

	/* Change brightness based on AUX property */
	if (caps.aux_support) {
		rc = dc_link_set_backlight_level_nits(link, true, brightness,
						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
		if (!rc)
			DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
	} else {
		rc = dc_link_set_backlight_level(link, brightness, 0);
		if (!rc)
			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
	}

	if (rc)
		dm->actual_brightness[bl_idx] = user_brightness;
}

static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);
	int i;

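	/*
	 * Map the backlight device back to its eDP panel index; fall back
	 * to the first panel when no match is found.
	 */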
	for (i = 0; i < dm->num_of_edps; i++) {
		if (bd == dm->backlight_dev[i])
			break;
	}
	if (i >= AMDGPU_DM_MAX_NUM_EDP)
		i = 0;
	amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);

	return 0;
}

static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
					 int bl_idx)
{
	struct amdgpu_dm_backlight_caps caps;
	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];

	amdgpu_dm_update_backlight_caps(dm, bl_idx);
	caps = dm->backlight_caps[bl_idx];

	if (caps.aux_support) {
		u32 avg, peak;
		bool rc;

		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
		if (!rc)
			return dm->brightness[bl_idx];
		return convert_brightness_to_user(&caps, avg);
	} else {
		int ret = dc_link_get_backlight_level(link);

		if (ret == DC_ERROR_UNEXPECTED)
			return dm->brightness[bl_idx];
		return convert_brightness_to_user(&caps, ret);
	}
}

static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);
	int i;

	for (i = 0; i < dm->num_of_edps; i++) {
		if (bd == dm->backlight_dev[i])
			break;
	}
	if (i >= AMDGPU_DM_MAX_NUM_EDP)
		i = 0;
	return amdgpu_dm_backlight_get_level(dm, i);
}

static const struct backlight_ops amdgpu_dm_backlight_ops = {
	.options = BL_CORE_SUSPENDRESUME,
	.get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status = amdgpu_dm_backlight_update_status,
};

static void
amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
{
	char bl_name[16];
	struct backlight_properties props = { 0 };

	amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
	dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;

	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
	props.brightness = AMDGPU_MAX_BL_LEVEL;
	props.type = BACKLIGHT_RAW;

	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
		 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);

	dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
								       adev_to_drm(dm->adev)->dev,
								       dm,
								       &amdgpu_dm_backlight_ops,
								       &props);

	if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
		DRM_ERROR("DM: Backlight registration failed!\n");
	else
		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
}

static int initialize_plane(struct amdgpu_display_manager *dm,
			    struct amdgpu_mode_info *mode_info, int plane_id,
			    enum drm_plane_type plane_type,
			    const struct dc_plane_cap *plane_cap)
{
	struct drm_plane *plane;
	unsigned long possible_crtcs;
	int ret = 0;

	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
	if (!plane) {
		DRM_ERROR("KMS: Failed to allocate plane\n");
		return -ENOMEM;
	}
	plane->type = plane_type;

	/*
	 * HACK: IGT tests expect that the primary plane for a CRTC
	 * can only have one possible CRTC. Only expose support for
	 * any CRTC if they're not going to be used as a primary plane
	 * for a CRTC - like overlay or underlay planes.
	 */
	possible_crtcs = 1 << plane_id;
	if (plane_id >= dm->dc->caps.max_streams)
		possible_crtcs = 0xff;

	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);

	if (ret) {
		DRM_ERROR("KMS: Failed to initialize plane\n");
		kfree(plane);
		return ret;
	}

	if (mode_info)
		mode_info->planes[plane_id] = plane;

	return ret;
}


static void register_backlight_device(struct amdgpu_display_manager *dm,
				      struct dc_link *link)
{
	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none) {
		/*
		 * Even if registration fails, we should continue with
		 * DM initialization because not having backlight control
		 * is better than a black screen.
		 */
		if (!dm->backlight_dev[dm->num_of_edps])
			amdgpu_dm_register_backlight_device(dm);

		if (dm->backlight_dev[dm->num_of_edps]) {
			dm->backlight_link[dm->num_of_edps] = link;
			dm->num_of_edps++;
		}
	}
}


/*
 * In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with DAL component
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	int32_t i;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct amdgpu_encoder *aencoder = NULL;
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	uint32_t link_cnt;
	int32_t primary_planes;
	enum dc_connection_type new_connection_type = dc_connection_none;
	const struct dc_plane_cap *plane;
	bool psr_feature_enabled = false;

	dm->display_indexes_num = dm->dc->caps.max_streams;
	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	link_cnt = dm->dc->caps.max_links;
	if (amdgpu_dm_mode_config_init(dm->adev)) {
		DRM_ERROR("DM: Failed to initialize mode config\n");
		return -EINVAL;
	}

	/* There is one primary plane per CRTC */
	primary_planes = dm->dc->caps.max_streams;
	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);

	/*
	 * Initialize primary planes, implicit planes for legacy IOCTLS.
	 * Order is reversed to match iteration order in atomic check.
	 */
	for (i = (primary_planes - 1); i >= 0; i--) {
		plane = &dm->dc->caps.planes[i];

		if (initialize_plane(dm, mode_info, i,
				     DRM_PLANE_TYPE_PRIMARY, plane)) {
			DRM_ERROR("KMS: Failed to initialize primary plane\n");
			goto fail;
		}
	}

	/*
	 * Initialize overlay planes, index starting after primary planes.
	 * These planes have a higher DRM index than the primary planes since
	 * they should be considered as having a higher z-order.
	 * Order is reversed to match iteration order in atomic check.
	 *
	 * Only support DCN for now, and only expose one so we don't encourage
	 * userspace to use up all the pipes.
	 */
	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];

		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
			continue;

		if (!plane->blends_with_above || !plane->blends_with_below)
			continue;

		if (!plane->pixel_format_support.argb8888)
			continue;

		if (initialize_plane(dm, NULL, primary_planes + i,
				     DRM_PLANE_TYPE_OVERLAY, plane)) {
			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
			goto fail;
		}

		/* Only create one overlay plane. */
		break;
	}

	for (i = 0; i < dm->dc->caps.max_streams; i++)
		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
			DRM_ERROR("KMS: Failed to initialize crtc\n");
			goto fail;
		}

	/* Use Outbox interrupt */
	switch (adev->ip_versions[DCE_HWIP][0]) {
	case IP_VERSION(3, 0, 0):
	case IP_VERSION(3, 1, 2):
	case IP_VERSION(3, 1, 3):
	case IP_VERSION(3, 1, 5):
	case IP_VERSION(3, 1, 6):
	case IP_VERSION(3, 2, 0):
	case IP_VERSION(3, 2, 1):
	case IP_VERSION(2, 1, 0):
		if (register_outbox_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
	default:
		DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
			      adev->ip_versions[DCE_HWIP][0]);
	}

	/* Determine whether to enable PSR support by default. */
	if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
		case IP_VERSION(3, 1, 5):
		case IP_VERSION(3, 1, 6):
		case IP_VERSION(3, 2, 0):
		case IP_VERSION(3, 2, 1):
			psr_feature_enabled = true;
			break;
		default:
			psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
			break;
		}
	}

	/* loops over all connectors on the board */
	for (i = 0; i < link_cnt; i++) {
		struct dc_link *link = NULL;

		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
			DRM_ERROR(
				"KMS: Cannot support more than %d display indexes\n",
					AMDGPU_DM_MAX_DISPLAY_INDEX);
			continue;
		}

		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
		if (!aconnector)
			goto fail;

		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
		if (!aencoder)
			goto fail;

		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
			DRM_ERROR("KMS: Failed to initialize encoder\n");
			goto fail;
		}

		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
			DRM_ERROR("KMS: Failed to initialize connector\n");
			goto fail;
		}

		link = dc_get_link_at_index(dm->dc, i);

		if (!dc_link_detect_sink(link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(link);
			amdgpu_dm_update_connector_after_detect(aconnector);
		} else {
			bool ret = false;

			mutex_lock(&dm->dc_lock);
			ret = dc_link_detect(link, DETECT_REASON_BOOT);
			mutex_unlock(&dm->dc_lock);

			if (ret) {
				amdgpu_dm_update_connector_after_detect(aconnector);
				register_backlight_device(dm, link);

				if (dm->num_of_edps)
					update_connector_ext_caps(aconnector);

				if (psr_feature_enabled)
					amdgpu_dm_set_psr_caps(link);

				/* TODO: Fix vblank control helpers to delay PSR entry to allow this when
				 * PSR is also supported.
				 */
				if (link->psr_settings.psr_feature_enabled)
					adev_to_drm(adev)->vblank_disable_immediate = false;
			}
		}
	}

	/* Software is initialized. Now we can register interrupt handlers. */
	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
		if (dce60_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		if (dce110_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
	default:
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 0, 3):
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 2):
		case IP_VERSION(3, 0, 3):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
		case IP_VERSION(3, 1, 5):
		case IP_VERSION(3, 1, 6):
		case IP_VERSION(3, 2, 0):
		case IP_VERSION(3, 2, 1):
			if (dcn10_register_irq_handlers(dm->adev)) {
				DRM_ERROR("DM: Failed to initialize IRQ\n");
				goto fail;
			}
			break;
		default:
			DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
					adev->ip_versions[DCE_HWIP][0]);
			goto fail;
		}
		break;
	}

	return 0;
fail:
	kfree(aencoder);
	kfree(aconnector);

	return -EINVAL;
}

static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
	drm_atomic_private_obj_fini(&dm->atomic_obj);
	return;
}

/******************************************************************************
 * amdgpu_display_funcs functions
 *****************************************************************************/

/*
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
}

static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
	.backlight_set_level = NULL, /* never called for DC */
	.backlight_get_level = NULL, /* never called for DC */
	.hpd_sense = NULL, /* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos, /* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
};

#if defined(CONFIG_DEBUG_KERNEL_DC)

static ssize_t s3_debug_store(struct device *device,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t count)
{
	int ret;
	int s3_state;
	struct drm_device *drm_dev = dev_get_drvdata(device);
	struct amdgpu_device *adev = drm_to_adev(drm_dev);

	ret = kstrtoint(buf, 0, &s3_state);

	if (ret == 0) {
		if (s3_state) {
			dm_resume(adev);
			drm_kms_helper_hotplug_event(adev_to_drm(adev));
		} else
			dm_suspend(adev);
	}

	return ret == 0 ? count : 0;
}

DEVICE_ATTR_WO(s3_debug);

#endif

static int dm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_OLAND:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 2;
		adev->mode_info.num_dig = 2;
		break;
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_KAVERI:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_CARRIZO:
		adev->mode_info.num_crtc = 3;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_STONEY:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
	case CHIP_POLARIS10:
	case CHIP_VEGAM:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	default:

		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(3, 0, 0):
			adev->mode_info.num_crtc = 6;
			adev->mode_info.num_hpd = 6;
			adev->mode_info.num_dig = 6;
			break;
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(3, 0, 2):
			adev->mode_info.num_crtc = 5;
			adev->mode_info.num_hpd = 5;
			adev->mode_info.num_dig = 5;
			break;
		case IP_VERSION(2, 0, 3):
		case IP_VERSION(3, 0, 3):
			adev->mode_info.num_crtc = 2;
			adev->mode_info.num_hpd = 2;
			adev->mode_info.num_dig = 2;
			break;
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
		case IP_VERSION(3, 1, 5):
		case IP_VERSION(3, 1, 6):
		case IP_VERSION(3, 2, 0):
		case IP_VERSION(3, 2, 1):
			adev->mode_info.num_crtc = 4;
			adev->mode_info.num_hpd = 4;
			adev->mode_info.num_dig = 4;
			break;
		default:
			DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
					adev->ip_versions[DCE_HWIP][0]);
			return -EINVAL;
		}
		break;
	}

	amdgpu_dm_set_irq_funcs(adev);

	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dm_display_funcs;

	/*
	 * Note: Do NOT change adev->audio_endpt_rreg and
	 * adev->audio_endpt_wreg because they are initialised in
	 * amdgpu_device_init()
	 */
#if defined(CONFIG_DEBUG_KERNEL_DC)
	device_create_file(
		adev_to_drm(adev)->dev,
		&dev_attr_s3_debug);
#endif

	return 0;
}

static bool modeset_required(struct drm_crtc_state *crtc_state,
			     struct dc_stream_state *new_stream,
			     struct dc_stream_state *old_stream)
{
	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
}

static bool modereset_required(struct drm_crtc_state *crtc_state)
{
	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
}

static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}

static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};


static void get_min_max_dc_plane_scaling(struct drm_device *dev,
					 struct drm_framebuffer *fb,
					 int *min_downscale, int *max_upscale)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];

	switch (fb->format->format) {
	case DRM_FORMAT_P010:
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV21:
		*max_upscale = plane_cap->max_upscale_factor.nv12;
		*min_downscale = plane_cap->max_downscale_factor.nv12;
		break;

	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		*max_upscale = plane_cap->max_upscale_factor.fp16;
		*min_downscale = plane_cap->max_downscale_factor.fp16;
		break;

	default:
		*max_upscale = plane_cap->max_upscale_factor.argb8888;
		*min_downscale = plane_cap->max_downscale_factor.argb8888;
		break;
	}

	/*
	 * A factor of 1 in the plane_cap means to not allow scaling, ie. use a
	 * scaling factor of 1.0 == 1000 units.
	 */
	if (*max_upscale == 1)
		*max_upscale = 1000;

	if (*min_downscale == 1)
		*min_downscale = 1000;
}

static int fill_dc_scaling_info(struct amdgpu_device *adev,
				const struct drm_plane_state *state,
				struct dc_scaling_info *scaling_info)
{
	int scale_w, scale_h, min_downscale, max_upscale;

	memset(scaling_info, 0, sizeof(*scaling_info));

	/* Source is fixed 16.16 but we ignore mantissa for now... */
	scaling_info->src_rect.x = state->src_x >> 16;
	scaling_info->src_rect.y = state->src_y >> 16;

	/*
	 * For reasons we don't (yet) fully understand a non-zero
	 * src_y coordinate into an NV12 buffer can cause a
	 * system hang on DCN1x.
	 * To avoid hangs (and maybe be overly cautious)
	 * let's reject both non-zero src_x and src_y.
	 *
	 * We currently know of only one use-case to reproduce a
	 * scenario with non-zero src_x and src_y for NV12, which
	 * is to gesture the YouTube Android app into full screen
	 * on ChromeOS.
	 */
	if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
	    (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
	    (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
	    (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
		return -EINVAL;

	scaling_info->src_rect.width = state->src_w >> 16;
	if (scaling_info->src_rect.width == 0)
		return -EINVAL;

	scaling_info->src_rect.height = state->src_h >> 16;
	if (scaling_info->src_rect.height == 0)
		return -EINVAL;

	scaling_info->dst_rect.x = state->crtc_x;
	scaling_info->dst_rect.y = state->crtc_y;

	if (state->crtc_w == 0)
		return -EINVAL;

	scaling_info->dst_rect.width = state->crtc_w;

	if (state->crtc_h == 0)
		return -EINVAL;

	scaling_info->dst_rect.height = state->crtc_h;

	/* DRM doesn't specify clipping on destination output. */
	scaling_info->clip_rect = scaling_info->dst_rect;

	/* Validate scaling per-format with DC plane caps */
	if (state->plane && state->plane->dev && state->fb) {
		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
					     &min_downscale, &max_upscale);
	} else {
		min_downscale = 250;
		max_upscale = 16000;
	}

	scale_w = scaling_info->dst_rect.width * 1000 /
		  scaling_info->src_rect.width;

	if (scale_w < min_downscale || scale_w > max_upscale)
		return -EINVAL;

	scale_h = scaling_info->dst_rect.height * 1000 /
		  scaling_info->src_rect.height;

	if (scale_h < min_downscale || scale_h > max_upscale)
		return -EINVAL;
4797 return -EINVAL;
4798
695af5f9
NK
4799 /*
4800 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4801 * assume reasonable defaults based on the format.
4802 */
e7b07cee 4803
695af5f9 4804 return 0;
4562236b 4805}
695af5f9 4806
a3241991
BN
4807static void
4808fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4809 uint64_t tiling_flags)
e7b07cee 4810{
a3241991
BN
4811 /* Fill GFX8 params */
4812 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4813 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
707477b0 4814
a3241991
BN
4815 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4816 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4817 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4818 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4819 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
b830ebc9 4820
a3241991
BN
4821 /* XXX fix me for VI */
4822 tiling_info->gfx8.num_banks = num_banks;
4823 tiling_info->gfx8.array_mode =
4824 DC_ARRAY_2D_TILED_THIN1;
4825 tiling_info->gfx8.tile_split = tile_split;
4826 tiling_info->gfx8.bank_width = bankw;
4827 tiling_info->gfx8.bank_height = bankh;
4828 tiling_info->gfx8.tile_aspect = mtaspect;
4829 tiling_info->gfx8.tile_mode =
4830 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4831 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4832 == DC_ARRAY_1D_TILED_THIN1) {
4833 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
e7b07cee
HW
4834 }
4835
a3241991
BN
4836 tiling_info->gfx8.pipe_config =
4837 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
e7b07cee
HW
4838}
4839
static void
fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
				  union dc_tiling_info *tiling_info)
{
	tiling_info->gfx9.num_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;
	tiling_info->gfx9.num_banks =
		adev->gfx.config.gb_addr_config_fields.num_banks;
	tiling_info->gfx9.pipe_interleave =
		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
	tiling_info->gfx9.num_shader_engines =
		adev->gfx.config.gb_addr_config_fields.num_se;
	tiling_info->gfx9.max_compressed_frags =
		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
	tiling_info->gfx9.num_rb_per_se =
		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
	tiling_info->gfx9.shaderEnable = 1;
	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
}

static int
validate_dcc(struct amdgpu_device *adev,
	     const enum surface_pixel_format format,
	     const enum dc_rotation_angle rotation,
	     const union dc_tiling_info *tiling_info,
	     const struct dc_plane_dcc_param *dcc,
	     const struct dc_plane_address *address,
	     const struct plane_size *plane_size)
{
	struct dc *dc = adev->dm.dc;
	struct dc_dcc_surface_param input;
	struct dc_surface_dcc_cap output;

	memset(&input, 0, sizeof(input));
	memset(&output, 0, sizeof(output));

	if (!dcc->enable)
		return 0;

	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
	    !dc->cap_funcs.get_dcc_compression_cap)
		return -EINVAL;

	input.format = format;
	input.surface_size.width = plane_size->surface_size.width;
	input.surface_size.height = plane_size->surface_size.height;
	input.swizzle_mode = tiling_info->gfx9.swizzle;

	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
		input.scan = SCAN_DIRECTION_HORIZONTAL;
	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
		input.scan = SCAN_DIRECTION_VERTICAL;

	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
		return -EINVAL;

	if (!output.capable)
		return -EINVAL;

	if (dcc->independent_64b_blks == 0 &&
	    output.grph.rgb.independent_64b_blks != 0)
		return -EINVAL;

	return 0;
}

static bool
modifier_has_dcc(uint64_t modifier)
{
	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
}

static unsigned
modifier_gfx9_swizzle_mode(uint64_t modifier)
{
	if (modifier == DRM_FORMAT_MOD_LINEAR)
		return 0;

	return AMD_FMT_MOD_GET(TILE, modifier);
}
4921
dfbbfe3c
BN
4922static const struct drm_format_info *
4923amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4924{
816853f9 4925 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
dfbbfe3c
BN
4926}
4927
37384b3f
BN
4928static void
4929fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4930 union dc_tiling_info *tiling_info,
4931 uint64_t modifier)
4932{
4933 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4934 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4935 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
543036a2
AP
4936 unsigned int pipes_log2;
4937
4938 pipes_log2 = min(5u, mod_pipe_xor_bits);
37384b3f
BN
4939
4940 fill_gfx9_tiling_info_from_device(adev, tiling_info);
4941
4942 if (!IS_AMD_FMT_MOD(modifier))
4943 return;
4944
4945 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4946 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4947
4948 if (adev->family >= AMDGPU_FAMILY_NV) {
4949 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4950 } else {
4951 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4952
4953 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4954 }
4955}
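/*
 * Worked example for the math above: a modifier with PIPE_XOR_BITS = 6
 * gives pipes_log2 = min(5, 6) = 5, so num_pipes = 1 << 5 = 32 and the
 * remaining XOR bit is attributed to shader engines:
 * num_shader_engines = 1 << (6 - 5) = 2.
 */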
4956
faa37f54
BN
4957enum dm_micro_swizzle {
4958 MICRO_SWIZZLE_Z = 0,
4959 MICRO_SWIZZLE_S = 1,
4960 MICRO_SWIZZLE_D = 2,
4961 MICRO_SWIZZLE_R = 3
4962};
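/*
 * The micro swizzle is carried in the low two bits of the GFX9+ swizzle
 * mode, so modifier_gfx9_swizzle_mode(modifier) & 3 below recovers it;
 * e.g. the *_S_X tile modes map to MICRO_SWIZZLE_S and the *_R_X modes
 * to MICRO_SWIZZLE_R.
 */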
4963
4964static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4965 uint32_t format,
4966 uint64_t modifier)
4967{
4968 struct amdgpu_device *adev = drm_to_adev(plane->dev);
4969 const struct drm_format_info *info = drm_format_info(format);
366e817e
BN
4970 int i;
4971
faa37f54
BN
4972 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4973
4974 if (!info)
4975 return false;
4976
4977 /*
fe180178
QZ
4978 * We always have to allow these modifiers:
4979 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4980 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
faa37f54 4981 */
fe180178
QZ
4982 if (modifier == DRM_FORMAT_MOD_LINEAR ||
4983 modifier == DRM_FORMAT_MOD_INVALID) {
faa37f54 4984 return true;
fe180178 4985 }
faa37f54 4986
366e817e
BN
4987 /* Check that the modifier is on the list of the plane's supported modifiers. */
4988 for (i = 0; i < plane->modifier_count; i++) {
4989 if (modifier == plane->modifiers[i])
fe180178
QZ
4990 break;
4991 }
366e817e
BN
4992 if (i == plane->modifier_count)
4993 return false;
faa37f54
BN
4994
4995 /*
4996 * For D swizzle the canonical modifier depends on the bpp, so check
4997 * it here.
4998 */
4999 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
5000 adev->family >= AMDGPU_FAMILY_NV) {
5001 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
5002 return false;
5003 }
5004
5005 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
5006 info->cpp[0] < 8)
5007 return false;
5008
5009 if (modifier_has_dcc(modifier)) {
5011 /* Per radeonsi comments, 16/64 bpp are more complicated. */
5011 if (info->cpp[0] != 4)
5012 return false;
951796f2
SS
5013 /* We support multi-planar formats, but not when combined with
5014 * additional DCC metadata planes. */
5015 if (info->num_planes > 1)
5016 return false;
faa37f54
BN
5017 }
5018
5019 return true;
5020}
5021
5022static void
5023add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
5024{
5025 if (!*mods)
5026 return;
5027
5028 if (*cap - *size < 1) {
5029 uint64_t new_cap = *cap * 2;
5030 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
5031
5032 if (!new_mods) {
5033 kfree(*mods);
5034 *mods = NULL;
5035 return;
5036 }
5037
5038 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
5039 kfree(*mods);
5040 *mods = new_mods;
5041 *cap = new_cap;
5042 }
5043
5044 (*mods)[*size] = mod;
5045 *size += 1;
5046}
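/*
 * add_modifier() grows the list geometrically: when the array is full
 * its capacity doubles, the old contents are copied over and the old
 * allocation is freed. On allocation failure *mods is freed and set to
 * NULL, which callers such as get_plane_modifiers() translate into
 * -ENOMEM.
 */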
5047
5048static void
5049add_gfx9_modifiers(const struct amdgpu_device *adev,
5050 uint64_t **mods, uint64_t *size, uint64_t *capacity)
5051{
5052 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5053 int pipe_xor_bits = min(8, pipes +
5054 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
5055 int bank_xor_bits = min(8 - pipe_xor_bits,
5056 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
5057 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
5058 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
5059
5060
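	/*
	 * The GFX9 modifier layout budgets at most 8 XOR bits across pipes
	 * and banks: e.g. with 16 pipes (ilog2 = 4) and 4 shader engines
	 * (ilog2 = 2), pipe_xor_bits = min(8, 4 + 2) = 6, leaving at most
	 * 8 - 6 = 2 bank XOR bits.
	 */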
5061 if (adev->family == AMDGPU_FAMILY_RV) {
5062 /* Raven2 and later */
5063 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
5064
5065 /*
5066 * No _D DCC swizzles yet because we only allow 32bpp, which
5067 * doesn't support _D on DCN
5068 */
5069
5070 if (has_constant_encode) {
5071 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5072 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5073 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5074 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5075 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5076 AMD_FMT_MOD_SET(DCC, 1) |
5077 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5078 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5079 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
5080 }
5081
5082 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5083 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5084 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5085 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5086 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5087 AMD_FMT_MOD_SET(DCC, 1) |
5088 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5089 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5090 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
5091
5092 if (has_constant_encode) {
5093 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5094 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5095 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5096 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5097 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5098 AMD_FMT_MOD_SET(DCC, 1) |
5099 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5100 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5101 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5102
5103 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5104 AMD_FMT_MOD_SET(RB, rb) |
5105 AMD_FMT_MOD_SET(PIPE, pipes));
5106 }
5107
5108 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5109 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5110 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5111 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5112 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5113 AMD_FMT_MOD_SET(DCC, 1) |
5114 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5115 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5116 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5117 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
5118 AMD_FMT_MOD_SET(RB, rb) |
5119 AMD_FMT_MOD_SET(PIPE, pipes));
5120 }
5121
5122 /*
5123 * Only supported for 64bpp on Raven, will be filtered on format in
5124 * dm_plane_format_mod_supported.
5125 */
5126 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5127 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
5128 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5129 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5130 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5131
5132 if (adev->family == AMDGPU_FAMILY_RV) {
5133 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5134 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5135 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5136 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5137 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5138 }
5139
5140 /*
5141 * Only supported for 64bpp on Raven, will be filtered on format in
5142 * dm_plane_format_mod_supported.
5143 */
5144 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5145 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5146 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5147
5148 if (adev->family == AMDGPU_FAMILY_RV) {
5149 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5150 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5151 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5152 }
5153}
5154
5155static void
5156add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5157 uint64_t **mods, uint64_t *size, uint64_t *capacity)
5158{
5159 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5160
5161 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5162 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5163 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5164 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5165 AMD_FMT_MOD_SET(DCC, 1) |
5166 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5167 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5168 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5169
5170 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5171 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5172 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5173 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5174 AMD_FMT_MOD_SET(DCC, 1) |
5175 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5176 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5177 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5178 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5179
5180 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5181 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5182 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5183 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5184
5185 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5186 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5187 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5188 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5189
5190
5191 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5192 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5193 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5194 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5195
5196 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5197 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5198 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5199}
5200
5201static void
5202add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5203 uint64_t **mods, uint64_t *size, uint64_t *capacity)
5204{
5205 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5206 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5207
5208 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5209 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5210 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5211 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5212 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5213 AMD_FMT_MOD_SET(DCC, 1) |
5214 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5215 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5216 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
2b608182 5217 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
faa37f54 5218
7f6ab50a
JA
5219 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5220 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5221 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5222 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5223 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5224 AMD_FMT_MOD_SET(DCC, 1) |
5225 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5226 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5227 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5228
faa37f54
BN
5229 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5230 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5231 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5232 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5233 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5234 AMD_FMT_MOD_SET(DCC, 1) |
5235 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5236 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5237 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5238 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
2b608182 5239 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
faa37f54 5240
7f6ab50a
JA
5241 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5242 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5243 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5244 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5245 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5246 AMD_FMT_MOD_SET(DCC, 1) |
5247 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5248 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5249 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5250 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5251
faa37f54
BN
5252 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5253 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5254 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5255 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5256 AMD_FMT_MOD_SET(PACKERS, pkrs));
5257
5258 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5259 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5260 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5261 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5262 AMD_FMT_MOD_SET(PACKERS, pkrs));
5263
5264 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5265 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5266 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5267 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5268
5269 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5270 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5271 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5272}
5273
543036a2
AP
5274static void
5275add_gfx11_modifiers(struct amdgpu_device *adev,
5276 uint64_t **mods, uint64_t *size, uint64_t *capacity)
5277{
5278 int num_pipes = 0;
5279 int pipe_xor_bits = 0;
5280 int num_pkrs = 0;
5281 int pkrs = 0;
5282 u32 gb_addr_config;
ff15cea3 5283 u8 i = 0;
543036a2
AP
5284 unsigned swizzle_r_x;
5285 uint64_t modifier_r_x;
5286 uint64_t modifier_dcc_best;
5287 uint64_t modifier_dcc_4k;
5288
 5289 /* TODO: GFX11 IP HW init hasn't finished and we get zero if we read
 5290 * from adev->gfx.config.gb_addr_config_fields.num_{pkrs,pipes} */
5291 gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG);
5292 ASSERT(gb_addr_config != 0);
5293
5294 num_pkrs = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS);
5295 pkrs = ilog2(num_pkrs);
5296 num_pipes = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PIPES);
5297 pipe_xor_bits = ilog2(num_pipes);
5298
ff15cea3
AP
5299 for (i = 0; i < 2; i++) {
5300 /* Insert the best one first. */
5301 /* R_X swizzle modes are the best for rendering and DCC requires them. */
5302 if (num_pipes > 16)
5303 swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX11_256K_R_X : AMD_FMT_MOD_TILE_GFX9_64K_R_X;
5304 else
5305 swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX9_64K_R_X : AMD_FMT_MOD_TILE_GFX11_256K_R_X;
5306
5307 modifier_r_x = AMD_FMT_MOD |
5308 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
5309 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5310 AMD_FMT_MOD_SET(TILE, swizzle_r_x) |
5311 AMD_FMT_MOD_SET(PACKERS, pkrs);
5312
5313 /* DCC_CONSTANT_ENCODE is not set because it can't vary with gfx11 (it's implied to be 1). */
5314 modifier_dcc_best = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
5315 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 0) |
5316 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5317 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B);
5318
5319 /* DCC settings for 4K and greater resolutions. (required by display hw) */
5320 modifier_dcc_4k = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
5321 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5322 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5323 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B);
5324
5325 add_modifier(mods, size, capacity, modifier_dcc_best);
5326 add_modifier(mods, size, capacity, modifier_dcc_4k);
5327
5328 add_modifier(mods, size, capacity, modifier_dcc_best | AMD_FMT_MOD_SET(DCC_RETILE, 1));
5329 add_modifier(mods, size, capacity, modifier_dcc_4k | AMD_FMT_MOD_SET(DCC_RETILE, 1));
5330
5331 add_modifier(mods, size, capacity, modifier_r_x);
5332 }
543036a2
AP
5333
5334 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5335 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
5336 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D));
5337}
5338
faa37f54 5339static int
543036a2 5340get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
faa37f54
BN
5341{
5342 uint64_t size = 0, capacity = 128;
5343 *mods = NULL;
5344
5345 /* We have not hooked up any pre-GFX9 modifiers. */
5346 if (adev->family < AMDGPU_FAMILY_AI)
5347 return 0;
5348
5349 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5350
5351 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5352 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5353 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5354 return *mods ? 0 : -ENOMEM;
5355 }
5356
5357 switch (adev->family) {
5358 case AMDGPU_FAMILY_AI:
5359 case AMDGPU_FAMILY_RV:
5360 add_gfx9_modifiers(adev, mods, &size, &capacity);
5361 break;
5362 case AMDGPU_FAMILY_NV:
5363 case AMDGPU_FAMILY_VGH:
1ebcaebd 5364 case AMDGPU_FAMILY_YC:
b5b8ed44 5365 case AMDGPU_FAMILY_GC_10_3_6:
de7cc1b4 5366 case AMDGPU_FAMILY_GC_10_3_7:
1d789535 5367 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
faa37f54
BN
5368 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5369 else
5370 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5371 break;
543036a2
AP
5372 case AMDGPU_FAMILY_GC_11_0_0:
5373 add_gfx11_modifiers(adev, mods, &size, &capacity);
5374 break;
faa37f54
BN
5375 }
5376
5377 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5378
5379 /* INVALID marks the end of the list. */
5380 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5381
5382 if (!*mods)
5383 return -ENOMEM;
5384
5385 return 0;
5386}
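/*
 * A rough sketch of how a plane-init caller is expected to consume this
 * list (names and error handling here are illustrative, not the exact
 * init path):
 *
 *	uint64_t *mods = NULL;
 *
 *	if (get_plane_modifiers(adev, plane_type, &mods))
 *		return -ENOMEM;
 *	drm_universal_plane_init(dev, plane, possible_crtcs, &funcs,
 *				 formats, num_formats, mods, plane_type,
 *				 NULL);
 *	kfree(mods);
 *
 * LINEAR is always advertised (core DRM assumes it when userspace
 * passes no modifiers) and DRM_FORMAT_MOD_INVALID terminates the array,
 * mirroring the always-allow cases in dm_plane_format_mod_supported().
 */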
5387
37384b3f
BN
5388static int
5389fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5390 const struct amdgpu_framebuffer *afb,
5391 const enum surface_pixel_format format,
5392 const enum dc_rotation_angle rotation,
5393 const struct plane_size *plane_size,
5394 union dc_tiling_info *tiling_info,
5395 struct dc_plane_dcc_param *dcc,
5396 struct dc_plane_address *address,
5397 const bool force_disable_dcc)
5398{
5399 const uint64_t modifier = afb->base.modifier;
2be7f77f 5400 int ret = 0;
37384b3f
BN
5401
5402 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5403 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5404
5405 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5406 uint64_t dcc_address = afb->address + afb->base.offsets[1];
3d360154 5407 bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
a86396c3 5408 bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
37384b3f
BN
5409
5410 dcc->enable = 1;
5411 dcc->meta_pitch = afb->base.pitches[1];
3d360154 5412 dcc->independent_64b_blks = independent_64b_blks;
543036a2 5413 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
a86396c3 5414 if (independent_64b_blks && independent_128b_blks)
f2e7d856 5415 dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
a86396c3
JA
5416 else if (independent_128b_blks)
5417 dcc->dcc_ind_blk = hubp_ind_block_128b;
5418 else if (independent_64b_blks && !independent_128b_blks)
f2e7d856 5419 dcc->dcc_ind_blk = hubp_ind_block_64b;
a86396c3
JA
5420 else
5421 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5422 } else {
5423 if (independent_64b_blks)
5424 dcc->dcc_ind_blk = hubp_ind_block_64b;
5425 else
5426 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5427 }
37384b3f
BN
5428
5429 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5430 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5431 }
5432
5433 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5434 if (ret)
2be7f77f 5435 drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
7df7e505 5436
2be7f77f 5437 return ret;
09e5665a
NK
5438}
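/*
 * Note: when DCC is enabled the compression metadata is carried as a
 * second framebuffer plane, so afb->base.offsets[1]/pitches[1] above
 * describe the meta surface while plane 0 remains the pixel data. The
 * hubp_ind_block_* choice encodes which independent-block guarantees
 * (64B, 128B or both) the modifier advertised.
 */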
5439
5440static int
320932bf 5441fill_plane_buffer_attributes(struct amdgpu_device *adev,
09e5665a 5442 const struct amdgpu_framebuffer *afb,
695af5f9
NK
5443 const enum surface_pixel_format format,
5444 const enum dc_rotation_angle rotation,
5445 const uint64_t tiling_flags,
09e5665a 5446 union dc_tiling_info *tiling_info,
12e2b2d4 5447 struct plane_size *plane_size,
09e5665a 5448 struct dc_plane_dcc_param *dcc,
87b7ebc2 5449 struct dc_plane_address *address,
5888f07a 5450 bool tmz_surface,
87b7ebc2 5451 bool force_disable_dcc)
09e5665a 5452{
320932bf 5453 const struct drm_framebuffer *fb = &afb->base;
09e5665a
NK
5454 int ret;
5455
5456 memset(tiling_info, 0, sizeof(*tiling_info));
320932bf 5457 memset(plane_size, 0, sizeof(*plane_size));
09e5665a 5458 memset(dcc, 0, sizeof(*dcc));
e0634e8d
NK
5459 memset(address, 0, sizeof(*address));
5460
5888f07a
HW
5461 address->tmz_surface = tmz_surface;
5462
695af5f9 5463 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
be7b9b32
BN
5464 uint64_t addr = afb->address + fb->offsets[0];
5465
12e2b2d4
DL
5466 plane_size->surface_size.x = 0;
5467 plane_size->surface_size.y = 0;
5468 plane_size->surface_size.width = fb->width;
5469 plane_size->surface_size.height = fb->height;
5470 plane_size->surface_pitch =
320932bf
NK
5471 fb->pitches[0] / fb->format->cpp[0];
5472
e0634e8d 5473 address->type = PLN_ADDR_TYPE_GRAPHICS;
be7b9b32
BN
5474 address->grph.addr.low_part = lower_32_bits(addr);
5475 address->grph.addr.high_part = upper_32_bits(addr);
1894478a 5476 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
be7b9b32 5477 uint64_t luma_addr = afb->address + fb->offsets[0];
1791e54f 5478 uint64_t chroma_addr = afb->address + fb->offsets[1];
e0634e8d 5479
12e2b2d4
DL
5480 plane_size->surface_size.x = 0;
5481 plane_size->surface_size.y = 0;
5482 plane_size->surface_size.width = fb->width;
5483 plane_size->surface_size.height = fb->height;
5484 plane_size->surface_pitch =
320932bf
NK
5485 fb->pitches[0] / fb->format->cpp[0];
5486
12e2b2d4
DL
5487 plane_size->chroma_size.x = 0;
5488 plane_size->chroma_size.y = 0;
320932bf 5489 /* TODO: set these based on surface format */
12e2b2d4
DL
5490 plane_size->chroma_size.width = fb->width / 2;
5491 plane_size->chroma_size.height = fb->height / 2;
320932bf 5492
12e2b2d4 5493 plane_size->chroma_pitch =
320932bf
NK
5494 fb->pitches[1] / fb->format->cpp[1];
5495
e0634e8d
NK
5496 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5497 address->video_progressive.luma_addr.low_part =
be7b9b32 5498 lower_32_bits(luma_addr);
e0634e8d 5499 address->video_progressive.luma_addr.high_part =
be7b9b32 5500 upper_32_bits(luma_addr);
e0634e8d
NK
5501 address->video_progressive.chroma_addr.low_part =
5502 lower_32_bits(chroma_addr);
5503 address->video_progressive.chroma_addr.high_part =
5504 upper_32_bits(chroma_addr);
5505 }
09e5665a 5506
a3241991 5507 if (adev->family >= AMDGPU_FAMILY_AI) {
9a33e881
BN
5508 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5509 rotation, plane_size,
5510 tiling_info, dcc,
5511 address,
5512 force_disable_dcc);
09e5665a
NK
5513 if (ret)
5514 return ret;
a3241991
BN
5515 } else {
5516 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
09e5665a
NK
5517 }
5518
5519 return 0;
7df7e505
NK
5520}
5521
d74004b6 5522static void
695af5f9 5523fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
76818cdd
SJK
5524 bool *per_pixel_alpha, bool *pre_multiplied_alpha,
5525 bool *global_alpha, int *global_alpha_value)
d74004b6
NK
5526{
5527 *per_pixel_alpha = false;
76818cdd 5528 *pre_multiplied_alpha = true;
d74004b6
NK
5529 *global_alpha = false;
5530 *global_alpha_value = 0xff;
5531
5532 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5533 return;
5534
76818cdd
SJK
5535 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI ||
5536 plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) {
d74004b6
NK
5537 static const uint32_t alpha_formats[] = {
5538 DRM_FORMAT_ARGB8888,
5539 DRM_FORMAT_RGBA8888,
5540 DRM_FORMAT_ABGR8888,
5541 };
5542 uint32_t format = plane_state->fb->format->format;
5543 unsigned int i;
5544
5545 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5546 if (format == alpha_formats[i]) {
5547 *per_pixel_alpha = true;
5548 break;
5549 }
5550 }
76818cdd
SJK
5551
5552 if (per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE)
5553 *pre_multiplied_alpha = false;
d74004b6
NK
5554 }
5555
5556 if (plane_state->alpha < 0xffff) {
5557 *global_alpha = true;
5558 *global_alpha_value = plane_state->alpha >> 8;
5559 }
5560}
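/*
 * DRM expresses plane alpha as a 16-bit value (0xffff == opaque) while
 * DC takes an 8-bit global alpha, hence the >> 8 above: e.g. a
 * client-requested alpha of 0x8000 (~50%) becomes a global_alpha_value
 * of 0x80, and only a non-opaque alpha enables global_alpha at all.
 */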
5561
004fefa3
NK
5562static int
5563fill_plane_color_attributes(const struct drm_plane_state *plane_state,
695af5f9 5564 const enum surface_pixel_format format,
004fefa3
NK
5565 enum dc_color_space *color_space)
5566{
5567 bool full_range;
5568
5569 *color_space = COLOR_SPACE_SRGB;
5570
5571 /* DRM color properties only affect non-RGB formats. */
695af5f9 5572 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
004fefa3
NK
5573 return 0;
5574
5575 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5576
5577 switch (plane_state->color_encoding) {
5578 case DRM_COLOR_YCBCR_BT601:
5579 if (full_range)
5580 *color_space = COLOR_SPACE_YCBCR601;
5581 else
5582 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
5583 break;
5584
5585 case DRM_COLOR_YCBCR_BT709:
5586 if (full_range)
5587 *color_space = COLOR_SPACE_YCBCR709;
5588 else
5589 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
5590 break;
5591
5592 case DRM_COLOR_YCBCR_BT2020:
5593 if (full_range)
5594 *color_space = COLOR_SPACE_2020_YCBCR;
5595 else
5596 return -EINVAL;
5597 break;
5598
5599 default:
5600 return -EINVAL;
5601 }
5602
5603 return 0;
5604}
5605
695af5f9
NK
5606static int
5607fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5608 const struct drm_plane_state *plane_state,
5609 const uint64_t tiling_flags,
5610 struct dc_plane_info *plane_info,
87b7ebc2 5611 struct dc_plane_address *address,
5888f07a 5612 bool tmz_surface,
87b7ebc2 5613 bool force_disable_dcc)
695af5f9
NK
5614{
5615 const struct drm_framebuffer *fb = plane_state->fb;
5616 const struct amdgpu_framebuffer *afb =
5617 to_amdgpu_framebuffer(plane_state->fb);
695af5f9
NK
5618 int ret;
5619
5620 memset(plane_info, 0, sizeof(*plane_info));
5621
5622 switch (fb->format->format) {
5623 case DRM_FORMAT_C8:
5624 plane_info->format =
5625 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5626 break;
5627 case DRM_FORMAT_RGB565:
5628 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5629 break;
5630 case DRM_FORMAT_XRGB8888:
5631 case DRM_FORMAT_ARGB8888:
5632 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5633 break;
5634 case DRM_FORMAT_XRGB2101010:
5635 case DRM_FORMAT_ARGB2101010:
5636 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5637 break;
5638 case DRM_FORMAT_XBGR2101010:
5639 case DRM_FORMAT_ABGR2101010:
5640 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5641 break;
5642 case DRM_FORMAT_XBGR8888:
5643 case DRM_FORMAT_ABGR8888:
5644 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5645 break;
5646 case DRM_FORMAT_NV21:
5647 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5648 break;
5649 case DRM_FORMAT_NV12:
5650 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5651 break;
cbec6477
SW
5652 case DRM_FORMAT_P010:
5653 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5654 break;
492548dc
SW
5655 case DRM_FORMAT_XRGB16161616F:
5656 case DRM_FORMAT_ARGB16161616F:
5657 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5658 break;
2a5195dc
MK
5659 case DRM_FORMAT_XBGR16161616F:
5660 case DRM_FORMAT_ABGR16161616F:
5661 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5662 break;
58020403
MK
5663 case DRM_FORMAT_XRGB16161616:
5664 case DRM_FORMAT_ARGB16161616:
5665 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5666 break;
5667 case DRM_FORMAT_XBGR16161616:
5668 case DRM_FORMAT_ABGR16161616:
5669 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5670 break;
695af5f9
NK
5671 default:
5672 DRM_ERROR(
92f1d09c
SA
5673 "Unsupported screen format %p4cc\n",
5674 &fb->format->format);
695af5f9
NK
5675 return -EINVAL;
5676 }
5677
5678 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5679 case DRM_MODE_ROTATE_0:
5680 plane_info->rotation = ROTATION_ANGLE_0;
5681 break;
5682 case DRM_MODE_ROTATE_90:
5683 plane_info->rotation = ROTATION_ANGLE_90;
5684 break;
5685 case DRM_MODE_ROTATE_180:
5686 plane_info->rotation = ROTATION_ANGLE_180;
5687 break;
5688 case DRM_MODE_ROTATE_270:
5689 plane_info->rotation = ROTATION_ANGLE_270;
5690 break;
5691 default:
5692 plane_info->rotation = ROTATION_ANGLE_0;
5693 break;
5694 }
5695
5696 plane_info->visible = true;
5697 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5698
6d83a32d
MS
5699 plane_info->layer_index = 0;
5700
695af5f9
NK
5701 ret = fill_plane_color_attributes(plane_state, plane_info->format,
5702 &plane_info->color_space);
5703 if (ret)
5704 return ret;
5705
5706 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5707 plane_info->rotation, tiling_flags,
5708 &plane_info->tiling_info,
5709 &plane_info->plane_size,
5888f07a 5710 &plane_info->dcc, address, tmz_surface,
87b7ebc2 5711 force_disable_dcc);
695af5f9
NK
5712 if (ret)
5713 return ret;
5714
5715 fill_blending_from_plane_state(
76818cdd 5716 plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
695af5f9
NK
5717 &plane_info->global_alpha, &plane_info->global_alpha_value);
5718
5719 return 0;
5720}
5721
5722static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5723 struct dc_plane_state *dc_plane_state,
5724 struct drm_plane_state *plane_state,
5725 struct drm_crtc_state *crtc_state)
e7b07cee 5726{
cf020d49 5727 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6eed95b0 5728 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
695af5f9
NK
5729 struct dc_scaling_info scaling_info;
5730 struct dc_plane_info plane_info;
695af5f9 5731 int ret;
87b7ebc2 5732 bool force_disable_dcc = false;
e7b07cee 5733
4375d625 5734 ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
695af5f9
NK
5735 if (ret)
5736 return ret;
e7b07cee 5737
695af5f9
NK
5738 dc_plane_state->src_rect = scaling_info.src_rect;
5739 dc_plane_state->dst_rect = scaling_info.dst_rect;
5740 dc_plane_state->clip_rect = scaling_info.clip_rect;
5741 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
e7b07cee 5742
87b7ebc2 5743 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
707477b0 5744 ret = fill_dc_plane_info_and_addr(adev, plane_state,
6eed95b0 5745 afb->tiling_flags,
695af5f9 5746 &plane_info,
87b7ebc2 5747 &dc_plane_state->address,
6eed95b0 5748 afb->tmz_surface,
87b7ebc2 5749 force_disable_dcc);
004fefa3
NK
5750 if (ret)
5751 return ret;
5752
695af5f9
NK
5753 dc_plane_state->format = plane_info.format;
5754 dc_plane_state->color_space = plane_info.color_space;
5756 dc_plane_state->plane_size = plane_info.plane_size;
5757 dc_plane_state->rotation = plane_info.rotation;
5758 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5759 dc_plane_state->stereo_format = plane_info.stereo_format;
5760 dc_plane_state->tiling_info = plane_info.tiling_info;
5761 dc_plane_state->visible = plane_info.visible;
5762 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
76818cdd 5763 dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
695af5f9
NK
5764 dc_plane_state->global_alpha = plane_info.global_alpha;
5765 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5766 dc_plane_state->dcc = plane_info.dcc;
6d83a32d 5767 dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
7afa0033 5768 dc_plane_state->flip_int_enabled = true;
695af5f9 5769
e277adc5
LSL
5770 /*
5771 * Always set input transfer function, since plane state is refreshed
5772 * every time.
5773 */
cf020d49
NK
5774 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5775 if (ret)
5776 return ret;
e7b07cee 5777
cf020d49 5778 return 0;
e7b07cee
HW
5779}
5780
7cc191ee
LL
5781/**
5782 * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
5783 *
5784 * @plane: DRM plane containing dirty regions that need to be flushed to the eDP
5785 * remote fb
5786 * @old_plane_state: Old state of @plane
5787 * @new_plane_state: New state of @plane
5788 * @crtc_state: New state of CRTC connected to the @plane
 5789 * @flip_addrs: DC flip tracking struct, which also tracks dirty rects
5790 *
5791 * For PSR SU, DC informs the DMUB uController of dirty rectangle regions
5792 * (referred to as "damage clips" in DRM nomenclature) that require updating on
5793 * the eDP remote buffer. The responsibility of specifying the dirty regions is
5794 * amdgpu_dm's.
5795 *
5796 * A damage-aware DRM client should fill the FB_DAMAGE_CLIPS property on the
5797 * plane with regions that require flushing to the eDP remote buffer. In
5798 * addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
5799 * implicitly provide damage clips without any client support via the plane
5800 * bounds.
5801 *
 5802 * Today, amdgpu_dm only supports the MPO and cursor use cases.
5803 *
5804 * TODO: Also enable for FB_DAMAGE_CLIPS
5805 */
5806static void fill_dc_dirty_rects(struct drm_plane *plane,
5807 struct drm_plane_state *old_plane_state,
5808 struct drm_plane_state *new_plane_state,
5809 struct drm_crtc_state *crtc_state,
5810 struct dc_flip_addrs *flip_addrs)
5811{
5812 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5813 struct rect *dirty_rects = flip_addrs->dirty_rects;
5814 uint32_t num_clips;
5815 bool bb_changed;
5816 bool fb_changed;
5817 uint32_t i = 0;
5818
5819 flip_addrs->dirty_rect_count = 0;
5820
5821 /*
 5822 * Cursor plane has its own dirty rect update interface. See
5823 * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data
5824 */
5825 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5826 return;
5827
5828 /*
 5829 * Today, we only consider the MPO use case for PSR SU. If MPO is not
 5830 * requested and there is a plane update, do a full-frame update (FFU).
5831 */
5832 if (!dm_crtc_state->mpo_requested) {
5833 dirty_rects[0].x = 0;
5834 dirty_rects[0].y = 0;
5835 dirty_rects[0].width = dm_crtc_state->base.mode.crtc_hdisplay;
5836 dirty_rects[0].height = dm_crtc_state->base.mode.crtc_vdisplay;
5837 flip_addrs->dirty_rect_count = 1;
5838 DRM_DEBUG_DRIVER("[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
5839 new_plane_state->plane->base.id,
5840 dm_crtc_state->base.mode.crtc_hdisplay,
5841 dm_crtc_state->base.mode.crtc_vdisplay);
5842 return;
5843 }
5844
5845 /*
5846 * MPO is requested. Add entire plane bounding box to dirty rects if
5847 * flipped to or damaged.
5848 *
5849 * If plane is moved or resized, also add old bounding box to dirty
5850 * rects.
5851 */
5852 num_clips = drm_plane_get_damage_clips_count(new_plane_state);
5853 fb_changed = old_plane_state->fb->base.id !=
5854 new_plane_state->fb->base.id;
5855 bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x ||
5856 old_plane_state->crtc_y != new_plane_state->crtc_y ||
5857 old_plane_state->crtc_w != new_plane_state->crtc_w ||
5858 old_plane_state->crtc_h != new_plane_state->crtc_h);
5859
5860 DRM_DEBUG_DRIVER("[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
5861 new_plane_state->plane->base.id,
5862 bb_changed, fb_changed, num_clips);
5863
5864 if (num_clips || fb_changed || bb_changed) {
5865 dirty_rects[i].x = new_plane_state->crtc_x;
5866 dirty_rects[i].y = new_plane_state->crtc_y;
5867 dirty_rects[i].width = new_plane_state->crtc_w;
5868 dirty_rects[i].height = new_plane_state->crtc_h;
5869 DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
5870 new_plane_state->plane->base.id,
5871 dirty_rects[i].x, dirty_rects[i].y,
5872 dirty_rects[i].width, dirty_rects[i].height);
5873 i += 1;
5874 }
5875
5876 /* Add old plane bounding-box if plane is moved or resized */
5877 if (bb_changed) {
5878 dirty_rects[i].x = old_plane_state->crtc_x;
5879 dirty_rects[i].y = old_plane_state->crtc_y;
5880 dirty_rects[i].width = old_plane_state->crtc_w;
5881 dirty_rects[i].height = old_plane_state->crtc_h;
5882 DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
5883 old_plane_state->plane->base.id,
5884 dirty_rects[i].x, dirty_rects[i].y,
5885 dirty_rects[i].width, dirty_rects[i].height);
5886 i += 1;
5887 }
5888
5889 flip_addrs->dirty_rect_count = i;
5890}
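/*
 * Example of the accounting above: an MPO plane that both moved and
 * flipped produces two dirty rects, the new plane bounds followed by
 * the old ones, so dirty_rect_count ends up at 2; a non-MPO update
 * collapses to a single full-frame rect covering the whole CRTC.
 */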
5891
3ee6b26b
AD
5892static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5893 const struct dm_connector_state *dm_state,
5894 struct dc_stream_state *stream)
e7b07cee
HW
5895{
5896 enum amdgpu_rmx_type rmx_type;
5897
 5898 struct rect src = { 0 }; /* viewport in composition space */
5899 struct rect dst = { 0 }; /* stream addressable area */
5900
5901 /* no mode. nothing to be done */
5902 if (!mode)
5903 return;
5904
5905 /* Full screen scaling by default */
5906 src.width = mode->hdisplay;
5907 src.height = mode->vdisplay;
5908 dst.width = stream->timing.h_addressable;
5909 dst.height = stream->timing.v_addressable;
5910
f4791779
HW
5911 if (dm_state) {
5912 rmx_type = dm_state->scaling;
5913 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5914 if (src.width * dst.height <
5915 src.height * dst.width) {
5916 /* height needs less upscaling/more downscaling */
5917 dst.width = src.width *
5918 dst.height / src.height;
5919 } else {
5920 /* width needs less upscaling/more downscaling */
5921 dst.height = src.height *
5922 dst.width / src.width;
5923 }
5924 } else if (rmx_type == RMX_CENTER) {
5925 dst = src;
e7b07cee 5926 }
e7b07cee 5927
f4791779
HW
5928 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5929 dst.y = (stream->timing.v_addressable - dst.height) / 2;
e7b07cee 5930
f4791779
HW
5931 if (dm_state->underscan_enable) {
5932 dst.x += dm_state->underscan_hborder / 2;
5933 dst.y += dm_state->underscan_vborder / 2;
5934 dst.width -= dm_state->underscan_hborder;
5935 dst.height -= dm_state->underscan_vborder;
5936 }
e7b07cee
HW
5937 }
5938
5939 stream->src = src;
5940 stream->dst = dst;
5941
4711c033
LT
5942 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
5943 dst.x, dst.y, dst.width, dst.height);
e7b07cee
HW
5944
5945}
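/*
 * Worked example for RMX_ASPECT above: scaling a 1280x720 source onto a
 * 1920x1200 stream keeps the width (1280 * 1200 > 720 * 1920) and
 * shrinks the height to 720 * 1920 / 1280 = 1080, centered with
 * dst.y = (1200 - 1080) / 2 = 60.
 */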
5946
3ee6b26b 5947static enum dc_color_depth
42ba01fc 5948convert_color_depth_from_display_info(const struct drm_connector *connector,
cbd14ae7 5949 bool is_y420, int requested_bpc)
e7b07cee 5950{
1bc22f20 5951 uint8_t bpc;
01c22997 5952
1bc22f20
SW
5953 if (is_y420) {
5954 bpc = 8;
5955
5956 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5957 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5958 bpc = 16;
5959 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5960 bpc = 12;
5961 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5962 bpc = 10;
5963 } else {
5964 bpc = (uint8_t)connector->display_info.bpc;
5965 /* Assume 8 bpc by default if no bpc is specified. */
5966 bpc = bpc ? bpc : 8;
5967 }
e7b07cee 5968
cbd14ae7 5969 if (requested_bpc > 0) {
01c22997
NK
5970 /*
5971 * Cap display bpc based on the user requested value.
5972 *
 5973 * The value for state->max_bpc may not be correctly updated
5974 * depending on when the connector gets added to the state
5975 * or if this was called outside of atomic check, so it
5976 * can't be used directly.
5977 */
cbd14ae7 5978 bpc = min_t(u8, bpc, requested_bpc);
01c22997 5979
1825fd34
NK
5980 /* Round down to the nearest even number. */
5981 bpc = bpc - (bpc & 1);
5982 }
07e3a1cf 5983
e7b07cee
HW
5984 switch (bpc) {
5985 case 0:
1f6010a9
DF
5986 /*
 5987 * Temporary workaround: DRM doesn't parse color depth for
e7b07cee
HW
 5988 * EDID revisions before 1.4
5989 * TODO: Fix edid parsing
5990 */
5991 return COLOR_DEPTH_888;
5992 case 6:
5993 return COLOR_DEPTH_666;
5994 case 8:
5995 return COLOR_DEPTH_888;
5996 case 10:
5997 return COLOR_DEPTH_101010;
5998 case 12:
5999 return COLOR_DEPTH_121212;
6000 case 14:
6001 return COLOR_DEPTH_141414;
6002 case 16:
6003 return COLOR_DEPTH_161616;
6004 default:
6005 return COLOR_DEPTH_UNDEFINED;
6006 }
6007}
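/*
 * Example of the capping above: a sink advertising 12 bpc with a user
 * max_bpc of 10 yields min(12, 10) = 10 -> COLOR_DEPTH_101010, and an
 * odd request such as 11 first rounds down to 10 via bpc - (bpc & 1).
 */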
6008
3ee6b26b
AD
6009static enum dc_aspect_ratio
6010get_aspect_ratio(const struct drm_display_mode *mode_in)
e7b07cee 6011{
e11d4147
LSL
6012 /* 1-1 mapping, since both enums follow the HDMI spec. */
6013 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
e7b07cee
HW
6014}
6015
3ee6b26b
AD
6016static enum dc_color_space
6017get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
e7b07cee
HW
6018{
6019 enum dc_color_space color_space = COLOR_SPACE_SRGB;
6020
6021 switch (dc_crtc_timing->pixel_encoding) {
6022 case PIXEL_ENCODING_YCBCR422:
6023 case PIXEL_ENCODING_YCBCR444:
6024 case PIXEL_ENCODING_YCBCR420:
6025 {
6026 /*
 6027 * 27030 kHz is the separation point between HDTV and SDTV
 6028 * according to the HDMI spec; we use YCbCr709 and YCbCr601,
 6029 * respectively
6030 */
380604e2 6031 if (dc_crtc_timing->pix_clk_100hz > 270300) {
e7b07cee
HW
6032 if (dc_crtc_timing->flags.Y_ONLY)
6033 color_space =
6034 COLOR_SPACE_YCBCR709_LIMITED;
6035 else
6036 color_space = COLOR_SPACE_YCBCR709;
6037 } else {
6038 if (dc_crtc_timing->flags.Y_ONLY)
6039 color_space =
6040 COLOR_SPACE_YCBCR601_LIMITED;
6041 else
6042 color_space = COLOR_SPACE_YCBCR601;
6043 }
6044
6045 }
6046 break;
6047 case PIXEL_ENCODING_RGB:
6048 color_space = COLOR_SPACE_SRGB;
6049 break;
6050
6051 default:
6052 WARN_ON(1);
6053 break;
6054 }
6055
6056 return color_space;
6057}
6058
ea117312
TA
6059static bool adjust_colour_depth_from_display_info(
6060 struct dc_crtc_timing *timing_out,
6061 const struct drm_display_info *info)
400443e8 6062{
ea117312 6063 enum dc_color_depth depth = timing_out->display_color_depth;
400443e8 6064 int normalized_clk;
400443e8 6065 do {
380604e2 6066 normalized_clk = timing_out->pix_clk_100hz / 10;
400443e8
ML
6067 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
6068 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
6069 normalized_clk /= 2;
6070 /* Adjusting pix clock following on HDMI spec based on colour depth */
ea117312
TA
6071 switch (depth) {
6072 case COLOR_DEPTH_888:
6073 break;
400443e8
ML
6074 case COLOR_DEPTH_101010:
6075 normalized_clk = (normalized_clk * 30) / 24;
6076 break;
6077 case COLOR_DEPTH_121212:
6078 normalized_clk = (normalized_clk * 36) / 24;
6079 break;
6080 case COLOR_DEPTH_161616:
6081 normalized_clk = (normalized_clk * 48) / 24;
6082 break;
6083 default:
ea117312
TA
6084 /* The above depths are the only ones valid for HDMI. */
6085 return false;
400443e8 6086 }
ea117312
TA
6087 if (normalized_clk <= info->max_tmds_clock) {
6088 timing_out->display_color_depth = depth;
6089 return true;
6090 }
6091 } while (--depth > COLOR_DEPTH_666);
6092 return false;
400443e8 6093}
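/*
 * Worked example for the loop above: 1080p60 has pix_clk_100hz ==
 * 1485000, i.e. a normalized clock of 148500 kHz. At 12 bpc that scales
 * by 36/24 to 222750 kHz, which must fit under the sink's
 * max_tmds_clock (also in kHz) or the loop steps down to 10 bpc and
 * finally 8 bpc.
 */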
e7b07cee 6094
42ba01fc
NK
6095static void fill_stream_properties_from_drm_display_mode(
6096 struct dc_stream_state *stream,
6097 const struct drm_display_mode *mode_in,
6098 const struct drm_connector *connector,
6099 const struct drm_connector_state *connector_state,
cbd14ae7
SW
6100 const struct dc_stream_state *old_stream,
6101 int requested_bpc)
e7b07cee
HW
6102{
6103 struct dc_crtc_timing *timing_out = &stream->timing;
fe61a2f1 6104 const struct drm_display_info *info = &connector->display_info;
d4252eee 6105 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
1cb1d477
WL
6106 struct hdmi_vendor_infoframe hv_frame;
6107 struct hdmi_avi_infoframe avi_frame;
e7b07cee 6108
acf83f86
WL
6109 memset(&hv_frame, 0, sizeof(hv_frame));
6110 memset(&avi_frame, 0, sizeof(avi_frame));
6111
e7b07cee
HW
6112 timing_out->h_border_left = 0;
6113 timing_out->h_border_right = 0;
6114 timing_out->v_border_top = 0;
6115 timing_out->v_border_bottom = 0;
6116 /* TODO: un-hardcode */
fe61a2f1 6117 if (drm_mode_is_420_only(info, mode_in)
ceb3dbb4 6118 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
fe61a2f1 6119 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
d4252eee
SW
6120 else if (drm_mode_is_420_also(info, mode_in)
6121 && aconnector->force_yuv420_output)
6122 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
c03d0b52 6123 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
ceb3dbb4 6124 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
e7b07cee
HW
6125 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
6126 else
6127 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
6128
6129 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
6130 timing_out->display_color_depth = convert_color_depth_from_display_info(
cbd14ae7
SW
6131 connector,
6132 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
6133 requested_bpc);
e7b07cee
HW
6134 timing_out->scan_type = SCANNING_TYPE_NODATA;
6135 timing_out->hdmi_vic = 0;
b333730d
BL
6136
 6137 if (old_stream) {
6138 timing_out->vic = old_stream->timing.vic;
6139 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
6140 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
6141 } else {
6142 timing_out->vic = drm_match_cea_mode(mode_in);
6143 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
6144 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
6145 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
6146 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
6147 }
e7b07cee 6148
1cb1d477
WL
6149 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
6150 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
6151 timing_out->vic = avi_frame.video_code;
6152 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
6153 timing_out->hdmi_vic = hv_frame.vic;
6154 }
6155
fe8858bb
NC
6156 if (is_freesync_video_mode(mode_in, aconnector)) {
6157 timing_out->h_addressable = mode_in->hdisplay;
6158 timing_out->h_total = mode_in->htotal;
6159 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
6160 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
6161 timing_out->v_total = mode_in->vtotal;
6162 timing_out->v_addressable = mode_in->vdisplay;
6163 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
6164 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
6165 timing_out->pix_clk_100hz = mode_in->clock * 10;
6166 } else {
6167 timing_out->h_addressable = mode_in->crtc_hdisplay;
6168 timing_out->h_total = mode_in->crtc_htotal;
6169 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
6170 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
6171 timing_out->v_total = mode_in->crtc_vtotal;
6172 timing_out->v_addressable = mode_in->crtc_vdisplay;
6173 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
6174 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
6175 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
6176 }
a85ba005 6177
e7b07cee 6178 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
e7b07cee
HW
6179
6180 stream->output_color_space = get_output_color_space(timing_out);
6181
e43a432c
AK
6182 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
6183 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
ea117312
TA
6184 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
6185 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
6186 drm_mode_is_420_also(info, mode_in) &&
6187 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
6188 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
6189 adjust_colour_depth_from_display_info(timing_out, info);
6190 }
6191 }
e7b07cee
HW
6192}
6193
3ee6b26b
AD
6194static void fill_audio_info(struct audio_info *audio_info,
6195 const struct drm_connector *drm_connector,
6196 const struct dc_sink *dc_sink)
e7b07cee
HW
6197{
6198 int i = 0;
6199 int cea_revision = 0;
6200 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
6201
6202 audio_info->manufacture_id = edid_caps->manufacturer_id;
6203 audio_info->product_id = edid_caps->product_id;
6204
6205 cea_revision = drm_connector->display_info.cea_rev;
6206
090afc1e 6207 strscpy(audio_info->display_name,
d2b2562c 6208 edid_caps->display_name,
090afc1e 6209 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
e7b07cee 6210
b830ebc9 6211 if (cea_revision >= 3) {
e7b07cee
HW
6212 audio_info->mode_count = edid_caps->audio_mode_count;
6213
6214 for (i = 0; i < audio_info->mode_count; ++i) {
6215 audio_info->modes[i].format_code =
6216 (enum audio_format_code)
6217 (edid_caps->audio_modes[i].format_code);
6218 audio_info->modes[i].channel_count =
6219 edid_caps->audio_modes[i].channel_count;
6220 audio_info->modes[i].sample_rates.all =
6221 edid_caps->audio_modes[i].sample_rate;
6222 audio_info->modes[i].sample_size =
6223 edid_caps->audio_modes[i].sample_size;
6224 }
6225 }
6226
6227 audio_info->flags.all = edid_caps->speaker_flags;
6228
6229 /* TODO: We only check for the progressive mode, check for interlace mode too */
b830ebc9 6230 if (drm_connector->latency_present[0]) {
e7b07cee
HW
6231 audio_info->video_latency = drm_connector->video_latency[0];
6232 audio_info->audio_latency = drm_connector->audio_latency[0];
6233 }
6234
6235 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
6236
6237}
6238
3ee6b26b
AD
6239static void
6240copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
6241 struct drm_display_mode *dst_mode)
e7b07cee
HW
6242{
6243 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
6244 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
6245 dst_mode->crtc_clock = src_mode->crtc_clock;
6246 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
6247 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
b830ebc9 6248 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
e7b07cee
HW
6249 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
6250 dst_mode->crtc_htotal = src_mode->crtc_htotal;
6251 dst_mode->crtc_hskew = src_mode->crtc_hskew;
6252 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
6253 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
6254 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
6255 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
6256 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
6257}
6258
3ee6b26b
AD
6259static void
6260decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
6261 const struct drm_display_mode *native_mode,
6262 bool scale_enabled)
e7b07cee
HW
6263{
6264 if (scale_enabled) {
6265 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6266 } else if (native_mode->clock == drm_mode->clock &&
6267 native_mode->htotal == drm_mode->htotal &&
6268 native_mode->vtotal == drm_mode->vtotal) {
6269 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6270 } else {
6271 /* no scaling nor amdgpu inserted, no need to patch */
6272 }
6273}
6274
aed15309
ML
6275static struct dc_sink *
6276create_fake_sink(struct amdgpu_dm_connector *aconnector)
2e0ac3d6 6277{
2e0ac3d6 6278 struct dc_sink_init_data sink_init_data = { 0 };
aed15309 6279 struct dc_sink *sink = NULL;
2e0ac3d6
HW
6280 sink_init_data.link = aconnector->dc_link;
6281 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
6282
6283 sink = dc_sink_create(&sink_init_data);
423788c7 6284 if (!sink) {
2e0ac3d6 6285 DRM_ERROR("Failed to create sink!\n");
aed15309 6286 return NULL;
423788c7 6287 }
2e0ac3d6 6288 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
423788c7 6289
aed15309 6290 return sink;
2e0ac3d6
HW
6291}
6292
fa2123db
ML
6293static void set_multisync_trigger_params(
6294 struct dc_stream_state *stream)
6295{
ec372186
ML
6296 struct dc_stream_state *master = NULL;
6297
fa2123db 6298 if (stream->triggered_crtc_reset.enabled) {
ec372186
ML
6299 master = stream->triggered_crtc_reset.event_source;
6300 stream->triggered_crtc_reset.event =
6301 master->timing.flags.VSYNC_POSITIVE_POLARITY ?
6302 CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
6303 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
fa2123db
ML
6304 }
6305}
6306
6307static void set_master_stream(struct dc_stream_state *stream_set[],
6308 int stream_count)
6309{
6310 int j, highest_rfr = 0, master_stream = 0;
6311
6312 for (j = 0; j < stream_count; j++) {
6313 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6314 int refresh_rate = 0;
6315
380604e2 6316 refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
fa2123db
ML
 6317 (stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
6318 if (refresh_rate > highest_rfr) {
6319 highest_rfr = refresh_rate;
6320 master_stream = j;
6321 }
6322 }
6323 }
6324 for (j = 0; j < stream_count; j++) {
03736f4c 6325 if (stream_set[j])
fa2123db
ML
6326 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6327 }
6328}
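/*
 * The refresh rate above is derived purely from the timing: e.g.
 * pix_clk_100hz == 1485000 (148.5 MHz) with a 2200x1125 total gives
 * 1485000 * 100 / (2200 * 1125) = 60 Hz, and the highest-rate stream
 * becomes the multisync master.
 */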
6329
6330static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6331{
6332 int i = 0;
ec372186 6333 struct dc_stream_state *stream;
fa2123db
ML
6334
6335 if (context->stream_count < 2)
6336 return;
6337 for (i = 0; i < context->stream_count ; i++) {
6338 if (!context->streams[i])
6339 continue;
1f6010a9
DF
6340 /*
6341 * TODO: add a function to read AMD VSDB bits and set
fa2123db 6342 * crtc_sync_master.multi_sync_enabled flag
1f6010a9 6343 * For now it's set to false
fa2123db 6344 */
fa2123db 6345 }
ec372186 6346
fa2123db 6347 set_master_stream(context->streams, context->stream_count);
ec372186
ML
6348
6349 for (i = 0; i < context->stream_count ; i++) {
6350 stream = context->streams[i];
6351
6352 if (!stream)
6353 continue;
6354
6355 set_multisync_trigger_params(stream);
6356 }
fa2123db
ML
6357}
6358
ea2be5c0 6359#if defined(CONFIG_DRM_AMD_DC_DCN)
998b7ad2
FZ
6360static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6361 struct dc_sink *sink, struct dc_stream_state *stream,
6362 struct dsc_dec_dpcd_caps *dsc_caps)
6363{
6364 stream->timing.flags.DSC = 0;
63ad5371 6365 dsc_caps->is_dsc_supported = false;
998b7ad2 6366
2665f63a
ML
6367 if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
6368 sink->sink_signal == SIGNAL_TYPE_EDP)) {
50b1f44e
FZ
6369 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
6370 sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
6371 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6372 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6373 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6374 dsc_caps);
998b7ad2
FZ
6375 }
6376}
6377
2665f63a
ML
6378static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
6379 struct dc_sink *sink, struct dc_stream_state *stream,
6380 struct dsc_dec_dpcd_caps *dsc_caps,
6381 uint32_t max_dsc_target_bpp_limit_override)
6382{
6383 const struct dc_link_settings *verified_link_cap = NULL;
6384 uint32_t link_bw_in_kbps;
6385 uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
6386 struct dc *dc = sink->ctx->dc;
6387 struct dc_dsc_bw_range bw_range = {0};
6388 struct dc_dsc_config dsc_cfg = {0};
6389
6390 verified_link_cap = dc_link_get_link_cap(stream->link);
6391 link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
6392 edp_min_bpp_x16 = 8 * 16;
6393 edp_max_bpp_x16 = 8 * 16;
6394
6395 if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
6396 edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
6397
6398 if (edp_max_bpp_x16 < edp_min_bpp_x16)
6399 edp_min_bpp_x16 = edp_max_bpp_x16;
6400
6401 if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
6402 dc->debug.dsc_min_slice_height_override,
6403 edp_min_bpp_x16, edp_max_bpp_x16,
6404 dsc_caps,
6405 &stream->timing,
6406 &bw_range)) {
6407
6408 if (bw_range.max_kbps < link_bw_in_kbps) {
6409 if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6410 dsc_caps,
6411 dc->debug.dsc_min_slice_height_override,
6412 max_dsc_target_bpp_limit_override,
6413 0,
6414 &stream->timing,
6415 &dsc_cfg)) {
6416 stream->timing.dsc_cfg = dsc_cfg;
6417 stream->timing.flags.DSC = 1;
6418 stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
6419 }
6420 return;
6421 }
6422 }
6423
6424 if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6425 dsc_caps,
6426 dc->debug.dsc_min_slice_height_override,
6427 max_dsc_target_bpp_limit_override,
6428 link_bw_in_kbps,
6429 &stream->timing,
6430 &dsc_cfg)) {
6431 stream->timing.dsc_cfg = dsc_cfg;
6432 stream->timing.flags.DSC = 1;
6433 }
6434}
6435
998b7ad2
FZ
6436static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6437 struct dc_sink *sink, struct dc_stream_state *stream,
6438 struct dsc_dec_dpcd_caps *dsc_caps)
6439{
6440 struct drm_connector *drm_connector = &aconnector->base;
6441 uint32_t link_bandwidth_kbps;
f1c1a982 6442 uint32_t max_dsc_target_bpp_limit_override = 0;
2665f63a 6443 struct dc *dc = sink->ctx->dc;
50b1f44e
FZ
6444 uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
6445 uint32_t dsc_max_supported_bw_in_kbps;
998b7ad2
FZ
6446
6447 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6448 dc_link_get_link_cap(aconnector->dc_link));
f1c1a982
RL
6449
6450 if (stream->link && stream->link->local_sink)
6451 max_dsc_target_bpp_limit_override =
6452 stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
de7cc1b4 6453
998b7ad2
FZ
6454 /* Set DSC policy according to dsc_clock_en */
6455 dc_dsc_policy_set_enable_dsc_when_not_needed(
6456 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6457
2665f63a
ML
6458 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
6459 dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6460
6461 apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6462
6463 } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
50b1f44e
FZ
6464 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
6465 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
998b7ad2
FZ
6466 dsc_caps,
6467 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
f1c1a982 6468 max_dsc_target_bpp_limit_override,
998b7ad2
FZ
6469 link_bandwidth_kbps,
6470 &stream->timing,
6471 &stream->timing.dsc_cfg)) {
50b1f44e
FZ
6472 stream->timing.flags.DSC = 1;
6473 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n",
6474 __func__, drm_connector->name);
6475 }
6476 } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
6477 timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
6478 max_supported_bw_in_kbps = link_bandwidth_kbps;
6479 dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
6480
6481 if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
6482 max_supported_bw_in_kbps > 0 &&
6483 dsc_max_supported_bw_in_kbps > 0)
6484 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6485 dsc_caps,
6486 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6487 max_dsc_target_bpp_limit_override,
6488 dsc_max_supported_bw_in_kbps,
6489 &stream->timing,
6490 &stream->timing.dsc_cfg)) {
6491 stream->timing.flags.DSC = 1;
6492 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
6493 __func__, drm_connector->name);
6494 }
998b7ad2
FZ
6495 }
6496 }
6497
6498 /* Overwrite the stream flag if DSC is enabled through debugfs */
6499 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6500 stream->timing.flags.DSC = 1;
6501
6502 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6503 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6504
6505 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6506 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6507
6508 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6509 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
998b7ad2 6510}
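
/*
 * The DP-HDMI PCON branch above only attempts DSC when the uncompressed
 * timing exceeds what the link can carry; condensed below as an
 * illustrative sketch, not driver code.
 */
#if 0
static bool pcon_needs_dsc(uint32_t timing_bw_in_kbps,
			   uint32_t link_bw_in_kbps)
{
	return link_bw_in_kbps > 0 && timing_bw_in_kbps > link_bw_in_kbps;
}
#endif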
433e5dec 6511#endif /* CONFIG_DRM_AMD_DC_DCN */
998b7ad2 6512
5fd953a3
RS
6513/**
6514 * DOC: FreeSync Video
6515 *
6516 * When a userspace application wants to play a video, the content follows a
6517 * standard format definition that usually specifies the FPS for that format.
6518 * The below list illustrates some video format and the expected FPS,
6519 * respectively:
6520 *
6521 * - TV/NTSC (23.976 FPS)
6522 * - Cinema (24 FPS)
6523 * - TV/PAL (25 FPS)
6524 * - TV/NTSC (29.97 FPS)
6525 * - TV/NTSC (30 FPS)
6526 * - Cinema HFR (48 FPS)
6527 * - TV/PAL (50 FPS)
6528 * - Commonly used (60 FPS)
12cdff6b 6529 * - Multiples of 24 (48, 72, 96, 120 FPS)
5fd953a3
RS
6530 *
6531 * The list of standard video formats is not huge and can be added to the
6532 * connector's modeset list beforehand. With that, userspace can leverage
6533 * FreeSync to extend the front porch in order to attain the target refresh
6534 * rate. Such a switch will happen seamlessly, without screen blanking or
6535 * reprogramming of the output in any other way. If userspace requests a
6536 * modesetting change compatible with FreeSync modes that only differ in the
6537 * refresh rate, DC will skip the full update and avoid any blink during the
6538 * transition. For example, the video player can change the modesetting from
6539 * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6540 * causing any display blink. This same concept can be applied to a mode
6541 * setting change (see the illustrative sketch following this comment).
6542 */
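
/*
 * Worked numbers for the front-porch trick described above (an
 * illustrative sketch, not driver code): keeping the 148.5 MHz clock
 * and h_total == 2200 of a 1080p60 base mode (v_total == 1125) but
 * stretching v_total to 1406 lines yields roughly 48 Hz.
 */
#if 0
static unsigned int vtotal_for_refresh(unsigned int clock_khz,
				       unsigned int h_total,
				       unsigned int refresh_hz)
{
	/* 148500 kHz / (2200 * 48 Hz) == 1406 lines */
	return (clock_khz * 1000u) / (h_total * refresh_hz);
}
#endif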
a85ba005
NC
6543static struct drm_display_mode *
6544get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6545 bool use_probed_modes)
6546{
6547 struct drm_display_mode *m, *m_pref = NULL;
6548 u16 current_refresh, highest_refresh;
6549 struct list_head *list_head = use_probed_modes ?
6550 &aconnector->base.probed_modes :
6551 &aconnector->base.modes;
6552
6553 if (aconnector->freesync_vid_base.clock != 0)
6554 return &aconnector->freesync_vid_base;
6555
6556 /* Find the preferred mode */
6557 list_for_each_entry (m, list_head, head) {
6558 if (m->type & DRM_MODE_TYPE_PREFERRED) {
6559 m_pref = m;
6560 break;
6561 }
6562 }
6563
6564 if (!m_pref) {
6565 /* Probably an EDID with no preferred mode. Fall back to the first entry. */
6566 m_pref = list_first_entry_or_null(
6567 &aconnector->base.modes, struct drm_display_mode, head);
6568 if (!m_pref) {
6569 DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6570 return NULL;
6571 }
6572 }
6573
6574 highest_refresh = drm_mode_vrefresh(m_pref);
6575
6576 /*
6577 * Find the mode with the highest refresh rate at the same resolution.
6578 * For some monitors, the preferred mode is not the mode with the
6579 * highest supported refresh rate.
6580 */
6581 list_for_each_entry (m, list_head, head) {
6582 current_refresh = drm_mode_vrefresh(m);
6583
6584 if (m->hdisplay == m_pref->hdisplay &&
6585 m->vdisplay == m_pref->vdisplay &&
6586 highest_refresh < current_refresh) {
6587 highest_refresh = current_refresh;
6588 m_pref = m;
6589 }
6590 }
6591
426c89aa 6592 drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
a85ba005
NC
6593 return m_pref;
6594}
6595
fe8858bb 6596static bool is_freesync_video_mode(const struct drm_display_mode *mode,
a85ba005
NC
6597 struct amdgpu_dm_connector *aconnector)
6598{
6599 struct drm_display_mode *high_mode;
6600 int timing_diff;
6601
6602 high_mode = get_highest_refresh_rate_mode(aconnector, false);
6603 if (!high_mode || !mode)
6604 return false;
6605
6606 timing_diff = high_mode->vtotal - mode->vtotal;
6607
6608 if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6609 high_mode->hdisplay != mode->hdisplay ||
6610 high_mode->vdisplay != mode->vdisplay ||
6611 high_mode->hsync_start != mode->hsync_start ||
6612 high_mode->hsync_end != mode->hsync_end ||
6613 high_mode->htotal != mode->htotal ||
6614 high_mode->hskew != mode->hskew ||
6615 high_mode->vscan != mode->vscan ||
6616 high_mode->vsync_start - mode->vsync_start != timing_diff ||
6617 high_mode->vsync_end - mode->vsync_end != timing_diff)
6618 return false;
6619 else
6620 return true;
6621}
6622
f11d9373 6623static struct dc_stream_state *
3ee6b26b
AD
6624create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6625 const struct drm_display_mode *drm_mode,
b333730d 6626 const struct dm_connector_state *dm_state,
cbd14ae7
SW
6627 const struct dc_stream_state *old_stream,
6628 int requested_bpc)
e7b07cee
HW
6629{
6630 struct drm_display_mode *preferred_mode = NULL;
391ef035 6631 struct drm_connector *drm_connector;
42ba01fc
NK
6632 const struct drm_connector_state *con_state =
6633 dm_state ? &dm_state->base : NULL;
0971c40e 6634 struct dc_stream_state *stream = NULL;
e7b07cee 6635 struct drm_display_mode mode = *drm_mode;
a85ba005
NC
6636 struct drm_display_mode saved_mode;
6637 struct drm_display_mode *freesync_mode = NULL;
e7b07cee 6638 bool native_mode_found = false;
b0781603
NK
6639 bool recalculate_timing = false;
6640 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
b333730d 6641 int mode_refresh;
58124bf8 6642 int preferred_refresh = 0;
defeb878 6643#if defined(CONFIG_DRM_AMD_DC_DCN)
df2f1015 6644 struct dsc_dec_dpcd_caps dsc_caps;
7c431455 6645#endif
aed15309 6646 struct dc_sink *sink = NULL;
a85ba005
NC
6647
6648 memset(&saved_mode, 0, sizeof(saved_mode));
6649
b830ebc9 6650 if (aconnector == NULL) {
e7b07cee 6651 DRM_ERROR("aconnector is NULL!\n");
64245fa7 6652 return stream;
e7b07cee
HW
6653 }
6654
e7b07cee 6655 drm_connector = &aconnector->base;
2e0ac3d6 6656
f4ac176e 6657 if (!aconnector->dc_sink) {
e3fa5c4c
JFZ
6658 sink = create_fake_sink(aconnector);
6659 if (!sink)
6660 return stream;
aed15309
ML
6661 } else {
6662 sink = aconnector->dc_sink;
dcd5fb82 6663 dc_sink_retain(sink);
f4ac176e 6664 }
2e0ac3d6 6665
aed15309 6666 stream = dc_create_stream_for_sink(sink);
4562236b 6667
b830ebc9 6668 if (stream == NULL) {
e7b07cee 6669 DRM_ERROR("Failed to create stream for sink!\n");
aed15309 6670 goto finish;
e7b07cee
HW
6671 }
6672
ceb3dbb4
JL
6673 stream->dm_stream_context = aconnector;
6674
4a36fcba
WL
6675 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6676 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6677
e7b07cee
HW
6678 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6679 /* Search for preferred mode */
6680 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6681 native_mode_found = true;
6682 break;
6683 }
6684 }
6685 if (!native_mode_found)
6686 preferred_mode = list_first_entry_or_null(
6687 &aconnector->base.modes,
6688 struct drm_display_mode,
6689 head);
6690
b333730d
BL
6691 mode_refresh = drm_mode_vrefresh(&mode);
6692
b830ebc9 6693 if (preferred_mode == NULL) {
1f6010a9
DF
6694 /*
6695 * This may not be an error; the use case is when we have no
e7b07cee
HW
6696 * usermode calls to reset and set mode upon hotplug. In this
6697 * case, we call set mode ourselves to restore the previous mode
6698 * and the mode list may not be filled in yet.
6699 */
f1ad2f5e 6700 DRM_DEBUG_DRIVER("No preferred mode found\n");
e7b07cee 6701 } else {
de05abe6 6702 recalculate_timing = is_freesync_video_mode(&mode, aconnector);
a85ba005
NC
6703 if (recalculate_timing) {
6704 freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
426c89aa
VS
6705 drm_mode_copy(&saved_mode, &mode);
6706 drm_mode_copy(&mode, freesync_mode);
a85ba005
NC
6707 } else {
6708 decide_crtc_timing_for_drm_display_mode(
b0781603 6709 &mode, preferred_mode, scale);
a85ba005 6710
b0781603
NK
6711 preferred_refresh = drm_mode_vrefresh(preferred_mode);
6712 }
e7b07cee
HW
6713 }
6714
a85ba005
NC
6715 if (recalculate_timing)
6716 drm_mode_set_crtcinfo(&saved_mode, 0);
fe8858bb 6717 else if (!dm_state)
f783577c
JFZ
6718 drm_mode_set_crtcinfo(&mode, 0);
6719
a85ba005 6720 /*
b333730d
BL
6721 * If scaling is enabled and the refresh rate didn't change,
6722 * we copy the VIC and polarities of the old timings.
6723 */
b0781603 6724 if (!scale || mode_refresh != preferred_refresh)
a85ba005
NC
6725 fill_stream_properties_from_drm_display_mode(
6726 stream, &mode, &aconnector->base, con_state, NULL,
6727 requested_bpc);
b333730d 6728 else
a85ba005
NC
6729 fill_stream_properties_from_drm_display_mode(
6730 stream, &mode, &aconnector->base, con_state, old_stream,
6731 requested_bpc);
b333730d 6732
defeb878 6733#if defined(CONFIG_DRM_AMD_DC_DCN)
998b7ad2
FZ
6734 /* SST DSC determination policy */
6735 update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6736 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6737 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
39a4eb85
WL
6738#endif
6739
e7b07cee
HW
6740 update_stream_scaling_settings(&mode, dm_state, stream);
6741
6742 fill_audio_info(
6743 &stream->audio_info,
6744 drm_connector,
aed15309 6745 sink);
e7b07cee 6746
ceb3dbb4 6747 update_stream_signal(stream, sink);
9182b4cb 6748
d832fc3b 6749 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
75f77aaf
WL
6750 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6751
8a488f5d
RL
6752 if (stream->link->psr_settings.psr_feature_enabled) {
6753 //
6754 // should decide stream support vsc sdp colorimetry capability
6755 // before building vsc info packet
6756 //
6757 stream->use_vsc_sdp_for_colorimetry = false;
6758 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6759 stream->use_vsc_sdp_for_colorimetry =
6760 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6761 } else {
6762 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6763 stream->use_vsc_sdp_for_colorimetry = true;
8c322309 6764 }
0c5a0bbb 6765 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
1a365683
RL
6766 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6767
8c322309 6768 }
aed15309 6769finish:
dcd5fb82 6770 dc_sink_release(sink);
9e3efe3e 6771
e7b07cee
HW
6772 return stream;
6773}
6774
7578ecda 6775static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
e7b07cee
HW
6776{
6777 drm_crtc_cleanup(crtc);
6778 kfree(crtc);
6779}
6780
6781static void dm_crtc_destroy_state(struct drm_crtc *crtc,
3ee6b26b 6782 struct drm_crtc_state *state)
e7b07cee
HW
6783{
6784 struct dm_crtc_state *cur = to_dm_crtc_state(state);
6785
6786 /* TODO: destroy dc_stream objects once the stream object is flattened */
6787 if (cur->stream)
6788 dc_stream_release(cur->stream);
6789
6790
6791 __drm_atomic_helper_crtc_destroy_state(state);
6792
6793
6794 kfree(state);
6795}
6796
6797static void dm_crtc_reset_state(struct drm_crtc *crtc)
6798{
6799 struct dm_crtc_state *state;
6800
6801 if (crtc->state)
6802 dm_crtc_destroy_state(crtc, crtc->state);
6803
6804 state = kzalloc(sizeof(*state), GFP_KERNEL);
6805 if (WARN_ON(!state))
6806 return;
6807
1f8a52ec 6808 __drm_atomic_helper_crtc_reset(crtc, &state->base);
e7b07cee
HW
6809}
6810
6811static struct drm_crtc_state *
6812dm_crtc_duplicate_state(struct drm_crtc *crtc)
6813{
6814 struct dm_crtc_state *state, *cur;
6815
6816 cur = to_dm_crtc_state(crtc->state);
6817
6818 if (WARN_ON(!crtc->state))
6819 return NULL;
6820
2004f45e 6821 state = kzalloc(sizeof(*state), GFP_KERNEL);
2a55f096
ES
6822 if (!state)
6823 return NULL;
e7b07cee
HW
6824
6825 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6826
6827 if (cur->stream) {
6828 state->stream = cur->stream;
6829 dc_stream_retain(state->stream);
6830 }
6831
d6ef9b41 6832 state->active_planes = cur->active_planes;
98e6436d 6833 state->vrr_infopacket = cur->vrr_infopacket;
c1ee92f9 6834 state->abm_level = cur->abm_level;
bb47de73
NK
6835 state->vrr_supported = cur->vrr_supported;
6836 state->freesync_config = cur->freesync_config;
cf020d49
NK
6837 state->cm_has_degamma = cur->cm_has_degamma;
6838 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
7cc191ee 6839 state->mpo_requested = cur->mpo_requested;
e7b07cee
HW
6840 /* TODO Duplicate dc_stream after objects are stream object is flattened */
6841
6842 return &state->base;
6843}
6844
86bc2219 6845#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
e69231c4 6846static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
86bc2219
WL
6847{
6848 crtc_debugfs_init(crtc);
6849
6850 return 0;
6851}
6852#endif
6853
d2574c33
MK
6854static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6855{
6856 enum dc_irq_source irq_source;
6857 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
1348969a 6858 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
d2574c33
MK
6859 int rc;
6860
6861 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6862
6863 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6864
4711c033
LT
6865 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6866 acrtc->crtc_id, enable ? "en" : "dis", rc);
d2574c33
MK
6867 return rc;
6868}
589d2739
HW
6869
6870static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6871{
6872 enum dc_irq_source irq_source;
6873 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
1348969a 6874 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
d2574c33 6875 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
71338cb4 6876 struct amdgpu_display_manager *dm = &adev->dm;
09a5df6c 6877 struct vblank_control_work *work;
d2574c33
MK
6878 int rc = 0;
6879
6880 if (enable) {
6881 /* vblank irq on -> Only need vupdate irq in vrr mode */
6882 if (amdgpu_dm_vrr_active(acrtc_state))
6883 rc = dm_set_vupdate_irq(crtc, true);
6884 } else {
6885 /* vblank irq off -> vupdate irq off */
6886 rc = dm_set_vupdate_irq(crtc, false);
6887 }
6888
6889 if (rc)
6890 return rc;
589d2739
HW
6891
6892 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
71338cb4
BL
6893
6894 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6895 return -EBUSY;
6896
98ab5f35
BL
6897 if (amdgpu_in_reset(adev))
6898 return 0;
6899
06dd1888
NK
6900 if (dm->vblank_control_workqueue) {
6901 work = kzalloc(sizeof(*work), GFP_ATOMIC);
6902 if (!work)
6903 return -ENOMEM;
09a5df6c 6904
06dd1888
NK
6905 INIT_WORK(&work->work, vblank_control_worker);
6906 work->dm = dm;
6907 work->acrtc = acrtc;
6908 work->enable = enable;
09a5df6c 6909
06dd1888
NK
6910 if (acrtc_state->stream) {
6911 dc_stream_retain(acrtc_state->stream);
6912 work->stream = acrtc_state->stream;
6913 }
58aa1c50 6914
06dd1888
NK
6915 queue_work(dm->vblank_control_workqueue, &work->work);
6916 }
71338cb4 6917
71338cb4 6918 return 0;
589d2739
HW
6919}
6920
6921static int dm_enable_vblank(struct drm_crtc *crtc)
6922{
6923 return dm_set_vblank(crtc, true);
6924}
6925
6926static void dm_disable_vblank(struct drm_crtc *crtc)
6927{
6928 dm_set_vblank(crtc, false);
6929}
6930
faf26f2b 6931/* Implemented only the options currently available for the driver */
e7b07cee
HW
6932static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6933 .reset = dm_crtc_reset_state,
6934 .destroy = amdgpu_dm_crtc_destroy,
e7b07cee
HW
6935 .set_config = drm_atomic_helper_set_config,
6936 .page_flip = drm_atomic_helper_page_flip,
6937 .atomic_duplicate_state = dm_crtc_duplicate_state,
6938 .atomic_destroy_state = dm_crtc_destroy_state,
31aec354 6939 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
3b3b8448 6940 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
8fb843d1 6941 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
e3eff4b5 6942 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
589d2739
HW
6943 .enable_vblank = dm_enable_vblank,
6944 .disable_vblank = dm_disable_vblank,
e3eff4b5 6945 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
86bc2219
WL
6946#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6947 .late_register = amdgpu_dm_crtc_late_register,
6948#endif
e7b07cee
HW
6949};
6950
6951static enum drm_connector_status
6952amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6953{
6954 bool connected;
c84dec2f 6955 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 6956
1f6010a9
DF
6957 /*
6958 * Notes:
e7b07cee
HW
6959 * 1. This interface is NOT called in the context of the HPD irq.
6960 * 2. This interface *is called* in the context of a user-mode ioctl, which
1f6010a9
DF
6961 * makes it a bad place for *any* MST-related activity.
6962 */
e7b07cee 6963
8580d60b
HW
6964 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6965 !aconnector->fake_enable)
e7b07cee
HW
6966 connected = (aconnector->dc_sink != NULL);
6967 else
6968 connected = (aconnector->base.force == DRM_FORCE_ON);
6969
0f877894
OV
6970 update_subconnector_property(aconnector);
6971
e7b07cee
HW
6972 return (connected ? connector_status_connected :
6973 connector_status_disconnected);
6974}
6975
3ee6b26b
AD
6976int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6977 struct drm_connector_state *connector_state,
6978 struct drm_property *property,
6979 uint64_t val)
e7b07cee
HW
6980{
6981 struct drm_device *dev = connector->dev;
1348969a 6982 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
6983 struct dm_connector_state *dm_old_state =
6984 to_dm_connector_state(connector->state);
6985 struct dm_connector_state *dm_new_state =
6986 to_dm_connector_state(connector_state);
6987
6988 int ret = -EINVAL;
6989
6990 if (property == dev->mode_config.scaling_mode_property) {
6991 enum amdgpu_rmx_type rmx_type;
6992
6993 switch (val) {
6994 case DRM_MODE_SCALE_CENTER:
6995 rmx_type = RMX_CENTER;
6996 break;
6997 case DRM_MODE_SCALE_ASPECT:
6998 rmx_type = RMX_ASPECT;
6999 break;
7000 case DRM_MODE_SCALE_FULLSCREEN:
7001 rmx_type = RMX_FULL;
7002 break;
7003 case DRM_MODE_SCALE_NONE:
7004 default:
7005 rmx_type = RMX_OFF;
7006 break;
7007 }
7008
7009 if (dm_old_state->scaling == rmx_type)
7010 return 0;
7011
7012 dm_new_state->scaling = rmx_type;
7013 ret = 0;
7014 } else if (property == adev->mode_info.underscan_hborder_property) {
7015 dm_new_state->underscan_hborder = val;
7016 ret = 0;
7017 } else if (property == adev->mode_info.underscan_vborder_property) {
7018 dm_new_state->underscan_vborder = val;
7019 ret = 0;
7020 } else if (property == adev->mode_info.underscan_property) {
7021 dm_new_state->underscan_enable = val;
7022 ret = 0;
c1ee92f9
DF
7023 } else if (property == adev->mode_info.abm_level_property) {
7024 dm_new_state->abm_level = val;
7025 ret = 0;
e7b07cee
HW
7026 }
7027
7028 return ret;
7029}
7030
3ee6b26b
AD
7031int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
7032 const struct drm_connector_state *state,
7033 struct drm_property *property,
7034 uint64_t *val)
e7b07cee
HW
7035{
7036 struct drm_device *dev = connector->dev;
1348969a 7037 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
7038 struct dm_connector_state *dm_state =
7039 to_dm_connector_state(state);
7040 int ret = -EINVAL;
7041
7042 if (property == dev->mode_config.scaling_mode_property) {
7043 switch (dm_state->scaling) {
7044 case RMX_CENTER:
7045 *val = DRM_MODE_SCALE_CENTER;
7046 break;
7047 case RMX_ASPECT:
7048 *val = DRM_MODE_SCALE_ASPECT;
7049 break;
7050 case RMX_FULL:
7051 *val = DRM_MODE_SCALE_FULLSCREEN;
7052 break;
7053 case RMX_OFF:
7054 default:
7055 *val = DRM_MODE_SCALE_NONE;
7056 break;
7057 }
7058 ret = 0;
7059 } else if (property == adev->mode_info.underscan_hborder_property) {
7060 *val = dm_state->underscan_hborder;
7061 ret = 0;
7062 } else if (property == adev->mode_info.underscan_vborder_property) {
7063 *val = dm_state->underscan_vborder;
7064 ret = 0;
7065 } else if (property == adev->mode_info.underscan_property) {
7066 *val = dm_state->underscan_enable;
7067 ret = 0;
c1ee92f9
DF
7068 } else if (property == adev->mode_info.abm_level_property) {
7069 *val = dm_state->abm_level;
7070 ret = 0;
e7b07cee 7071 }
c1ee92f9 7072
e7b07cee
HW
7073 return ret;
7074}
7075
526c654a
ED
7076static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
7077{
7078 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
7079
7080 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
7081}
7082
7578ecda 7083static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
e7b07cee 7084{
c84dec2f 7085 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 7086 const struct dc_link *link = aconnector->dc_link;
1348969a 7087 struct amdgpu_device *adev = drm_to_adev(connector->dev);
e7b07cee 7088 struct amdgpu_display_manager *dm = &adev->dm;
7fd13bae 7089 int i;
ada8ce15 7090
5dff80bd
AG
7091 /*
7092 * Call only if mst_mgr was initialized before, since it's not done
7093 * for all connector types.
7094 */
7095 if (aconnector->mst_mgr.dev)
7096 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
7097
7fd13bae
AD
7098 for (i = 0; i < dm->num_of_edps; i++) {
7099 if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
7100 backlight_device_unregister(dm->backlight_dev[i]);
7101 dm->backlight_dev[i] = NULL;
7102 }
e7b07cee 7103 }
dcd5fb82
MF
7104
7105 if (aconnector->dc_em_sink)
7106 dc_sink_release(aconnector->dc_em_sink);
7107 aconnector->dc_em_sink = NULL;
7108 if (aconnector->dc_sink)
7109 dc_sink_release(aconnector->dc_sink);
7110 aconnector->dc_sink = NULL;
7111
e86e8947 7112 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
e7b07cee
HW
7113 drm_connector_unregister(connector);
7114 drm_connector_cleanup(connector);
526c654a
ED
7115 if (aconnector->i2c) {
7116 i2c_del_adapter(&aconnector->i2c->base);
7117 kfree(aconnector->i2c);
7118 }
7daec99f 7119 kfree(aconnector->dm_dp_aux.aux.name);
526c654a 7120
e7b07cee
HW
7121 kfree(connector);
7122}
7123
7124void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
7125{
7126 struct dm_connector_state *state =
7127 to_dm_connector_state(connector->state);
7128
df099b9b
LSL
7129 if (connector->state)
7130 __drm_atomic_helper_connector_destroy_state(connector->state);
7131
e7b07cee
HW
7132 kfree(state);
7133
7134 state = kzalloc(sizeof(*state), GFP_KERNEL);
7135
7136 if (state) {
7137 state->scaling = RMX_OFF;
7138 state->underscan_enable = false;
7139 state->underscan_hborder = 0;
7140 state->underscan_vborder = 0;
01933ba4 7141 state->base.max_requested_bpc = 8;
3261e013
ML
7142 state->vcpi_slots = 0;
7143 state->pbn = 0;
c3e50f89
NK
7144 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
7145 state->abm_level = amdgpu_dm_abm_level;
7146
df099b9b 7147 __drm_atomic_helper_connector_reset(connector, &state->base);
e7b07cee
HW
7148 }
7149}
7150
3ee6b26b
AD
7151struct drm_connector_state *
7152amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
e7b07cee
HW
7153{
7154 struct dm_connector_state *state =
7155 to_dm_connector_state(connector->state);
7156
7157 struct dm_connector_state *new_state =
7158 kmemdup(state, sizeof(*state), GFP_KERNEL);
7159
98e6436d
AK
7160 if (!new_state)
7161 return NULL;
e7b07cee 7162
98e6436d
AK
7163 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
7164
7165 new_state->freesync_capable = state->freesync_capable;
c1ee92f9 7166 new_state->abm_level = state->abm_level;
922454c2
NK
7167 new_state->scaling = state->scaling;
7168 new_state->underscan_enable = state->underscan_enable;
7169 new_state->underscan_hborder = state->underscan_hborder;
7170 new_state->underscan_vborder = state->underscan_vborder;
3261e013
ML
7171 new_state->vcpi_slots = state->vcpi_slots;
7172 new_state->pbn = state->pbn;
98e6436d 7173 return &new_state->base;
e7b07cee
HW
7174}
7175
14f04fa4
AD
7176static int
7177amdgpu_dm_connector_late_register(struct drm_connector *connector)
7178{
7179 struct amdgpu_dm_connector *amdgpu_dm_connector =
7180 to_amdgpu_dm_connector(connector);
00a8037e 7181 int r;
14f04fa4 7182
00a8037e
AD
7183 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
7184 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
7185 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
7186 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
7187 if (r)
7188 return r;
7189 }
7190
7191#if defined(CONFIG_DEBUG_FS)
14f04fa4
AD
7192 connector_debugfs_init(amdgpu_dm_connector);
7193#endif
7194
7195 return 0;
7196}
7197
e7b07cee
HW
7198static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
7199 .reset = amdgpu_dm_connector_funcs_reset,
7200 .detect = amdgpu_dm_connector_detect,
7201 .fill_modes = drm_helper_probe_single_connector_modes,
7202 .destroy = amdgpu_dm_connector_destroy,
7203 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
7204 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
7205 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
526c654a 7206 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
14f04fa4 7207 .late_register = amdgpu_dm_connector_late_register,
526c654a 7208 .early_unregister = amdgpu_dm_connector_unregister
e7b07cee
HW
7209};
7210
e7b07cee
HW
7211static int get_modes(struct drm_connector *connector)
7212{
7213 return amdgpu_dm_connector_get_modes(connector);
7214}
7215
c84dec2f 7216static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
7217{
7218 struct dc_sink_init_data init_params = {
7219 .link = aconnector->dc_link,
7220 .sink_signal = SIGNAL_TYPE_VIRTUAL
7221 };
70e8ffc5 7222 struct edid *edid;
e7b07cee 7223
a89ff457 7224 if (!aconnector->base.edid_blob_ptr) {
e7b07cee
HW
7225 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
7226 aconnector->base.name);
7227
7228 aconnector->base.force = DRM_FORCE_OFF;
7229 aconnector->base.override_edid = false;
7230 return;
7231 }
7232
70e8ffc5
HW
7233 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
7234
e7b07cee
HW
7235 aconnector->edid = edid;
7236
7237 aconnector->dc_em_sink = dc_link_add_remote_sink(
7238 aconnector->dc_link,
7239 (uint8_t *)edid,
7240 (edid->extensions + 1) * EDID_LENGTH,
7241 &init_params);
7242
dcd5fb82 7243 if (aconnector->base.force == DRM_FORCE_ON) {
e7b07cee
HW
7244 aconnector->dc_sink = aconnector->dc_link->local_sink ?
7245 aconnector->dc_link->local_sink :
7246 aconnector->dc_em_sink;
dcd5fb82
MF
7247 dc_sink_retain(aconnector->dc_sink);
7248 }
e7b07cee
HW
7249}
7250
c84dec2f 7251static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
7252{
7253 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
7254
1f6010a9
DF
7255 /*
7256 * In case of a headless boot with force-on for a DP managed connector,
e7b07cee
HW
7257 * those settings have to be != 0 to get an initial modeset.
7258 */
7259 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
7260 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
7261 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
7262 }
7263
7264
7265 aconnector->base.override_edid = true;
7266 create_eml_sink(aconnector);
7267}
7268
17ce8a69 7269struct dc_stream_state *
cbd14ae7
SW
7270create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
7271 const struct drm_display_mode *drm_mode,
7272 const struct dm_connector_state *dm_state,
7273 const struct dc_stream_state *old_stream)
7274{
7275 struct drm_connector *connector = &aconnector->base;
1348969a 7276 struct amdgpu_device *adev = drm_to_adev(connector->dev);
cbd14ae7 7277 struct dc_stream_state *stream;
4b7da34b
SW
7278 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
7279 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
cbd14ae7
SW
7280 enum dc_status dc_result = DC_OK;
7281
7282 do {
7283 stream = create_stream_for_sink(aconnector, drm_mode,
7284 dm_state, old_stream,
7285 requested_bpc);
7286 if (stream == NULL) {
7287 DRM_ERROR("Failed to create stream for sink!\n");
7288 break;
7289 }
7290
e9a7d236
RS
7291 dc_result = dc_validate_stream(adev->dm.dc, stream);
7292 if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
f04d275d 7293 dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);
7294
cbd14ae7 7295 if (dc_result != DC_OK) {
74a16675 7296 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
cbd14ae7
SW
7297 drm_mode->hdisplay,
7298 drm_mode->vdisplay,
7299 drm_mode->clock,
74a16675
RS
7300 dc_result,
7301 dc_status_to_str(dc_result));
cbd14ae7
SW
7302
7303 dc_stream_release(stream);
7304 stream = NULL;
7305 requested_bpc -= 2; /* lower bpc to retry validation */
7306 }
7307
7308 } while (stream == NULL && requested_bpc >= 6);
7309
68eb3ae3
WS
7310 if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
7311 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
7312
7313 aconnector->force_yuv420_output = true;
7314 stream = create_validate_stream_for_sink(aconnector, drm_mode,
7315 dm_state, old_stream);
7316 aconnector->force_yuv420_output = false;
7317 }
7318
cbd14ae7
SW
7319 return stream;
7320}
7321
ba9ca088 7322enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
3ee6b26b 7323 struct drm_display_mode *mode)
e7b07cee
HW
7324{
7325 int result = MODE_ERROR;
7326 struct dc_sink *dc_sink;
e7b07cee 7327 /* TODO: Unhardcode stream count */
0971c40e 7328 struct dc_stream_state *stream;
c84dec2f 7329 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
7330
7331 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
7332 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
7333 return result;
7334
1f6010a9
DF
7335 /*
7336 * Only run this the first time mode_valid is called, to initialize
e7b07cee
HW
7337 * EDID management.
7338 */
7339 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
7340 !aconnector->dc_em_sink)
7341 handle_edid_mgmt(aconnector);
7342
c84dec2f 7343 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
e7b07cee 7344
ad975f44
VL
7345 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
7346 aconnector->base.force != DRM_FORCE_ON) {
e7b07cee
HW
7347 DRM_ERROR("dc_sink is NULL!\n");
7348 goto fail;
7349 }
7350
cbd14ae7
SW
7351 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
7352 if (stream) {
7353 dc_stream_release(stream);
e7b07cee 7354 result = MODE_OK;
cbd14ae7 7355 }
e7b07cee
HW
7356
7357fail:
7358 /* TODO: error handling */
7359 return result;
7360}
7361
88694af9
NK
7362static int fill_hdr_info_packet(const struct drm_connector_state *state,
7363 struct dc_info_packet *out)
7364{
7365 struct hdmi_drm_infoframe frame;
7366 unsigned char buf[30]; /* 26 + 4 */
7367 ssize_t len;
7368 int ret, i;
7369
7370 memset(out, 0, sizeof(*out));
7371
7372 if (!state->hdr_output_metadata)
7373 return 0;
7374
7375 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7376 if (ret)
7377 return ret;
7378
7379 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7380 if (len < 0)
7381 return (int)len;
7382
7383 /* Static metadata is a fixed 26 bytes + 4 byte header. */
7384 if (len != 30)
7385 return -EINVAL;
7386
7387 /* Prepare the infopacket for DC. */
7388 switch (state->connector->connector_type) {
7389 case DRM_MODE_CONNECTOR_HDMIA:
7390 out->hb0 = 0x87; /* type */
7391 out->hb1 = 0x01; /* version */
7392 out->hb2 = 0x1A; /* length */
7393 out->sb[0] = buf[3]; /* checksum */
7394 i = 1;
7395 break;
7396
7397 case DRM_MODE_CONNECTOR_DisplayPort:
7398 case DRM_MODE_CONNECTOR_eDP:
7399 out->hb0 = 0x00; /* sdp id, zero */
7400 out->hb1 = 0x87; /* type */
7401 out->hb2 = 0x1D; /* payload len - 1 */
7402 out->hb3 = (0x13 << 2); /* sdp version */
7403 out->sb[0] = 0x01; /* version */
7404 out->sb[1] = 0x1A; /* length */
7405 i = 2;
7406 break;
7407
7408 default:
7409 return -EINVAL;
7410 }
7411
7412 memcpy(&out->sb[i], &buf[4], 26);
7413 out->valid = true;
7414
7415 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7416 sizeof(out->sb), false);
7417
7418 return 0;
7419}
7420
88694af9
NK
7421static int
7422amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
51e857af 7423 struct drm_atomic_state *state)
88694af9 7424{
51e857af
SP
7425 struct drm_connector_state *new_con_state =
7426 drm_atomic_get_new_connector_state(state, conn);
88694af9
NK
7427 struct drm_connector_state *old_con_state =
7428 drm_atomic_get_old_connector_state(state, conn);
7429 struct drm_crtc *crtc = new_con_state->crtc;
7430 struct drm_crtc_state *new_crtc_state;
7431 int ret;
7432
e8a98235
RS
7433 trace_amdgpu_dm_connector_atomic_check(new_con_state);
7434
88694af9
NK
7435 if (!crtc)
7436 return 0;
7437
72921cdf 7438 if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
88694af9
NK
7439 struct dc_info_packet hdr_infopacket;
7440
7441 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7442 if (ret)
7443 return ret;
7444
7445 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7446 if (IS_ERR(new_crtc_state))
7447 return PTR_ERR(new_crtc_state);
7448
7449 /*
7450 * DC considers the stream backends changed if the
7451 * static metadata changes. Forcing the modeset also
7452 * gives a simple way for userspace to switch from
b232d4ed
NK
7453 * 8bpc to 10bpc when setting the metadata to enter
7454 * or exit HDR.
7455 *
7456 * Changing the static metadata after it's been
7457 * set is permissible, however. So only force a
7458 * modeset if we're entering or exiting HDR.
88694af9 7459 */
b232d4ed
NK
7460 new_crtc_state->mode_changed =
7461 !old_con_state->hdr_output_metadata ||
7462 !new_con_state->hdr_output_metadata;
88694af9
NK
7463 }
7464
7465 return 0;
7466}
7467
e7b07cee
HW
7468static const struct drm_connector_helper_funcs
7469amdgpu_dm_connector_helper_funcs = {
7470 /*
1f6010a9 7471 * If hotplugging a second, bigger display in FB console mode, bigger
b830ebc9 7472 * resolution modes will be filtered by drm_mode_validate_size(), and those
1f6010a9 7473 * modes are missing after the user starts lightdm. So we need to renew the
b830ebc9
HW
7474 * modes list in the get_modes callback, not just return the modes count.
7475 */
e7b07cee
HW
7476 .get_modes = get_modes,
7477 .mode_valid = amdgpu_dm_connector_mode_valid,
88694af9 7478 .atomic_check = amdgpu_dm_connector_atomic_check,
e7b07cee
HW
7479};
7480
7481static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7482{
7483}
7484
d6ef9b41 7485static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
c14a005c
NK
7486{
7487 struct drm_atomic_state *state = new_crtc_state->state;
7488 struct drm_plane *plane;
7489 int num_active = 0;
7490
7491 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7492 struct drm_plane_state *new_plane_state;
7493
7494 /* Cursor planes are "fake". */
7495 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7496 continue;
7497
7498 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7499
7500 if (!new_plane_state) {
7501 /*
7502 * The plane is enabled on the CRTC and hasn't changed
7503 * state. This means that it previously passed
7504 * validation and is therefore enabled.
7505 */
7506 num_active += 1;
7507 continue;
7508 }
7509
7510 /* We need a framebuffer to be considered enabled. */
7511 num_active += (new_plane_state->fb != NULL);
7512 }
7513
d6ef9b41
NK
7514 return num_active;
7515}
7516
8fe684e9
NK
7517static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7518 struct drm_crtc_state *new_crtc_state)
d6ef9b41
NK
7519{
7520 struct dm_crtc_state *dm_new_crtc_state =
7521 to_dm_crtc_state(new_crtc_state);
7522
7523 dm_new_crtc_state->active_planes = 0;
d6ef9b41
NK
7524
7525 if (!dm_new_crtc_state->stream)
7526 return;
7527
7528 dm_new_crtc_state->active_planes =
7529 count_crtc_active_planes(new_crtc_state);
c14a005c
NK
7530}
7531
3ee6b26b 7532static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
29b77ad7 7533 struct drm_atomic_state *state)
e7b07cee 7534{
29b77ad7
MR
7535 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7536 crtc);
1348969a 7537 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
e7b07cee 7538 struct dc *dc = adev->dm.dc;
29b77ad7 7539 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
e7b07cee
HW
7540 int ret = -EINVAL;
7541
5b8c5969 7542 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
e8a98235 7543
29b77ad7 7544 dm_update_crtc_active_planes(crtc, crtc_state);
d6ef9b41 7545
bcd74374
ND
7546 if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7547 modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
e7b07cee
HW
7548 return ret;
7549 }
7550
bc92c065 7551 /*
b836a274
MD
7552 * We require the primary plane to be enabled whenever the CRTC is, otherwise
7553 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7554 * planes are disabled, which is not supported by the hardware. And there is legacy
7555 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
bc92c065 7556 */
29b77ad7 7557 if (crtc_state->enable &&
ea9522f5
SS
7558 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7559 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
c14a005c 7560 return -EINVAL;
ea9522f5 7561 }
c14a005c 7562
b836a274
MD
7563 /* In some use cases, like reset, no stream is attached */
7564 if (!dm_crtc_state->stream)
7565 return 0;
7566
62c933f9 7567 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
e7b07cee
HW
7568 return 0;
7569
ea9522f5 7570 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
e7b07cee
HW
7571 return ret;
7572}
7573
3ee6b26b
AD
7574static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7575 const struct drm_display_mode *mode,
7576 struct drm_display_mode *adjusted_mode)
e7b07cee
HW
7577{
7578 return true;
7579}
7580
7581static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7582 .disable = dm_crtc_helper_disable,
7583 .atomic_check = dm_crtc_helper_atomic_check,
ea702333
TZ
7584 .mode_fixup = dm_crtc_helper_mode_fixup,
7585 .get_scanout_position = amdgpu_crtc_get_scanout_position,
e7b07cee
HW
7586};
7587
7588static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7589{
7590
7591}
7592
f04d275d 7593int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
3261e013
ML
7594{
7595 switch (display_color_depth) {
7596 case COLOR_DEPTH_666:
7597 return 6;
7598 case COLOR_DEPTH_888:
7599 return 8;
7600 case COLOR_DEPTH_101010:
7601 return 10;
7602 case COLOR_DEPTH_121212:
7603 return 12;
7604 case COLOR_DEPTH_141414:
7605 return 14;
7606 case COLOR_DEPTH_161616:
7607 return 16;
7608 default:
7609 break;
7610 }
7611 return 0;
7612}
7613
3ee6b26b
AD
7614static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7615 struct drm_crtc_state *crtc_state,
7616 struct drm_connector_state *conn_state)
e7b07cee 7617{
3261e013
ML
7618 struct drm_atomic_state *state = crtc_state->state;
7619 struct drm_connector *connector = conn_state->connector;
7620 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7621 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7622 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7623 struct drm_dp_mst_topology_mgr *mst_mgr;
7624 struct drm_dp_mst_port *mst_port;
7625 enum dc_color_depth color_depth;
7626 int clock, bpp = 0;
1bc22f20 7627 bool is_y420 = false;
3261e013
ML
7628
7629 if (!aconnector->port || !aconnector->dc_sink)
7630 return 0;
7631
7632 mst_port = aconnector->port;
7633 mst_mgr = &aconnector->mst_port->mst_mgr;
7634
7635 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7636 return 0;
7637
7638 if (!state->duplicated) {
cbd14ae7 7639 int max_bpc = conn_state->max_requested_bpc;
1bc22f20
SW
7640 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7641 aconnector->force_yuv420_output;
cbd14ae7
SW
7642 color_depth = convert_color_depth_from_display_info(connector,
7643 is_y420,
7644 max_bpc);
3261e013
ML
7645 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7646 clock = adjusted_mode->clock;
dc48529f 7647 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
3261e013
ML
7648 }
7649 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7650 mst_mgr,
7651 mst_port,
1c6c1cb5 7652 dm_new_connector_state->pbn,
03ca9600 7653 dm_mst_get_pbn_divider(aconnector->dc_link));
3261e013
ML
7654 if (dm_new_connector_state->vcpi_slots < 0) {
7655 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7656 return dm_new_connector_state->vcpi_slots;
7657 }
e7b07cee
HW
7658 return 0;
7659}
7660
7661const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7662 .disable = dm_encoder_helper_disable,
7663 .atomic_check = dm_encoder_helper_atomic_check
7664};
7665
d9fe1a4c 7666#if defined(CONFIG_DRM_AMD_DC_DCN)
29b9ba74 7667static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6513104b
HW
7668 struct dc_state *dc_state,
7669 struct dsc_mst_fairness_vars *vars)
29b9ba74
ML
7670{
7671 struct dc_stream_state *stream = NULL;
7672 struct drm_connector *connector;
5760dcb9 7673 struct drm_connector_state *new_con_state;
29b9ba74
ML
7674 struct amdgpu_dm_connector *aconnector;
7675 struct dm_connector_state *dm_conn_state;
a550bb16
HW
7676 int i, j;
7677 int vcpi, pbn_div, pbn, slot_num = 0;
29b9ba74 7678
5760dcb9 7679 for_each_new_connector_in_state(state, connector, new_con_state, i) {
29b9ba74
ML
7680
7681 aconnector = to_amdgpu_dm_connector(connector);
7682
7683 if (!aconnector->port)
7684 continue;
7685
7686 if (!new_con_state || !new_con_state->crtc)
7687 continue;
7688
7689 dm_conn_state = to_dm_connector_state(new_con_state);
7690
7691 for (j = 0; j < dc_state->stream_count; j++) {
7692 stream = dc_state->streams[j];
7693 if (!stream)
7694 continue;
7695
7696 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
7697 break;
7698
7699 stream = NULL;
7700 }
7701
7702 if (!stream)
7703 continue;
7704
29b9ba74 7705 pbn_div = dm_mst_get_pbn_divider(stream->link);
6513104b
HW
7706 /* pbn is calculated by compute_mst_dsc_configs_for_state*/
7707 for (j = 0; j < dc_state->stream_count; j++) {
7708 if (vars[j].aconnector == aconnector) {
7709 pbn = vars[j].pbn;
7710 break;
7711 }
7712 }
7713
a550bb16
HW
7714 if (j == dc_state->stream_count)
7715 continue;
7716
7717 slot_num = DIV_ROUND_UP(pbn, pbn_div);
7718
7719 if (stream->timing.flags.DSC != 1) {
7720 dm_conn_state->pbn = pbn;
7721 dm_conn_state->vcpi_slots = slot_num;
7722
7723 drm_dp_mst_atomic_enable_dsc(state,
7724 aconnector->port,
7725 dm_conn_state->pbn,
7726 0,
7727 false);
7728 continue;
7729 }
7730
29b9ba74
ML
7731 vcpi = drm_dp_mst_atomic_enable_dsc(state,
7732 aconnector->port,
7733 pbn, pbn_div,
7734 true);
7735 if (vcpi < 0)
7736 return vcpi;
7737
7738 dm_conn_state->pbn = pbn;
7739 dm_conn_state->vcpi_slots = vcpi;
7740 }
7741 return 0;
7742}
d9fe1a4c 7743#endif
29b9ba74 7744
e7b07cee
HW
7745static void dm_drm_plane_reset(struct drm_plane *plane)
7746{
7747 struct dm_plane_state *amdgpu_state = NULL;
7748
7749 if (plane->state)
7750 plane->funcs->atomic_destroy_state(plane, plane->state);
7751
7752 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
f922237d 7753 WARN_ON(amdgpu_state == NULL);
1f6010a9 7754
7ddaef96
NK
7755 if (amdgpu_state)
7756 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
e7b07cee
HW
7757}
7758
7759static struct drm_plane_state *
7760dm_drm_plane_duplicate_state(struct drm_plane *plane)
7761{
7762 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7763
7764 old_dm_plane_state = to_dm_plane_state(plane->state);
7765 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7766 if (!dm_plane_state)
7767 return NULL;
7768
7769 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7770
3be5262e
HW
7771 if (old_dm_plane_state->dc_state) {
7772 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7773 dc_plane_state_retain(dm_plane_state->dc_state);
e7b07cee
HW
7774 }
7775
7776 return &dm_plane_state->base;
7777}
7778
dfd84d90 7779static void dm_drm_plane_destroy_state(struct drm_plane *plane,
3ee6b26b 7780 struct drm_plane_state *state)
e7b07cee
HW
7781{
7782 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7783
3be5262e
HW
7784 if (dm_plane_state->dc_state)
7785 dc_plane_state_release(dm_plane_state->dc_state);
e7b07cee 7786
0627bbd3 7787 drm_atomic_helper_plane_destroy_state(plane, state);
e7b07cee
HW
7788}
7789
7790static const struct drm_plane_funcs dm_plane_funcs = {
7791 .update_plane = drm_atomic_helper_update_plane,
7792 .disable_plane = drm_atomic_helper_disable_plane,
02680efb 7793 .destroy = drm_primary_helper_destroy,
e7b07cee
HW
7794 .reset = dm_drm_plane_reset,
7795 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
7796 .atomic_destroy_state = dm_drm_plane_destroy_state,
faa37f54 7797 .format_mod_supported = dm_plane_format_mod_supported,
e7b07cee
HW
7798};
7799
3ee6b26b
AD
7800static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7801 struct drm_plane_state *new_state)
e7b07cee
HW
7802{
7803 struct amdgpu_framebuffer *afb;
7804 struct drm_gem_object *obj;
5d43be0c 7805 struct amdgpu_device *adev;
e7b07cee 7806 struct amdgpu_bo *rbo;
e7b07cee 7807 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5d43be0c
CK
7808 uint32_t domain;
7809 int r;
e7b07cee
HW
7810
7811 if (!new_state->fb) {
4711c033 7812 DRM_DEBUG_KMS("No FB bound\n");
e7b07cee
HW
7813 return 0;
7814 }
7815
7816 afb = to_amdgpu_framebuffer(new_state->fb);
e68d14dd 7817 obj = new_state->fb->obj[0];
e7b07cee 7818 rbo = gem_to_amdgpu_bo(obj);
5d43be0c 7819 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
0f257b09 7820
f06e2167 7821 r = amdgpu_bo_reserve(rbo, true);
0f257b09
CZ
7822 if (r) {
7823 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
e7b07cee 7824 return r;
0f257b09 7825 }
e7b07cee 7826
f06e2167
CK
7827 r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
7828 if (r) {
7829 dev_err(adev->dev, "reserving fence slot failed (%d)\n", r);
7830 goto error_unlock;
7831 }
7832
5d43be0c 7833 if (plane->type != DRM_PLANE_TYPE_CURSOR)
f2bd8a0e 7834 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5d43be0c
CK
7835 else
7836 domain = AMDGPU_GEM_DOMAIN_VRAM;
e7b07cee 7837
7b7c6c81 7838 r = amdgpu_bo_pin(rbo, domain);
e7b07cee 7839 if (unlikely(r != 0)) {
30b7c614
HW
7840 if (r != -ERESTARTSYS)
7841 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
f06e2167 7842 goto error_unlock;
e7b07cee
HW
7843 }
7844
bb812f1e
JZ
7845 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7846 if (unlikely(r != 0)) {
bb812f1e 7847 DRM_ERROR("%p bind failed\n", rbo);
f06e2167 7848 goto error_unpin;
e7b07cee 7849 }
7df7e505 7850
f06e2167 7851 amdgpu_bo_unreserve(rbo);
bb812f1e 7852
7b7c6c81 7853 afb->address = amdgpu_bo_gpu_offset(rbo);
e7b07cee
HW
7854
7855 amdgpu_bo_ref(rbo);
7856
cf322b49
NK
7857 /**
7858 * We don't do surface updates on planes that have been newly created,
7859 * but we also don't have the afb->address during atomic check.
7860 *
7861 * Fill in buffer attributes depending on the address here, but only on
7862 * newly created planes since they're not being used by DC yet and this
7863 * won't modify global state.
7864 */
7865 dm_plane_state_old = to_dm_plane_state(plane->state);
7866 dm_plane_state_new = to_dm_plane_state(new_state);
7867
3be5262e 7868 if (dm_plane_state_new->dc_state &&
cf322b49
NK
7869 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7870 struct dc_plane_state *plane_state =
7871 dm_plane_state_new->dc_state;
7872 bool force_disable_dcc = !plane_state->dcc.enable;
e7b07cee 7873
320932bf 7874 fill_plane_buffer_attributes(
695af5f9 7875 adev, afb, plane_state->format, plane_state->rotation,
6eed95b0 7876 afb->tiling_flags,
cf322b49
NK
7877 &plane_state->tiling_info, &plane_state->plane_size,
7878 &plane_state->dcc, &plane_state->address,
6eed95b0 7879 afb->tmz_surface, force_disable_dcc);
e7b07cee
HW
7880 }
7881
e7b07cee 7882 return 0;
f06e2167
CK
7883
7884error_unpin:
7885 amdgpu_bo_unpin(rbo);
7886
7887error_unlock:
7888 amdgpu_bo_unreserve(rbo);
7889 return r;
e7b07cee
HW
7890}
7891
3ee6b26b
AD
7892static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7893 struct drm_plane_state *old_state)
e7b07cee
HW
7894{
7895 struct amdgpu_bo *rbo;
e7b07cee
HW
7896 int r;
7897
7898 if (!old_state->fb)
7899 return;
7900
e68d14dd 7901 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
e7b07cee
HW
7902 r = amdgpu_bo_reserve(rbo, false);
7903 if (unlikely(r)) {
7904 DRM_ERROR("failed to reserve rbo before unpin\n");
7905 return;
b830ebc9
HW
7906 }
7907
7908 amdgpu_bo_unpin(rbo);
7909 amdgpu_bo_unreserve(rbo);
7910 amdgpu_bo_unref(&rbo);
e7b07cee
HW
7911}
7912
8c44515b
AP
7913static int dm_plane_helper_check_state(struct drm_plane_state *state,
7914 struct drm_crtc_state *new_crtc_state)
7915{
6300b3bd
MK
7916 struct drm_framebuffer *fb = state->fb;
7917 int min_downscale, max_upscale;
7918 int min_scale = 0;
7919 int max_scale = INT_MAX;
7920
40d916a2 7921 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6300b3bd 7922 if (fb && state->crtc) {
40d916a2
NC
7923 /* Validate viewport to cover the case when only the position changes */
7924 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7925 int viewport_width = state->crtc_w;
7926 int viewport_height = state->crtc_h;
7927
7928 if (state->crtc_x < 0)
7929 viewport_width += state->crtc_x;
7930 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7931 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7932
7933 if (state->crtc_y < 0)
7934 viewport_height += state->crtc_y;
7935 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7936 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7937
4abdb72b
NC
7938 if (viewport_width < 0 || viewport_height < 0) {
7939 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7940 return -EINVAL;
7941 } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7942 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
40d916a2 7943 return -EINVAL;
4abdb72b
NC
7944 } else if (viewport_height < MIN_VIEWPORT_SIZE) {
7945 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
40d916a2 7946 return -EINVAL;
4abdb72b
NC
7947 }
7948
40d916a2
NC
7949 }
7950
7951 /* Get min/max allowed scaling factors from plane caps. */
6300b3bd
MK
7952 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7953 &min_downscale, &max_upscale);
7954 /*
7955 * Convert to drm convention: 16.16 fixed point, instead of dc's
7956 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7957 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7958 */
7959 min_scale = (1000 << 16) / max_upscale;
7960 max_scale = (1000 << 16) / min_downscale;
7961 }
8c44515b 7962
8c44515b 7963 return drm_atomic_helper_check_plane_state(
6300b3bd 7964 state, new_crtc_state, min_scale, max_scale, true, true);
8c44515b
AP
7965}

static int dm_plane_atomic_check(struct drm_plane *plane,
				 struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
										 plane);
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	struct dc *dc = adev->dm.dc;
	struct dm_plane_state *dm_plane_state;
	struct dc_scaling_info scaling_info;
	struct drm_crtc_state *new_crtc_state;
	int ret;

	trace_amdgpu_dm_plane_atomic_check(new_plane_state);

	dm_plane_state = to_dm_plane_state(new_plane_state);

	if (!dm_plane_state->dc_state)
		return 0;

	new_crtc_state =
		drm_atomic_get_new_crtc_state(state,
					      new_plane_state->crtc);
	if (!new_crtc_state)
		return -EINVAL;

	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
	if (ret)
		return ret;

	ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
	if (ret)
		return ret;

	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
		return 0;

	return -EINVAL;
}

static int dm_plane_atomic_async_check(struct drm_plane *plane,
				       struct drm_atomic_state *state)
{
	/* Only support async updates on cursor planes. */
	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		return -EINVAL;

	return 0;
}

static void dm_plane_atomic_async_update(struct drm_plane *plane,
					 struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	struct drm_plane_state *old_state =
		drm_atomic_get_old_plane_state(state, plane);

	trace_amdgpu_dm_atomic_update_cursor(new_state);

	swap(plane->state->fb, new_state->fb);

	plane->state->src_x = new_state->src_x;
	plane->state->src_y = new_state->src_y;
	plane->state->src_w = new_state->src_w;
	plane->state->src_h = new_state->src_h;
	plane->state->crtc_x = new_state->crtc_x;
	plane->state->crtc_y = new_state->crtc_y;
	plane->state->crtc_w = new_state->crtc_w;
	plane->state->crtc_h = new_state->crtc_h;

	handle_cursor_update(plane, old_state);
}

static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
	.prepare_fb = dm_plane_helper_prepare_fb,
	.cleanup_fb = dm_plane_helper_cleanup_fb,
	.atomic_check = dm_plane_atomic_check,
	.atomic_async_check = dm_plane_atomic_async_check,
	.atomic_async_update = dm_plane_atomic_async_update
};

/*
 * TODO: these are currently initialized to rgb formats only.
 * For future use cases we should either initialize them dynamically based on
 * plane capabilities, or initialize this array to all formats, so the
 * internal drm check will succeed, and let DC implement the proper checks.
 */
static const uint32_t rgb_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_XRGB16161616,
	DRM_FORMAT_XBGR16161616,
	DRM_FORMAT_ARGB16161616,
	DRM_FORMAT_ABGR16161616,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_RGB565,
};

static const uint32_t overlay_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_RGB565
};

static const u32 cursor_formats[] = {
	DRM_FORMAT_ARGB8888
};

static int get_plane_formats(const struct drm_plane *plane,
			     const struct dc_plane_cap *plane_cap,
			     uint32_t *formats, int max_formats)
{
	int i, num_formats = 0;

	/*
	 * TODO: Query support for each group of formats directly from
	 * DC plane caps. This will require adding more formats to the
	 * caps list.
	 */

	switch (plane->type) {
	case DRM_PLANE_TYPE_PRIMARY:
		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = rgb_formats[i];
		}

		if (plane_cap && plane_cap->pixel_format_support.nv12)
			formats[num_formats++] = DRM_FORMAT_NV12;
		if (plane_cap && plane_cap->pixel_format_support.p010)
			formats[num_formats++] = DRM_FORMAT_P010;
		if (plane_cap && plane_cap->pixel_format_support.fp16) {
			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
		}
		break;

	case DRM_PLANE_TYPE_OVERLAY:
		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = overlay_formats[i];
		}
		break;

	case DRM_PLANE_TYPE_CURSOR:
		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = cursor_formats[i];
		}
		break;
	}

	return num_formats;
}

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap)
{
	uint32_t formats[32];
	int num_formats;
	int res = -EPERM;
	unsigned int supported_rotations;
	uint64_t *modifiers = NULL;

	num_formats = get_plane_formats(plane, plane_cap, formats,
					ARRAY_SIZE(formats));

	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
	if (res)
		return res;

	if (modifiers == NULL)
		adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true;

	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
				       &dm_plane_funcs, formats, num_formats,
				       modifiers, plane->type, NULL);
	kfree(modifiers);
	if (res)
		return res;

	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
	    plane_cap && plane_cap->per_pixel_alpha) {
		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
					  BIT(DRM_MODE_BLEND_PREMULTI) |
					  BIT(DRM_MODE_BLEND_COVERAGE);

		drm_plane_create_alpha_property(plane);
		drm_plane_create_blend_mode_property(plane, blend_caps);
	}

	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
	    plane_cap &&
	    (plane_cap->pixel_format_support.nv12 ||
	     plane_cap->pixel_format_support.p010)) {
		/* This only affects YUV formats. */
		drm_plane_create_color_properties(
			plane,
			BIT(DRM_COLOR_YCBCR_BT601) |
			BIT(DRM_COLOR_YCBCR_BT709) |
			BIT(DRM_COLOR_YCBCR_BT2020),
			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
	}

	supported_rotations =
		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;

	if (dm->adev->asic_type >= CHIP_BONAIRE &&
	    plane->type != DRM_PLANE_TYPE_CURSOR)
		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
						   supported_rotations);

	drm_plane_helper_add(plane, &dm_plane_helper_funcs);

	/* Create (reset) the plane state */
	if (plane->funcs->reset)
		plane->funcs->reset(plane);

	return 0;
}

static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t crtc_index)
{
	struct amdgpu_crtc *acrtc = NULL;
	struct drm_plane *cursor_plane;

	int res = -ENOMEM;

	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
	if (!cursor_plane)
		goto fail;

	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);

	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
	if (!acrtc)
		goto fail;

	res = drm_crtc_init_with_planes(
			dm->ddev,
			&acrtc->base,
			plane,
			cursor_plane,
			&amdgpu_dm_crtc_funcs, NULL);

	if (res)
		goto fail;

	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);

	/* Create (reset) the plane state */
	if (acrtc->base.funcs->reset)
		acrtc->base.funcs->reset(&acrtc->base);

	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;

	acrtc->crtc_id = crtc_index;
	acrtc->base.enabled = false;
	acrtc->otg_inst = -1;

	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
				   true, MAX_COLOR_LUT_ENTRIES);
	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);

	return 0;

fail:
	kfree(acrtc);
	kfree(cursor_plane);
	return res;
}


static int to_drm_connector_type(enum signal_type st)
{
	switch (st) {
	case SIGNAL_TYPE_HDMI_TYPE_A:
		return DRM_MODE_CONNECTOR_HDMIA;
	case SIGNAL_TYPE_EDP:
		return DRM_MODE_CONNECTOR_eDP;
	case SIGNAL_TYPE_LVDS:
		return DRM_MODE_CONNECTOR_LVDS;
	case SIGNAL_TYPE_RGB:
		return DRM_MODE_CONNECTOR_VGA;
	case SIGNAL_TYPE_DISPLAY_PORT:
	case SIGNAL_TYPE_DISPLAY_PORT_MST:
		return DRM_MODE_CONNECTOR_DisplayPort;
	case SIGNAL_TYPE_DVI_DUAL_LINK:
	case SIGNAL_TYPE_DVI_SINGLE_LINK:
		return DRM_MODE_CONNECTOR_DVID;
	case SIGNAL_TYPE_VIRTUAL:
		return DRM_MODE_CONNECTOR_VIRTUAL;

	default:
		return DRM_MODE_CONNECTOR_Unknown;
	}
}

static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
{
	struct drm_encoder *encoder;

	/* There is only one encoder per connector */
	drm_connector_for_each_possible_encoder(connector, encoder)
		return encoder;

	return NULL;
}

static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
{
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (encoder == NULL)
		return;

	amdgpu_encoder = to_amdgpu_encoder(encoder);

	amdgpu_encoder->native_mode.clock = 0;

	if (!list_empty(&connector->probed_modes)) {
		struct drm_display_mode *preferred_mode = NULL;

		list_for_each_entry(preferred_mode,
				    &connector->probed_modes,
				    head) {
			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
				amdgpu_encoder->native_mode = *preferred_mode;

			break;
		}

	}
}

static struct drm_display_mode *
amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
			     char *name,
			     int hdisplay, int vdisplay)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;

	mode = drm_mode_duplicate(dev, native_mode);

	if (mode == NULL)
		return NULL;

	mode->hdisplay = hdisplay;
	mode->vdisplay = vdisplay;
	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);

	return mode;

}

static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
						 struct drm_connector *connector)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	int i;
	int n;
	struct mode_size {
		char name[DRM_DISPLAY_MODE_LEN];
		int w;
		int h;
	} common_modes[] = {
		{  "640x480",  640,  480},
		{  "800x600",  800,  600},
		{ "1024x768", 1024,  768},
		{ "1280x720", 1280,  720},
		{ "1280x800", 1280,  800},
		{"1280x1024", 1280, 1024},
		{ "1440x900", 1440,  900},
		{"1680x1050", 1680, 1050},
		{"1600x1200", 1600, 1200},
		{"1920x1080", 1920, 1080},
		{"1920x1200", 1920, 1200}
	};

	n = ARRAY_SIZE(common_modes);

	for (i = 0; i < n; i++) {
		struct drm_display_mode *curmode = NULL;
		bool mode_existed = false;

		if (common_modes[i].w > native_mode->hdisplay ||
		    common_modes[i].h > native_mode->vdisplay ||
		    (common_modes[i].w == native_mode->hdisplay &&
		     common_modes[i].h == native_mode->vdisplay))
			continue;

		list_for_each_entry(curmode, &connector->probed_modes, head) {
			if (common_modes[i].w == curmode->hdisplay &&
			    common_modes[i].h == curmode->vdisplay) {
				mode_existed = true;
				break;
			}
		}

		if (mode_existed)
			continue;

		mode = amdgpu_dm_create_common_mode(encoder,
				common_modes[i].name, common_modes[i].w,
				common_modes[i].h);
		if (!mode)
			continue;

		drm_mode_probed_add(connector, mode);
		amdgpu_dm_connector->num_modes++;
	}
}

static void amdgpu_set_panel_orientation(struct drm_connector *connector)
{
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;
	const struct drm_display_mode *native_mode;

	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
	    connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
		return;

	encoder = amdgpu_dm_connector_to_encoder(connector);
	if (!encoder)
		return;

	amdgpu_encoder = to_amdgpu_encoder(encoder);

	native_mode = &amdgpu_encoder->native_mode;
	if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
		return;

	drm_connector_set_panel_orientation_with_quirk(connector,
						       DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
						       native_mode->hdisplay,
						       native_mode->vdisplay);
}

static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
					      struct edid *edid)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);

	if (edid) {
		/* empty probed_modes */
		INIT_LIST_HEAD(&connector->probed_modes);
		amdgpu_dm_connector->num_modes =
				drm_add_edid_modes(connector, edid);

		/* Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(), since an EDID can carry
		 * more than one preferred mode. Modes later in the
		 * probed list could be of higher resolution and also
		 * preferred; for example, a 3840x2160 preferred timing
		 * in the base EDID and a 4096x2160 preferred resolution
		 * in a later DID extension block.
		 */
		drm_mode_sort(&connector->probed_modes);
		amdgpu_dm_get_native_mode(connector);

		/* Freesync capabilities are reset by calling
		 * drm_add_edid_modes() and need to be
		 * restored here.
		 */
		amdgpu_dm_update_freesync_caps(connector, edid);

		amdgpu_set_panel_orientation(connector);
	} else {
		amdgpu_dm_connector->num_modes = 0;
	}
}

static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
			      struct drm_display_mode *mode)
{
	struct drm_display_mode *m;

	list_for_each_entry(m, &aconnector->base.probed_modes, head) {
		if (drm_mode_equal(m, mode))
			return true;
	}

	return false;
}

static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
{
	const struct drm_display_mode *m;
	struct drm_display_mode *new_mode;
	uint i;
	uint32_t new_modes_count = 0;

	/* Standard FPS values
	 *
	 * 23.976       - TV/NTSC
	 * 24           - Cinema
	 * 25           - TV/PAL
	 * 29.97        - TV/NTSC
	 * 30           - TV/NTSC
	 * 48           - Cinema HFR
	 * 50           - TV/PAL
	 * 60           - Commonly used
	 * 48,72,96,120 - Multiples of 24
	 */
	static const uint32_t common_rates[] = {
		23976, 24000, 25000, 29970, 30000,
		48000, 50000, 60000, 72000, 96000, 120000
	};

	/*
	 * Find the mode with the highest refresh rate at the same
	 * resolution as the preferred mode. Some monitors report a
	 * preferred mode with a lower resolution than the highest
	 * refresh rate they support.
	 */

	m = get_highest_refresh_rate_mode(aconnector, true);
	if (!m)
		return 0;

	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
		uint64_t target_vtotal, target_vtotal_diff;
		uint64_t num, den;

		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
			continue;

		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
		    common_rates[i] > aconnector->max_vfreq * 1000)
			continue;

		num = (unsigned long long)m->clock * 1000 * 1000;
		den = common_rates[i] * (unsigned long long)m->htotal;
		target_vtotal = div_u64(num, den);
		target_vtotal_diff = target_vtotal - m->vtotal;
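		/*
		 * Illustrative example (hypothetical numbers): retargeting
		 * a 1920x1080@60 mode with clock = 148500 kHz, htotal = 2200
		 * and vtotal = 1125 to 48 Hz gives
		 *   target_vtotal = 148500 * 10^6 / (48000 * 2200) = 1406,
		 * i.e. the vertical blank is stretched by 281 lines while
		 * the pixel clock stays untouched.
		 */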

		/* Check for illegal modes */
		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
		    m->vtotal + target_vtotal_diff < m->vsync_end)
			continue;

		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
		if (!new_mode)
			goto out;

		new_mode->vtotal += (u16)target_vtotal_diff;
		new_mode->vsync_start += (u16)target_vtotal_diff;
		new_mode->vsync_end += (u16)target_vtotal_diff;
		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
		new_mode->type |= DRM_MODE_TYPE_DRIVER;

		if (!is_duplicate_mode(aconnector, new_mode)) {
			drm_mode_probed_add(&aconnector->base, new_mode);
			new_modes_count += 1;
		} else
			drm_mode_destroy(aconnector->base.dev, new_mode);
	}
out:
	return new_modes_count;
}

static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
						   struct edid *edid)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);

	if (!edid)
		return;

	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
		amdgpu_dm_connector->num_modes +=
			add_fs_modes(amdgpu_dm_connector);
}

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct drm_encoder *encoder;
	struct edid *edid = amdgpu_dm_connector->edid;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (!drm_edid_is_valid(edid)) {
		amdgpu_dm_connector->num_modes =
				drm_add_modes_noedid(connector, 640, 480);
	} else {
		amdgpu_dm_connector_ddc_get_modes(connector, edid);
		amdgpu_dm_connector_add_common_modes(encoder, connector);
		amdgpu_dm_connector_add_freesync_modes(connector, edid);
	}
	amdgpu_dm_fbc_init(connector);

	return amdgpu_dm_connector->num_modes;
}

void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
				     struct amdgpu_dm_connector *aconnector,
				     int connector_type,
				     struct dc_link *link,
				     int link_index)
{
	struct amdgpu_device *adev = drm_to_adev(dm->ddev);

	/*
	 * Some of the properties below require access to state, like bpc.
	 * Allocate some default initial connector state with our reset helper.
	 */
	if (aconnector->base.funcs->reset)
		aconnector->base.funcs->reset(&aconnector->base);

	aconnector->connector_id = link_index;
	aconnector->dc_link = link;
	aconnector->base.interlace_allowed = false;
	aconnector->base.doublescan_allowed = false;
	aconnector->base.stereo_allowed = false;
	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
	aconnector->audio_inst = -1;
	mutex_init(&aconnector->hpd_lock);

	/*
	 * Configure HPD hot plug support. connector->polled defaults to 0,
	 * which means HPD hot plug is not supported.
	 */
	switch (connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		link->link_enc = link_enc_cfg_get_link_enc(link);
		ASSERT(link->link_enc);
		if (link->link_enc)
			aconnector->base.ycbcr_420_allowed =
				link->link_enc->features.dp_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DVID:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	default:
		break;
	}

	drm_object_attach_property(&aconnector->base.base,
				dm->ddev->mode_config.scaling_mode_property,
				DRM_MODE_SCALE_NONE);

	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_property,
				UNDERSCAN_OFF);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_hborder_property,
				0);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_vborder_property,
				0);

	if (!aconnector->mst_port)
		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);

	/* This defaults to the max in the range, but we want 8bpc for non-eDP. */
	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;

	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
		drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.abm_level_property, 0);
	}

	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector_type == DRM_MODE_CONNECTOR_eDP) {
		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);

		if (!aconnector->mst_port)
			drm_connector_attach_vrr_capable_property(&aconnector->base);

#ifdef CONFIG_DRM_AMD_DC_HDCP
		if (adev->dm.hdcp_workqueue)
			drm_connector_attach_content_protection_property(&aconnector->base, true);
#endif
	}
}

static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
			      struct i2c_msg *msgs, int num)
{
	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
	struct ddc_service *ddc_service = i2c->ddc_service;
	struct i2c_command cmd;
	int i;
	int result = -EIO;

	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);

	if (!cmd.payloads)
		return result;

	cmd.number_of_payloads = num;
	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
	cmd.speed = 100;

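	/*
	 * Translate each i2c_msg 1:1 into a dc i2c_payload. As an
	 * illustrative example (hypothetical transfer), a 128-byte EDID
	 * read at address 0x50 arrives as two msgs -- a one-byte offset
	 * write followed by a 128-byte read -- and becomes two payloads
	 * submitted in a single i2c_command below.
	 */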
	for (i = 0; i < num; i++) {
		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
		cmd.payloads[i].address = msgs[i].addr;
		cmd.payloads[i].length = msgs[i].len;
		cmd.payloads[i].data = msgs[i].buf;
	}

	if (dc_submit_i2c(
			ddc_service->ctx->dc,
			ddc_service->link->link_index,
			&cmd))
		result = num;

	kfree(cmd.payloads);
	return result;
}

static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
	.master_xfer = amdgpu_dm_i2c_xfer,
	.functionality = amdgpu_dm_i2c_func,
};

static struct amdgpu_i2c_adapter *
create_i2c(struct ddc_service *ddc_service,
	   int link_index,
	   int *res)
{
	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
	struct amdgpu_i2c_adapter *i2c;

	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
	if (!i2c)
		return NULL;
	i2c->base.owner = THIS_MODULE;
	i2c->base.class = I2C_CLASS_DDC;
	i2c->base.dev.parent = &adev->pdev->dev;
	i2c->base.algo = &amdgpu_dm_i2c_algo;
	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
	i2c_set_adapdata(&i2c->base, i2c);
	i2c->ddc_service = ddc_service;

	return i2c;
}


/*
 * Note: this function assumes that dc_link_detect() was called for the
 * dc_link which will be represented by this aconnector.
 */
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *aconnector,
				    uint32_t link_index,
				    struct amdgpu_encoder *aencoder)
{
	int res = 0;
	int connector_type;
	struct dc *dc = dm->dc;
	struct dc_link *link = dc_get_link_at_index(dc, link_index);
	struct amdgpu_i2c_adapter *i2c;

	link->priv = aconnector;

	DRM_DEBUG_DRIVER("%s()\n", __func__);

	i2c = create_i2c(link->ddc, link->link_index, &res);
	if (!i2c) {
		DRM_ERROR("Failed to create i2c adapter data\n");
		return -ENOMEM;
	}

	aconnector->i2c = i2c;
	res = i2c_add_adapter(&i2c->base);

	if (res) {
		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
		goto out_free;
	}

	connector_type = to_drm_connector_type(link->connector_signal);

	res = drm_connector_init_with_ddc(
			dm->ddev,
			&aconnector->base,
			&amdgpu_dm_connector_funcs,
			connector_type,
			&i2c->base);

	if (res) {
		DRM_ERROR("connector_init failed\n");
		aconnector->connector_id = -1;
		goto out_free;
	}

	drm_connector_helper_add(
			&aconnector->base,
			&amdgpu_dm_connector_helper_funcs);

	amdgpu_dm_connector_init_helper(
		dm,
		aconnector,
		connector_type,
		link,
		link_index);

	drm_connector_attach_encoder(
		&aconnector->base, &aencoder->base);

	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
		|| connector_type == DRM_MODE_CONNECTOR_eDP)
		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);

out_free:
	if (res) {
		kfree(i2c);
		aconnector->i2c = NULL;
	}
	return res;
}

int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
{
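	/*
	 * One bit per available CRTC, i.e. GENMASK(num_crtc - 1, 0),
	 * capped at six CRTCs.
	 */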
	switch (adev->mode_info.num_crtc) {
	case 1:
		return 0x1;
	case 2:
		return 0x3;
	case 3:
		return 0x7;
	case 4:
		return 0xf;
	case 5:
		return 0x1f;
	case 6:
	default:
		return 0x3f;
	}
}

static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	int res = drm_encoder_init(dev,
				   &aencoder->base,
				   &amdgpu_dm_encoder_funcs,
				   DRM_MODE_ENCODER_TMDS,
				   NULL);

	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);

	if (!res)
		aencoder->encoder_id = link_index;
	else
		aencoder->encoder_id = -1;

	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);

	return res;
}

static void manage_dm_interrupts(struct amdgpu_device *adev,
				 struct amdgpu_crtc *acrtc,
				 bool enable)
{
	/*
	 * We have no guarantee that the frontend index maps to the same
	 * backend index - some even map to more than one.
	 *
	 * TODO: Use a different interrupt or check DC itself for the mapping.
	 */
	int irq_type =
		amdgpu_display_crtc_idx_to_irq_type(
			adev,
			acrtc->crtc_id);

	if (enable) {
		drm_crtc_vblank_on(&acrtc->base);
		amdgpu_irq_get(
			adev,
			&adev->pageflip_irq,
			irq_type);
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
		amdgpu_irq_get(
			adev,
			&adev->vline0_irq,
			irq_type);
#endif
	} else {
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
		amdgpu_irq_put(
			adev,
			&adev->vline0_irq,
			irq_type);
#endif
		amdgpu_irq_put(
			adev,
			&adev->pageflip_irq,
			irq_type);
		drm_crtc_vblank_off(&acrtc->base);
	}
}

static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
				      struct amdgpu_crtc *acrtc)
{
	int irq_type =
		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);

	/*
	 * This reads the current state for the IRQ and forcibly reapplies
	 * the setting to hardware.
	 */
	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
}

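/*
 * Return true if the scaling mode or the effective underscan borders
 * differ between the old and new connector state, i.e. the stream's
 * destination rectangle has to be recomputed.
 */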
static bool
is_scaling_state_different(const struct dm_connector_state *dm_state,
			   const struct dm_connector_state *old_dm_state)
{
	if (dm_state->scaling != old_dm_state->scaling)
		return true;
	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
			return true;
	} else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
			return true;
	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
		return true;
	return false;
}

#ifdef CONFIG_DRM_AMD_DC_HDCP
static bool is_content_protection_different(struct drm_connector_state *state,
					    const struct drm_connector_state *old_state,
					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);

	/* Handle: Type0/1 change */
	if (old_state->hdcp_content_type != state->hdcp_content_type &&
	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return true;
	}

	/* CP is being re-enabled, ignore this.
	 *
	 * Handles: ENABLED -> DESIRED
	 */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
		return false;
	}

	/* S3 resume case, since the old state will always be 0 (UNDESIRED) and
	 * the restored state will be ENABLED.
	 *
	 * Handles: UNDESIRED -> ENABLED
	 */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;

	/* Stream removed and re-enabled
	 *
	 * Can sometimes overlap with the HPD case,
	 * thus set update_hdcp to false to avoid
	 * setting HDCP multiple times.
	 *
	 * Handles: DESIRED -> DESIRED (Special case)
	 */
	if (!(old_state->crtc && old_state->crtc->enabled) &&
	    state->crtc && state->crtc->enabled &&
	    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		dm_con_state->update_hdcp = false;
		return true;
	}

	/* Hot-plug, headless s3, dpms
	 *
	 * Only start HDCP if the display is connected/enabled.
	 * update_hdcp flag will be set to false until the next
	 * HPD comes in.
	 *
	 * Handles: DESIRED -> DESIRED (Special case)
	 */
	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
		dm_con_state->update_hdcp = false;
		return true;
	}

	/*
	 * Handles: UNDESIRED -> UNDESIRED
	 *          DESIRED -> DESIRED
	 *          ENABLED -> ENABLED
	 */
	if (old_state->content_protection == state->content_protection)
		return false;

	/*
	 * Handles: UNDESIRED -> DESIRED
	 *          DESIRED -> UNDESIRED
	 *          ENABLED -> UNDESIRED
	 */
	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
		return true;

	/*
	 * Handles: DESIRED -> ENABLED
	 */
	return false;
}

#endif
static void remove_stream(struct amdgpu_device *adev,
			  struct amdgpu_crtc *acrtc,
			  struct dc_stream_state *stream)
{
	/* this is the update mode case */

	acrtc->otg_inst = -1;
	acrtc->enabled = false;
}

static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
			       struct dc_cursor_position *position)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	int x, y;
	int xorigin = 0, yorigin = 0;

	if (!crtc || !plane->state->fb)
		return 0;

	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
			  __func__,
			  plane->state->crtc_w,
			  plane->state->crtc_h);
		return -EINVAL;
	}

	x = plane->state->crtc_x;
	y = plane->state->crtc_y;

	if (x <= -amdgpu_crtc->max_cursor_width ||
	    y <= -amdgpu_crtc->max_cursor_height)
		return 0;

	if (x < 0) {
		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
		x = 0;
	}
	if (y < 0) {
		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
		y = 0;
	}
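	/*
	 * Example with hypothetical values: a 64x64 cursor at crtc_x = -10
	 * ends up at x = 0 with x_hotspot = 10, so the hardware shifts the
	 * cursor bitmap left by 10 pixels instead of clipping it at the
	 * screen edge.
	 */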
	position->enable = true;
	position->translate_by_source = true;
	position->x = x;
	position->y = y;
	position->x_hotspot = xorigin;
	position->y_hotspot = yorigin;

	return 0;
}

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state)
{
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	uint64_t address = afb ? afb->address : 0;
	struct dc_cursor_position position = {0};
	struct dc_cursor_attributes attributes;
	int ret;

	if (!plane->state->fb && !old_plane_state->fb)
		return;

	DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
		      __func__,
		      amdgpu_crtc->crtc_id,
		      plane->state->crtc_w,
		      plane->state->crtc_h);

	ret = get_cursor_position(plane, crtc, &position);
	if (ret)
		return;

	if (!position.enable) {
		/* turn off cursor */
		if (crtc_state && crtc_state->stream) {
			mutex_lock(&adev->dm.dc_lock);
			dc_stream_set_cursor_position(crtc_state->stream,
						      &position);
			mutex_unlock(&adev->dm.dc_lock);
		}
		return;
	}

	amdgpu_crtc->cursor_width = plane->state->crtc_w;
	amdgpu_crtc->cursor_height = plane->state->crtc_h;

	memset(&attributes, 0, sizeof(attributes));
	attributes.address.high_part = upper_32_bits(address);
	attributes.address.low_part  = lower_32_bits(address);
	attributes.width = plane->state->crtc_w;
	attributes.height = plane->state->crtc_h;
	attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
	attributes.rotation_angle = 0;
	attributes.attribute_flags.value = 0;

	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];

	if (crtc_state->stream) {
		mutex_lock(&adev->dm.dc_lock);
		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
						     &attributes))
			DRM_ERROR("DC failed to set cursor attributes\n");

		if (!dc_stream_set_cursor_position(crtc_state->stream,
						   &position))
			DRM_ERROR("DC failed to set cursor position\n");
		mutex_unlock(&adev->dm.dc_lock);
	}
}
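/*
 * Hand the pending pageflip event over to the pageflip interrupt handler.
 * Must be called with acrtc->base.dev->event_lock held.
 */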
static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
{
	assert_spin_locked(&acrtc->base.dev->event_lock);
	WARN_ON(acrtc->event);

	acrtc->event = acrtc->base.state->event;

	/* Set the flip status */
	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

	/* Mark this event as consumed */
	acrtc->base.state->event = NULL;

	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
		     acrtc->crtc_id);
}

static void update_freesync_state_on_stream(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state,
	struct dc_stream_state *new_stream,
	struct dc_plane_state *surface,
	u32 flip_timestamp_in_us)
{
	struct mod_vrr_params vrr_params;
	struct dc_info_packet vrr_infopacket = {0};
	struct amdgpu_device *adev = dm->adev;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
	unsigned long flags;
	bool pack_sdp_v1_3 = false;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */

	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;

	if (surface) {
		mod_freesync_handle_preflip(
			dm->freesync_module,
			surface,
			new_stream,
			flip_timestamp_in_us,
			&vrr_params);

		if (adev->family < AMDGPU_FAMILY_AI &&
		    amdgpu_dm_vrr_active(new_crtc_state)) {
			mod_freesync_handle_v_update(dm->freesync_module,
						     new_stream, &vrr_params);

			/* Need to call this before the frame ends. */
			dc_stream_adjust_vmin_vmax(dm->dc,
						   new_crtc_state->stream,
						   &vrr_params.adjust);
		}
	}

	mod_freesync_build_vrr_infopacket(
		dm->freesync_module,
		new_stream,
		&vrr_params,
		PACKET_TYPE_VRR,
		TRANSFER_FUNC_UNKNOWN,
		&vrr_infopacket,
		pack_sdp_v1_3);

	new_crtc_state->freesync_timing_changed |=
		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
			&vrr_params.adjust,
			sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->freesync_vrr_info_changed |=
		(memcmp(&new_crtc_state->vrr_infopacket,
			&vrr_infopacket,
			sizeof(vrr_infopacket)) != 0);

	acrtc->dm_irq_params.vrr_params = vrr_params;
	new_crtc_state->vrr_infopacket = vrr_infopacket;

	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
	new_stream->vrr_infopacket = vrr_infopacket;

	if (new_crtc_state->freesync_vrr_info_changed)
		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
			      new_crtc_state->base.crtc->base.id,
			      (int)new_crtc_state->base.vrr_enabled,
			      (int)vrr_params.state);

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

static void update_stream_irq_parameters(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state)
{
	struct dc_stream_state *new_stream = new_crtc_state->stream;
	struct mod_vrr_params vrr_params;
	struct mod_freesync_config config = new_crtc_state->freesync_config;
	struct amdgpu_device *adev = dm->adev;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
	unsigned long flags;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */
	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;

	if (new_crtc_state->vrr_supported &&
	    config.min_refresh_in_uhz &&
	    config.max_refresh_in_uhz) {
		/*
		 * If a freesync compatible mode was set, config.state will
		 * already have been set in atomic check.
		 */
		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
		} else {
			config.state = new_crtc_state->base.vrr_enabled ?
						VRR_STATE_ACTIVE_VARIABLE :
						VRR_STATE_INACTIVE;
		}
	} else {
		config.state = VRR_STATE_UNSUPPORTED;
	}

	mod_freesync_build_vrr_params(dm->freesync_module,
				      new_stream,
				      &config, &vrr_params);

	new_crtc_state->freesync_timing_changed |=
		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->freesync_config = config;
	/* Copy state for access from DM IRQ handler */
	acrtc->dm_irq_params.freesync_config = config;
	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
	acrtc->dm_irq_params.vrr_params = vrr_params;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
					    struct dm_crtc_state *new_state)
{
	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);

	if (!old_vrr_active && new_vrr_active) {
		/* Transition VRR inactive -> active:
		 * While VRR is active, we must not disable vblank irq, as a
		 * reenable after disable would compute bogus vblank/pflip
		 * timestamps if it likely happened inside display front-porch.
		 *
		 * We also need vupdate irq for the actual core vblank handling
		 * at end of vblank.
		 */
		dm_set_vupdate_irq(new_state->base.crtc, true);
		drm_crtc_vblank_get(new_state->base.crtc);
		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
				 __func__, new_state->base.crtc->base.id);
	} else if (old_vrr_active && !new_vrr_active) {
		/* Transition VRR active -> inactive:
		 * Allow vblank irq disable again for fixed refresh rate.
		 */
		dm_set_vupdate_irq(new_state->base.crtc, false);
		drm_crtc_vblank_put(new_state->base.crtc);
		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
				 __func__, new_state->base.crtc->base.id);
	}
}

static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
{
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state;
	int i;

	/*
	 * TODO: Make this per-stream so we don't issue redundant updates for
	 * commits with multiple streams.
	 */
	for_each_old_plane_in_state(state, plane, old_plane_state, i)
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			handle_cursor_update(plane, old_plane_state);
}

static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
				    struct dc_state *dc_state,
				    struct drm_device *dev,
				    struct amdgpu_display_manager *dm,
				    struct drm_crtc *pcrtc,
				    bool wait_for_vblank)
{
	uint32_t i;
	uint64_t timestamp_ns;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
	struct drm_crtc_state *new_pcrtc_state =
			drm_atomic_get_new_crtc_state(state, pcrtc);
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
	struct dm_crtc_state *dm_old_crtc_state =
			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
	int planes_count = 0, vpos, hpos;
	long r;
	unsigned long flags;
	struct amdgpu_bo *abo;
	uint32_t target_vblank, last_flip_vblank;
	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
	bool pflip_present = false;
	struct {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} *bundle;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		dm_error("Failed to allocate update bundle\n");
		goto cleanup;
	}

	/*
	 * Disable the cursor first if we're disabling all the planes.
	 * It'll remain on the screen after the planes are re-enabled
	 * if we don't.
	 */
	if (acrtc_state->active_planes == 0)
		amdgpu_dm_commit_cursors(state);

	/* update planes when needed */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
		struct drm_crtc *crtc = new_plane_state->crtc;
		struct drm_crtc_state *new_crtc_state;
		struct drm_framebuffer *fb = new_plane_state->fb;
		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
		bool plane_needs_flip;
		struct dc_plane_state *dc_plane;
		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);

		/* Cursor plane is handled after stream updates */
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (!fb || !crtc || pcrtc != crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
		if (!new_crtc_state->active)
			continue;

		dc_plane = dm_new_plane_state->dc_state;

		bundle->surface_updates[planes_count].surface = dc_plane;
		if (new_pcrtc_state->color_mgmt_changed) {
			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
		}

		fill_dc_scaling_info(dm->adev, new_plane_state,
				     &bundle->scaling_infos[planes_count]);

		bundle->surface_updates[planes_count].scaling_info =
			&bundle->scaling_infos[planes_count];

		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;

		pflip_present = pflip_present || plane_needs_flip;

		if (!plane_needs_flip) {
			planes_count += 1;
			continue;
		}

		abo = gem_to_amdgpu_bo(fb->obj[0]);

		/*
		 * Wait for all fences on this FB. Do limited wait to avoid
		 * deadlock during GPU reset when this fence will not signal
		 * but we hold reservation lock for the BO.
		 */
		r = dma_resv_wait_timeout(abo->tbo.base.resv,
					  DMA_RESV_USAGE_WRITE, false,
					  msecs_to_jiffies(5000));
		if (unlikely(r <= 0))
			DRM_ERROR("Waiting for fences timed out!");

		fill_dc_plane_info_and_addr(
			dm->adev, new_plane_state,
			afb->tiling_flags,
			&bundle->plane_infos[planes_count],
			&bundle->flip_addrs[planes_count].address,
			afb->tmz_surface, false);

		drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
			      new_plane_state->plane->index,
			      bundle->plane_infos[planes_count].dcc.enable);

		bundle->surface_updates[planes_count].plane_info =
			&bundle->plane_infos[planes_count];

		fill_dc_dirty_rects(plane, old_plane_state, new_plane_state,
				    new_crtc_state,
				    &bundle->flip_addrs[planes_count]);

		/*
		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroring.
		 */
		bundle->flip_addrs[planes_count].flip_immediate =
			crtc->state->async_flip &&
			acrtc_state->update_type == UPDATE_TYPE_FAST;

		timestamp_ns = ktime_get_ns();
		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
		bundle->surface_updates[planes_count].surface = dc_plane;

		if (!bundle->surface_updates[planes_count].surface) {
			DRM_ERROR("No surface for CRTC: id=%d\n",
				  acrtc_attach->crtc_id);
			continue;
		}

		if (plane == pcrtc->primary)
			update_freesync_state_on_stream(
				dm,
				acrtc_state,
				acrtc_state->stream,
				dc_plane,
				bundle->flip_addrs[planes_count].flip_timestamp_in_us);

		drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
			      __func__,
			      bundle->flip_addrs[planes_count].address.grph.addr.high_part,
			      bundle->flip_addrs[planes_count].address.grph.addr.low_part);

		planes_count += 1;

	}

	if (pflip_present) {
		if (!vrr_active) {
			/* Use old throttling in non-vrr fixed refresh rate mode
			 * to keep flip scheduling based on target vblank counts
			 * working in a backwards compatible way, e.g., for
			 * clients using the GLX_OML_sync_control extension or
			 * DRI3/Present extension with defined target_msc.
			 */
			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
		} else {
			/* For variable refresh rate mode only:
			 * Get vblank of last completed flip to avoid > 1 vrr
			 * flips per video frame by use of throttling, but allow
			 * flip programming anywhere in the possibly large
			 * variable vrr vblank interval for fine-grained flip
			 * timing control and more opportunity to avoid stutter
			 * on late submission of flips.
			 */
			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		target_vblank = last_flip_vblank + wait_for_vblank;
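		/*
		 * Illustratively, with wait_for_vblank == 1 this throttles
		 * flips to at most one per refresh cycle: the loop below
		 * spins until the vblank counter has advanced past the one
		 * in which the previous flip completed.
		 */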
8a48b44c
DF
9549
9550 /*
9551 * Wait until we're out of the vertical blank period before the one
9552 * targeted by the flip
9553 */
9554 while ((acrtc_attach->enabled &&
9555 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9556 0, &vpos, &hpos, NULL,
9557 NULL, &pcrtc->hwmode)
9558 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9559 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9560 (int)(target_vblank -
e3eff4b5 9561 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8a48b44c
DF
9562 usleep_range(1000, 1100);
9563 }
9564
8fe684e9
NK
9565 /**
9566 * Prepare the flip event for the pageflip interrupt to handle.
9567 *
9568 * This only works in the case where we've already turned on the
9569 * appropriate hardware blocks (eg. HUBP) so in the transition case
9570 * from 0 -> n planes we have to skip a hardware generated event
9571 * and rely on sending it from software.
9572 */
9573 if (acrtc_attach->base.state->event &&
10a36226 9574 acrtc_state->active_planes > 0) {
8a48b44c
DF
9575 drm_crtc_vblank_get(pcrtc);
9576
9577 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9578
9579 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9580 prepare_flip_isr(acrtc_attach);
9581
9582 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9583 }
9584
9585 if (acrtc_state->stream) {
8a48b44c 9586 if (acrtc_state->freesync_vrr_info_changed)
74aa7bd4 9587 bundle->stream_update.vrr_infopacket =
8a48b44c 9588 &acrtc_state->stream->vrr_infopacket;
e7b07cee 9589 }
e7b07cee
HW
9590 }
9591
bc92c065 9592 /* Update the planes if changed or disable if we don't have any. */
ed9656fb
ES
9593 if ((planes_count || acrtc_state->active_planes == 0) &&
9594 acrtc_state->stream) {
58aa1c50
NK
9595 /*
9596 * If PSR or idle optimizations are enabled then flush out
9597 * any pending work before hardware programming.
9598 */
06dd1888
NK
9599 if (dm->vblank_control_workqueue)
9600 flush_workqueue(dm->vblank_control_workqueue);
58aa1c50 9601
b6e881c9 9602 bundle->stream_update.stream = acrtc_state->stream;
bc7f670e 9603 if (new_pcrtc_state->mode_changed) {
74aa7bd4
DF
9604 bundle->stream_update.src = acrtc_state->stream->src;
9605 bundle->stream_update.dst = acrtc_state->stream->dst;
e7b07cee
HW
9606 }
9607
cf020d49
NK
9608 if (new_pcrtc_state->color_mgmt_changed) {
9609 /*
9610 * TODO: This isn't fully correct since we've actually
9611 * already modified the stream in place.
9612 */
9613 bundle->stream_update.gamut_remap =
9614 &acrtc_state->stream->gamut_remap_matrix;
9615 bundle->stream_update.output_csc_transform =
9616 &acrtc_state->stream->csc_color_matrix;
9617 bundle->stream_update.out_transfer_func =
9618 acrtc_state->stream->out_transfer_func;
9619 }
9620
9621 acrtc_state->stream->abm_level = acrtc_state->abm_level;
9622 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9623 bundle->stream_update.abm_level = &acrtc_state->abm_level;
9624
9625 /*
9626 * If FreeSync state on the stream has changed then we need to
9627 * re-adjust the min/max bounds now that DC doesn't handle this
9628 * as part of commit.
9629 */
9630 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9631 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9632 dc_stream_adjust_vmin_vmax(
9633 dm->dc, acrtc_state->stream,
9634 &acrtc_attach->dm_irq_params.vrr_params.adjust);
9635 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9636 }
9637 mutex_lock(&dm->dc_lock);
9638 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9639 acrtc_state->stream->link->psr_settings.psr_allow_active)
9640 amdgpu_dm_psr_disable(acrtc_state->stream);
9641
9642 dc_commit_updates_for_stream(dm->dc,
9643 bundle->surface_updates,
9644 planes_count,
9645 acrtc_state->stream,
9646 &bundle->stream_update,
9647 dc_state);
9648
9649 /**
9650 * Enable or disable the interrupts on the backend.
9651 *
9652 * Most pipes are put into power gating when unused.
9653 *
9654 * When power gating is enabled on a pipe we lose the
9655 * interrupt enablement state when power gating is disabled.
9656 *
9657 * So we need to update the IRQ control state in hardware
9658 * whenever the pipe turns on (since it could be previously
9659 * power gated) or off (since some pipes can't be power gated
9660 * on some ASICs).
9661 */
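/*
 * Concretely: a pipe that was power gated while unused loses its
 * pageflip IRQ enable bit, so on the 0 -> n plane transition the
 * call below must reprogram it or flip-done interrupts would not
 * be delivered.
 */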
9662 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9663 dm_update_pflip_irq_state(drm_to_adev(dev),
9664 acrtc_attach);
9665
9666 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9667 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9668 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9669 amdgpu_dm_link_setup_psr(acrtc_state->stream);
9670
9671 /* Decrement skip count when PSR is enabled and we're doing fast updates. */
9672 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9673 acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9674 struct amdgpu_dm_connector *aconn =
9675 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9676
9677 if (aconn->psr_skip_count > 0)
9678 aconn->psr_skip_count--;
9679
9680 /* Allow PSR when skip count is 0. */
9681 acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9682
9683 /*
9684 * If sink supports PSR SU, there is no need to rely on
9685 * a vblank event disable request to enable PSR. PSR SU
9686 * can be enabled immediately once OS demonstrates an
9687 * adequate number of fast atomic commits to notify KMD
9688 * of update events. See `vblank_control_worker()`.
9689 */
9690 if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
9691 acrtc_attach->dm_irq_params.allow_psr_entry &&
9692 !acrtc_state->stream->link->psr_settings.psr_allow_active)
9693 amdgpu_dm_psr_enable(acrtc_state->stream);
9694 } else {
9695 acrtc_attach->dm_irq_params.allow_psr_entry = false;
9696 }
9697
9698 mutex_unlock(&dm->dc_lock);
9699 }
9700
9701 /*
9702 * Update cursor state *after* programming all the planes.
9703 * This avoids redundant programming in the case where we're going
9704 * to be disabling a single plane - those pipes are being disabled.
9705 */
9706 if (acrtc_state->active_planes)
9707 amdgpu_dm_commit_cursors(state);
9708
9709cleanup:
9710 kfree(bundle);
9711}
9712
9713static void amdgpu_dm_commit_audio(struct drm_device *dev,
9714 struct drm_atomic_state *state)
9715{
9716 struct amdgpu_device *adev = drm_to_adev(dev);
9717 struct amdgpu_dm_connector *aconnector;
9718 struct drm_connector *connector;
9719 struct drm_connector_state *old_con_state, *new_con_state;
9720 struct drm_crtc_state *new_crtc_state;
9721 struct dm_crtc_state *new_dm_crtc_state;
9722 const struct dc_stream_status *status;
9723 int i, inst;
9724
9725 /* Notify device removals. */
9726 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9727 if (old_con_state->crtc != new_con_state->crtc) {
9728 /* CRTC changes require notification. */
9729 goto notify;
9730 }
9731
9732 if (!new_con_state->crtc)
9733 continue;
9734
9735 new_crtc_state = drm_atomic_get_new_crtc_state(
9736 state, new_con_state->crtc);
9737
9738 if (!new_crtc_state)
9739 continue;
9740
9741 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9742 continue;
9743
9744 notify:
9745 aconnector = to_amdgpu_dm_connector(connector);
9746
9747 mutex_lock(&adev->dm.audio_lock);
9748 inst = aconnector->audio_inst;
9749 aconnector->audio_inst = -1;
9750 mutex_unlock(&adev->dm.audio_lock);
9751
9752 amdgpu_dm_audio_eld_notify(adev, inst);
9753 }
9754
9755 /* Notify audio device additions. */
9756 for_each_new_connector_in_state(state, connector, new_con_state, i) {
9757 if (!new_con_state->crtc)
9758 continue;
9759
9760 new_crtc_state = drm_atomic_get_new_crtc_state(
9761 state, new_con_state->crtc);
9762
9763 if (!new_crtc_state)
9764 continue;
9765
9766 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9767 continue;
9768
9769 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9770 if (!new_dm_crtc_state->stream)
9771 continue;
9772
9773 status = dc_stream_get_status(new_dm_crtc_state->stream);
9774 if (!status)
9775 continue;
9776
9777 aconnector = to_amdgpu_dm_connector(connector);
9778
9779 mutex_lock(&adev->dm.audio_lock);
9780 inst = status->audio_inst;
9781 aconnector->audio_inst = inst;
9782 mutex_unlock(&adev->dm.audio_lock);
9783
9784 amdgpu_dm_audio_eld_notify(adev, inst);
9785 }
9786}
9787
9788/*
9789 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9790 * @crtc_state: the DRM CRTC state
9791 * @stream_state: the DC stream state.
9792 *
9793 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
9794 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9795 */
9796static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9797 struct dc_stream_state *stream_state)
9798{
9799 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9800}
9801
9802/**
9803 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9804 * @state: The atomic state to commit
9805 *
9806 * This will tell DC to commit the constructed DC state from atomic_check,
9807 * programming the hardware. Any failure here implies a hardware failure, since
9808 * atomic check should have filtered anything non-kosher.
9809 */
9810 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9811{
9812 struct drm_device *dev = state->dev;
9813 struct amdgpu_device *adev = drm_to_adev(dev);
9814 struct amdgpu_display_manager *dm = &adev->dm;
9815 struct dm_atomic_state *dm_state;
9816 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9817 uint32_t i, j;
9818 struct drm_crtc *crtc;
9819 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9820 unsigned long flags;
9821 bool wait_for_vblank = true;
9822 struct drm_connector *connector;
9823 struct drm_connector_state *old_con_state, *new_con_state;
9824 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9825 int crtc_disable_count = 0;
9826 bool mode_set_reset_required = false;
9827
9828 trace_amdgpu_dm_atomic_commit_tail_begin(state);
9829
9830 drm_atomic_helper_update_legacy_modeset_state(dev, state);
9831
9832 dm_state = dm_atomic_get_new_state(state);
9833 if (dm_state && dm_state->context) {
9834 dc_state = dm_state->context;
9835 } else {
9836 /* No state changes, retain current state. */
9837 dc_state_temp = dc_create_state(dm->dc);
9838 ASSERT(dc_state_temp);
9839 dc_state = dc_state_temp;
9840 dc_resource_state_copy_construct_current(dm->dc, dc_state);
9841 }
9842
9843 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9844 new_crtc_state, i) {
9845 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9846
9847 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9848
9849 if (old_crtc_state->active &&
9850 (!new_crtc_state->active ||
9851 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9852 manage_dm_interrupts(adev, acrtc, false);
9853 dc_stream_release(dm_old_crtc_state->stream);
9854 }
9855 }
9856
9857 drm_atomic_helper_calc_timestamping_constants(state);
9858
9859 /* update changed items */
9860 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9861 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9862
9863 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9864 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9865
9866 drm_dbg_state(state->dev,
9867 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9868 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
9869 "connectors_changed:%d\n",
9870 acrtc->crtc_id,
9871 new_crtc_state->enable,
9872 new_crtc_state->active,
9873 new_crtc_state->planes_changed,
9874 new_crtc_state->mode_changed,
9875 new_crtc_state->active_changed,
9876 new_crtc_state->connectors_changed);
9877
9878 /* Disable cursor if disabling crtc */
9879 if (old_crtc_state->active && !new_crtc_state->active) {
9880 struct dc_cursor_position position;
9881
9882 memset(&position, 0, sizeof(position));
9883 mutex_lock(&dm->dc_lock);
9884 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9885 mutex_unlock(&dm->dc_lock);
9886 }
9887
9888 /* Copy all transient state flags into dc state */
9889 if (dm_new_crtc_state->stream) {
9890 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9891 dm_new_crtc_state->stream);
9892 }
9893
9894 /* handles headless hotplug case, updating new_state and
9895 * aconnector as needed
9896 */
9897
9898 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9899
9900 DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9901
9902 if (!dm_new_crtc_state->stream) {
9903 /*
9904 * this could happen because of issues with
9905 * userspace notifications delivery.
9906 * In this case userspace tries to set mode on
9907 * display which is disconnected in fact.
9908 * dc_sink is NULL in this case on aconnector.
9909 * We expect reset mode will come soon.
9910 *
9911 * This can also happen when an unplug is done
9912 * during the resume sequence
9913 *
9914 * In this case, we want to pretend we still
9915 * have a sink to keep the pipe running so that
9916 * hw state is consistent with the sw state
9917 */
9918 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9919 __func__, acrtc->base.base.id);
9920 continue;
9921 }
9922
9923 if (dm_old_crtc_state->stream)
9924 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9925
9926 pm_runtime_get_noresume(dev->dev);
9927
9928 acrtc->enabled = true;
9929 acrtc->hw_mode = new_crtc_state->mode;
9930 crtc->hwmode = new_crtc_state->mode;
9931 mode_set_reset_required = true;
9932 } else if (modereset_required(new_crtc_state)) {
9933 DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9934 /* i.e. reset mode */
9935 if (dm_old_crtc_state->stream)
9936 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9937
9938 mode_set_reset_required = true;
9939 }
9940 } /* for_each_crtc_in_state() */
9941
9942 if (dc_state) {
9943 /* if there is a mode set or reset, disable eDP PSR */
9944 if (mode_set_reset_required) {
9945 if (dm->vblank_control_workqueue)
9946 flush_workqueue(dm->vblank_control_workqueue);
9947
9948 amdgpu_dm_psr_disable_all(dm);
9949 }
9950
9951 dm_enable_per_frame_crtc_master_sync(dc_state);
9952 mutex_lock(&dm->dc_lock);
9953 WARN_ON(!dc_commit_state(dm->dc, dc_state));
9954
9955 /* Allow idle optimization when vblank count is 0 for display off */
9956 if (dm->active_vblank_irq_count == 0)
9957 dc_allow_idle_optimizations(dm->dc, true);
9958 mutex_unlock(&dm->dc_lock);
9959 }
9960
9961 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9962 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9963
9964 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9965
9966 if (dm_new_crtc_state->stream != NULL) {
9967 const struct dc_stream_status *status =
9968 dc_stream_get_status(dm_new_crtc_state->stream);
9969
9970 if (!status)
9971 status = dc_stream_get_status_from_state(dc_state,
9972 dm_new_crtc_state->stream);
9973 if (!status)
9974 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9975 else
9976 acrtc->otg_inst = status->primary_otg_inst;
9977 }
9978 }
9979#ifdef CONFIG_DRM_AMD_DC_HDCP
9980 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9981 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9982 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9983 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9984
9985 new_crtc_state = NULL;
9986
9987 if (acrtc)
9988 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9989
9990 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9991
9992 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9993 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9994 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9995 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9996 dm_new_con_state->update_hdcp = true;
9997 continue;
9998 }
9999
10000 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
10001 hdcp_update_display(
10002 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
10003 new_con_state->hdcp_content_type,
10004 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
10005 }
10006#endif
10007
10008 /* Handle connector state changes */
10009 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10010 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10011 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10012 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10013 struct dc_surface_update dummy_updates[MAX_SURFACES];
10014 struct dc_stream_update stream_update;
10015 struct dc_info_packet hdr_packet;
10016 struct dc_stream_status *status = NULL;
10017 bool abm_changed, hdr_changed, scaling_changed;
10018
10019 memset(&dummy_updates, 0, sizeof(dummy_updates));
10020 memset(&stream_update, 0, sizeof(stream_update));
10021
10022 if (acrtc) {
10023 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
10024 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
10025 }
10026
10027 /* Skip any modesets/resets */
10028 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
10029 continue;
10030
10031 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10032 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10033
10034 scaling_changed = is_scaling_state_different(dm_new_con_state,
10035 dm_old_con_state);
10036
10037 abm_changed = dm_new_crtc_state->abm_level !=
10038 dm_old_crtc_state->abm_level;
10039
10040 hdr_changed =
10041 !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
10042
10043 if (!scaling_changed && !abm_changed && !hdr_changed)
10044 continue;
10045
10046 stream_update.stream = dm_new_crtc_state->stream;
10047 if (scaling_changed) {
10048 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
10049 dm_new_con_state, dm_new_crtc_state->stream);
10050
10051 stream_update.src = dm_new_crtc_state->stream->src;
10052 stream_update.dst = dm_new_crtc_state->stream->dst;
10053 }
10054
10055 if (abm_changed) {
10056 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
10057
10058 stream_update.abm_level = &dm_new_crtc_state->abm_level;
10059 }
10060
10061 if (hdr_changed) {
10062 fill_hdr_info_packet(new_con_state, &hdr_packet);
10063 stream_update.hdr_static_metadata = &hdr_packet;
10064 }
10065
10066 status = dc_stream_get_status(dm_new_crtc_state->stream);
10067
10068 if (WARN_ON(!status))
10069 continue;
10070
10071 WARN_ON(!status->plane_count);
10072
10073 /*
10074 * TODO: DC refuses to perform stream updates without a dc_surface_update.
10075 * Here we create an empty update on each plane.
10076 * To fix this, DC should permit updating only stream properties.
10077 */
10078 for (j = 0; j < status->plane_count; j++)
10079 dummy_updates[j].surface = status->plane_states[0];
10080
10081
10082 mutex_lock(&dm->dc_lock);
10083 dc_commit_updates_for_stream(dm->dc,
10084 dummy_updates,
10085 status->plane_count,
10086 dm_new_crtc_state->stream,
10087 &stream_update,
10088 dc_state);
10089 mutex_unlock(&dm->dc_lock);
10090 }
10091
10092 /* Count number of newly disabled CRTCs for dropping PM refs later. */
10093 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
10094 new_crtc_state, i) {
10095 if (old_crtc_state->active && !new_crtc_state->active)
10096 crtc_disable_count++;
10097
10098 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10099 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10100
10101 /* For freesync config update on crtc state and params for irq */
10102 update_stream_irq_parameters(dm, dm_new_crtc_state);
10103
10104 /* Handle vrr on->off / off->on transitions */
10105 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
10106 dm_new_crtc_state);
10107 }
10108
10109 /**
10110 * Enable interrupts for CRTCs that are newly enabled or went through
10111 * a modeset. It was intentionally deferred until after the front end
10112 * state was modified to wait until the OTG was on and so the IRQ
10113 * handlers didn't access stale or invalid state.
10114 */
10115 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10116 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
10117#ifdef CONFIG_DEBUG_FS
10118 bool configure_crc = false;
10119 enum amdgpu_dm_pipe_crc_source cur_crc_src;
10120#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
10121 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
10122#endif
10123 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
10124 cur_crc_src = acrtc->dm_irq_params.crc_src;
10125 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
10126#endif
10127 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10128
10129 if (new_crtc_state->active &&
10130 (!old_crtc_state->active ||
10131 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
10132 dc_stream_retain(dm_new_crtc_state->stream);
10133 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
10134 manage_dm_interrupts(adev, acrtc, true);
10135
10136#ifdef CONFIG_DEBUG_FS
10137 /**
10138 * Frontend may have changed so reapply the CRC capture
10139 * settings for the stream.
10140 */
10141 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10142
10143 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
10144 configure_crc = true;
10145#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
10146 if (amdgpu_dm_crc_window_is_activated(crtc)) {
10147 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
10148 acrtc->dm_irq_params.crc_window.update_win = true;
10149 acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
10150 spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
10151 crc_rd_wrk->crtc = crtc;
10152 spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
10153 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
10154 }
10155#endif
10156 }
10157
10158 if (configure_crc)
10159 if (amdgpu_dm_crtc_configure_crc_source(
10160 crtc, dm_new_crtc_state, cur_crc_src))
10161 DRM_DEBUG_DRIVER("Failed to configure crc source");
10162#endif
10163 }
10164 }
10165
10166 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
10167 if (new_crtc_state->async_flip)
10168 wait_for_vblank = false;
10169
10170 /* update planes when needed per crtc */
10171 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
10172 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10173
10174 if (dm_new_crtc_state->stream)
10175 amdgpu_dm_commit_planes(state, dc_state, dev,
10176 dm, crtc, wait_for_vblank);
10177 }
10178
10179 /* Update audio instances for each connector. */
10180 amdgpu_dm_commit_audio(dev, state);
10181
10182 /* restore the backlight level */
10183 for (i = 0; i < dm->num_of_edps; i++) {
10184 if (dm->backlight_dev[i] &&
10185 (dm->actual_brightness[i] != dm->brightness[i]))
10186 amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
10187 }
10188
10189 /*
10190 * send vblank event on all events not handled in flip and
10191 * mark consumed event for drm_atomic_helper_commit_hw_done
10192 */
10193 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
10194 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10195
10196 if (new_crtc_state->event)
10197 drm_send_event_locked(dev, &new_crtc_state->event->base);
10198
10199 new_crtc_state->event = NULL;
10200 }
10201 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
10202
10203 /* Signal HW programming completion */
10204 drm_atomic_helper_commit_hw_done(state);
10205
10206 if (wait_for_vblank)
10207 drm_atomic_helper_wait_for_flip_done(dev, state);
10208
10209 drm_atomic_helper_cleanup_planes(dev, state);
10210
10211 /* return the stolen vga memory back to VRAM */
10212 if (!adev->mman.keep_stolen_vga_memory)
10213 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
10214 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
10215
10216 /*
10217 * Finally, drop a runtime PM reference for each newly disabled CRTC,
10218 * so we can put the GPU into runtime suspend if we're not driving any
10219 * displays anymore
10220 */
10221 for (i = 0; i < crtc_disable_count; i++)
10222 pm_runtime_put_autosuspend(dev->dev);
10223 pm_runtime_mark_last_busy(dev->dev);
10224
10225 if (dc_state_temp)
10226 dc_release_state(dc_state_temp);
10227}
10228
10229
10230static int dm_force_atomic_commit(struct drm_connector *connector)
10231{
10232 int ret = 0;
10233 struct drm_device *ddev = connector->dev;
10234 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
10235 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10236 struct drm_plane *plane = disconnected_acrtc->base.primary;
10237 struct drm_connector_state *conn_state;
10238 struct drm_crtc_state *crtc_state;
10239 struct drm_plane_state *plane_state;
10240
10241 if (!state)
10242 return -ENOMEM;
10243
10244 state->acquire_ctx = ddev->mode_config.acquire_ctx;
10245
10246 /* Construct an atomic state to restore previous display setting */
10247
10248 /*
10249 * Attach connectors to drm_atomic_state
10250 */
10251 conn_state = drm_atomic_get_connector_state(state, connector);
10252
10253 ret = PTR_ERR_OR_ZERO(conn_state);
10254 if (ret)
10255 goto out;
10256
10257 /* Attach crtc to drm_atomic_state*/
10258 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
10259
10260 ret = PTR_ERR_OR_ZERO(crtc_state);
10261 if (ret)
10262 goto out;
10263
10264 /* force a restore */
10265 crtc_state->mode_changed = true;
10266
10267 /* Attach plane to drm_atomic_state */
10268 plane_state = drm_atomic_get_plane_state(state, plane);
10269
10270 ret = PTR_ERR_OR_ZERO(plane_state);
10271 if (ret)
10272 goto out;
10273
10274 /* Call commit internally with the state we just constructed */
10275 ret = drm_atomic_commit(state);
10276
10277out:
10278 drm_atomic_state_put(state);
10279 if (ret)
10280 DRM_ERROR("Restoring old state failed with %i\n", ret);
10281
10282 return ret;
10283}
10284
10285/*
10286 * This function handles all cases when set mode does not come upon hotplug.
10287 * This includes when a display is unplugged then plugged back into the
10288 * same port and when running without usermode desktop manager support.
10289 */
10290void dm_restore_drm_connector_state(struct drm_device *dev,
10291 struct drm_connector *connector)
10292{
10293 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
10294 struct amdgpu_crtc *disconnected_acrtc;
10295 struct dm_crtc_state *acrtc_state;
10296
10297 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
10298 return;
10299
10300 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10301 if (!disconnected_acrtc)
10302 return;
10303
10304 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
10305 if (!acrtc_state->stream)
10306 return;
10307
10308 /*
10309 * If the previous sink is not released and different from the current,
10310 * we deduce we are in a state where we cannot rely on a usermode call
10311 * to turn on the display, so we do it here.
10312 */
10313 if (acrtc_state->stream->sink != aconnector->dc_sink)
10314 dm_force_atomic_commit(&aconnector->base);
10315}
10316
10317/*
10318 * Grabs all modesetting locks to serialize against any blocking commits,
10319 * and waits for completion of all non-blocking commits.
10320 */
10321static int do_aquire_global_lock(struct drm_device *dev,
10322 struct drm_atomic_state *state)
10323{
10324 struct drm_crtc *crtc;
10325 struct drm_crtc_commit *commit;
10326 long ret;
10327
10328 /*
10329 * Adding all modeset locks to acquire_ctx will
10330 * ensure that when the framework releases them, the
10331 * extra locks we are taking here will get released too
10332 */
10333 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
10334 if (ret)
10335 return ret;
10336
10337 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10338 spin_lock(&crtc->commit_lock);
10339 commit = list_first_entry_or_null(&crtc->commit_list,
10340 struct drm_crtc_commit, commit_entry);
10341 if (commit)
10342 drm_crtc_commit_get(commit);
10343 spin_unlock(&crtc->commit_lock);
10344
10345 if (!commit)
10346 continue;
10347
10348 /*
10349 * Make sure all pending HW programming completed and
10350 * page flips done
10351 */
10352 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10353
10354 if (ret > 0)
10355 ret = wait_for_completion_interruptible_timeout(
10356 &commit->flip_done, 10*HZ);
10357
10358 if (ret == 0)
10359 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
10360 "timed out\n", crtc->base.id, crtc->name);
10361
10362 drm_crtc_commit_put(commit);
10363 }
10364
10365 return ret < 0 ? ret : 0;
10366}
10367
10368static void get_freesync_config_for_crtc(
10369 struct dm_crtc_state *new_crtc_state,
10370 struct dm_connector_state *new_con_state)
10371{
10372 struct mod_freesync_config config = {0};
10373 struct amdgpu_dm_connector *aconnector =
10374 to_amdgpu_dm_connector(new_con_state->base.connector);
10375 struct drm_display_mode *mode = &new_crtc_state->base.mode;
10376 int vrefresh = drm_mode_vrefresh(mode);
10377 bool fs_vid_mode = false;
10378
10379 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
10380 vrefresh >= aconnector->min_vfreq &&
10381 vrefresh <= aconnector->max_vfreq;
10382
10383 if (new_crtc_state->vrr_supported) {
10384 new_crtc_state->stream->ignore_msa_timing_param = true;
10385 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10386
10387 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10388 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
10389 config.vsif_supported = true;
10390 config.btr = true;
10391
10392 if (fs_vid_mode) {
10393 config.state = VRR_STATE_ACTIVE_FIXED;
10394 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10395 goto out;
10396 } else if (new_crtc_state->base.vrr_enabled) {
10397 config.state = VRR_STATE_ACTIVE_VARIABLE;
10398 } else {
10399 config.state = VRR_STATE_INACTIVE;
10400 }
10401 }
10402out:
10403 new_crtc_state->freesync_config = config;
10404}
10405
10406static void reset_freesync_config_for_crtc(
10407 struct dm_crtc_state *new_crtc_state)
10408{
10409 new_crtc_state->vrr_supported = false;
10410
10411 memset(&new_crtc_state->vrr_infopacket, 0,
10412 sizeof(new_crtc_state->vrr_infopacket));
10413}
10414
10415static bool
10416is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10417 struct drm_crtc_state *new_crtc_state)
10418{
10419 const struct drm_display_mode *old_mode, *new_mode;
10420
10421 if (!old_crtc_state || !new_crtc_state)
10422 return false;
10423
10424 old_mode = &old_crtc_state->mode;
10425 new_mode = &new_crtc_state->mode;
10426
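/*
 * The comparison below accepts modes that differ only in vertical
 * blanking: every horizontal parameter and the vsync pulse width must
 * match, while vtotal and the vsync position differ. This is the
 * signature of a front-porch-only change, which freesync video modes
 * use to vary refresh rate without a full modeset.
 */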
10427 if (old_mode->clock == new_mode->clock &&
10428 old_mode->hdisplay == new_mode->hdisplay &&
10429 old_mode->vdisplay == new_mode->vdisplay &&
10430 old_mode->htotal == new_mode->htotal &&
10431 old_mode->vtotal != new_mode->vtotal &&
10432 old_mode->hsync_start == new_mode->hsync_start &&
10433 old_mode->vsync_start != new_mode->vsync_start &&
10434 old_mode->hsync_end == new_mode->hsync_end &&
10435 old_mode->vsync_end != new_mode->vsync_end &&
10436 old_mode->hskew == new_mode->hskew &&
10437 old_mode->vscan == new_mode->vscan &&
10438 (old_mode->vsync_end - old_mode->vsync_start) ==
10439 (new_mode->vsync_end - new_mode->vsync_start))
10440 return true;
10441
10442 return false;
10443}
10444
10445static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
10446 uint64_t num, den, res;
10447 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10448
10449 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10450
10451 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10452 den = (unsigned long long)new_crtc_state->mode.htotal *
10453 (unsigned long long)new_crtc_state->mode.vtotal;
10454
10455 res = div_u64(num, den);
10456 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
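/*
 * Worked example with hypothetical numbers: a 148500 kHz pixel clock
 * and a 2200 x 1125 total timing give
 * 148500 * 1000 * 1000000 / (2200 * 1125) = 60000000 uHz, i.e. a
 * fixed 60 Hz refresh rate.
 */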
10457}
10458
10459 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10460 struct drm_atomic_state *state,
10461 struct drm_crtc *crtc,
10462 struct drm_crtc_state *old_crtc_state,
10463 struct drm_crtc_state *new_crtc_state,
10464 bool enable,
10465 bool *lock_and_validation_needed)
10466{
10467 struct dm_atomic_state *dm_state = NULL;
10468 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10469 struct dc_stream_state *new_stream;
10470 int ret = 0;
10471
10472 /*
10473 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
10474 * update changed items
10475 */
10476 struct amdgpu_crtc *acrtc = NULL;
10477 struct amdgpu_dm_connector *aconnector = NULL;
10478 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10479 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10480
10481 new_stream = NULL;
10482
10483 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10484 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10485 acrtc = to_amdgpu_crtc(crtc);
10486 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10487
10488 /* TODO This hack should go away */
10489 if (aconnector && enable) {
10490 /* Make sure fake sink is created in plug-in scenario */
10491 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10492 &aconnector->base);
10493 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10494 &aconnector->base);
10495
10496 if (IS_ERR(drm_new_conn_state)) {
10497 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10498 goto fail;
10499 }
10500
10501 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10502 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10503
10504 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10505 goto skip_modeset;
10506
10507 new_stream = create_validate_stream_for_sink(aconnector,
10508 &new_crtc_state->mode,
10509 dm_new_conn_state,
10510 dm_old_crtc_state->stream);
10511
10512 /*
10513 * we can have no stream on ACTION_SET if a display
10514 * was disconnected during S3, in this case it is not an
10515 * error, the OS will be updated after detection, and
10516 * will do the right thing on next atomic commit
10517 */
10518
10519 if (!new_stream) {
10520 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10521 __func__, acrtc->base.base.id);
10522 ret = -ENOMEM;
10523 goto fail;
10524 }
10525
10526 /*
10527 * TODO: Check VSDB bits to decide whether this should
10528 * be enabled or not.
10529 */
10530 new_stream->triggered_crtc_reset.enabled =
10531 dm->force_timing_sync;
10532
10533 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10534
10535 ret = fill_hdr_info_packet(drm_new_conn_state,
10536 &new_stream->hdr_static_metadata);
10537 if (ret)
10538 goto fail;
10539
10540 /*
10541 * If we already removed the old stream from the context
10542 * (and set the new stream to NULL) then we can't reuse
10543 * the old stream even if the stream and scaling are unchanged.
10544 * We'll hit the BUG_ON and get a black screen.
10545 *
10546 * TODO: Refactor this function to allow this check to work
10547 * in all conditions.
10548 */
10549 if (dm_new_crtc_state->stream &&
10550 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10551 goto skip_modeset;
10552
10553 if (dm_new_crtc_state->stream &&
10554 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10555 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10556 new_crtc_state->mode_changed = false;
10557 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
10558 new_crtc_state->mode_changed);
10559 }
10560 }
10561
10562 /* mode_changed flag may get updated above, need to check again */
10563 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10564 goto skip_modeset;
10565
10566 drm_dbg_state(state->dev,
10567 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10568 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
10569 "connectors_changed:%d\n",
10570 acrtc->crtc_id,
10571 new_crtc_state->enable,
10572 new_crtc_state->active,
10573 new_crtc_state->planes_changed,
10574 new_crtc_state->mode_changed,
10575 new_crtc_state->active_changed,
10576 new_crtc_state->connectors_changed);
10577
10578 /* Remove stream for any changed/disabled CRTC */
10579 if (!enable) {
10580
10581 if (!dm_old_crtc_state->stream)
10582 goto skip_modeset;
10583
10584 if (dm_new_crtc_state->stream &&
10585 is_timing_unchanged_for_freesync(new_crtc_state,
10586 old_crtc_state)) {
10587 new_crtc_state->mode_changed = false;
10588 DRM_DEBUG_DRIVER(
10589 "Mode change not required for front porch change, "
10590 "setting mode_changed to %d",
10591 new_crtc_state->mode_changed);
10592
10593 set_freesync_fixed_config(dm_new_crtc_state);
10594
10595 goto skip_modeset;
10596 } else if (aconnector &&
10597 is_freesync_video_mode(&new_crtc_state->mode,
10598 aconnector)) {
10599 struct drm_display_mode *high_mode;
10600
10601 high_mode = get_highest_refresh_rate_mode(aconnector, false);
10602 if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10603 set_freesync_fixed_config(dm_new_crtc_state);
10604 }
10605 }
10606
10607 ret = dm_atomic_get_state(state, &dm_state);
10608 if (ret)
10609 goto fail;
10610
10611 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10612 crtc->base.id);
10613
10614 /* i.e. reset mode */
10615 if (dc_remove_stream_from_ctx(
10616 dm->dc,
10617 dm_state->context,
10618 dm_old_crtc_state->stream) != DC_OK) {
10619 ret = -EINVAL;
10620 goto fail;
10621 }
10622
10623 dc_stream_release(dm_old_crtc_state->stream);
10624 dm_new_crtc_state->stream = NULL;
10625
10626 reset_freesync_config_for_crtc(dm_new_crtc_state);
10627
10628 *lock_and_validation_needed = true;
10629
10630 } else {/* Add stream for any updated/enabled CRTC */
10631 /*
10632 * Quick fix to prevent a NULL pointer dereference on new_stream when
10633 * newly added MST connectors are not found in the existing crtc_state, in chained mode
10634 * TODO: need to dig out the root cause of that
10635 */
10636 if (!aconnector)
10637 goto skip_modeset;
10638
10639 if (modereset_required(new_crtc_state))
10640 goto skip_modeset;
10641
10642 if (modeset_required(new_crtc_state, new_stream,
10643 dm_old_crtc_state->stream)) {
10644
10645 WARN_ON(dm_new_crtc_state->stream);
10646
10647 ret = dm_atomic_get_state(state, &dm_state);
10648 if (ret)
10649 goto fail;
10650
10651 dm_new_crtc_state->stream = new_stream;
10652
10653 dc_stream_retain(new_stream);
10654
10655 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10656 crtc->base.id);
10657
10658 if (dc_add_stream_to_ctx(
10659 dm->dc,
10660 dm_state->context,
10661 dm_new_crtc_state->stream) != DC_OK) {
10662 ret = -EINVAL;
10663 goto fail;
10664 }
10665
10666 *lock_and_validation_needed = true;
10667 }
10668 }
10669
10670skip_modeset:
10671 /* Release extra reference */
10672 if (new_stream)
10673 dc_stream_release(new_stream);
10674
10675 /*
10676 * We want to do dc stream updates that do not require a
10677 * full modeset below.
10678 */
10679 if (!(enable && aconnector && new_crtc_state->active))
10680 return 0;
10681 /*
10682 * Given above conditions, the dc state cannot be NULL because:
10683 * 1. We're in the process of enabling CRTCs (just been added
10684 * to the dc context, or already is on the context)
10685 * 2. Has a valid connector attached, and
10686 * 3. Is currently active and enabled.
10687 * => The dc stream state currently exists.
10688 */
10689 BUG_ON(dm_new_crtc_state->stream == NULL);
10690
10691 /* Scaling or underscan settings */
10692 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10693 drm_atomic_crtc_needs_modeset(new_crtc_state))
10694 update_stream_scaling_settings(
10695 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10696
10697 /* ABM settings */
10698 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10699
10700 /*
10701 * Color management settings. We also update color properties
10702 * when a modeset is needed, to ensure it gets reprogrammed.
10703 */
10704 if (dm_new_crtc_state->base.color_mgmt_changed ||
10705 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10706 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10707 if (ret)
10708 goto fail;
10709 }
10710
10711 /* Update Freesync settings. */
10712 get_freesync_config_for_crtc(dm_new_crtc_state,
10713 dm_new_conn_state);
10714
10715 return ret;
10716
10717fail:
10718 if (new_stream)
10719 dc_stream_release(new_stream);
10720 return ret;
10721}
10722
10723static bool should_reset_plane(struct drm_atomic_state *state,
10724 struct drm_plane *plane,
10725 struct drm_plane_state *old_plane_state,
10726 struct drm_plane_state *new_plane_state)
10727{
10728 struct drm_plane *other;
10729 struct drm_plane_state *old_other_state, *new_other_state;
10730 struct drm_crtc_state *new_crtc_state;
10731 int i;
10732
10733 /*
10734 * TODO: Remove this hack once the checks below are sufficient
10735 * to determine when we need to reset all the planes on
10736 * the stream.
10737 */
10738 if (state->allow_modeset)
10739 return true;
10740
10741 /* Exit early if we know that we're adding or removing the plane. */
10742 if (old_plane_state->crtc != new_plane_state->crtc)
10743 return true;
10744
10745 /* old crtc == new_crtc == NULL, plane not in context. */
10746 if (!new_plane_state->crtc)
10747 return false;
10748
10749 new_crtc_state =
10750 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10751
10752 if (!new_crtc_state)
10753 return true;
10754
10755 /* CRTC Degamma changes currently require us to recreate planes. */
10756 if (new_crtc_state->color_mgmt_changed)
10757 return true;
10758
10759 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10760 return true;
10761
10762 /*
10763 * If there are any new primary or overlay planes being added or
10764 * removed then the z-order can potentially change. To ensure
10765 * correct z-order and pipe acquisition the current DC architecture
10766 * requires us to remove and recreate all existing planes.
10767 *
10768 * TODO: Come up with a more elegant solution for this.
10769 */
10770 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
10771 struct amdgpu_framebuffer *old_afb, *new_afb;
10772 if (other->type == DRM_PLANE_TYPE_CURSOR)
10773 continue;
10774
10775 if (old_other_state->crtc != new_plane_state->crtc &&
10776 new_other_state->crtc != new_plane_state->crtc)
10777 continue;
10778
10779 if (old_other_state->crtc != new_other_state->crtc)
10780 return true;
10781
10782 /* Src/dst size and scaling updates. */
10783 if (old_other_state->src_w != new_other_state->src_w ||
10784 old_other_state->src_h != new_other_state->src_h ||
10785 old_other_state->crtc_w != new_other_state->crtc_w ||
10786 old_other_state->crtc_h != new_other_state->crtc_h)
10787 return true;
10788
10789 /* Rotation / mirroring updates. */
10790 if (old_other_state->rotation != new_other_state->rotation)
10791 return true;
10792
10793 /* Blending updates. */
10794 if (old_other_state->pixel_blend_mode !=
10795 new_other_state->pixel_blend_mode)
10796 return true;
10797
10798 /* Alpha updates. */
10799 if (old_other_state->alpha != new_other_state->alpha)
10800 return true;
10801
10802 /* Colorspace changes. */
10803 if (old_other_state->color_range != new_other_state->color_range ||
10804 old_other_state->color_encoding != new_other_state->color_encoding)
10805 return true;
10806
10807 /* Framebuffer checks fall at the end. */
10808 if (!old_other_state->fb || !new_other_state->fb)
10809 continue;
10810
10811 /* Pixel format changes can require bandwidth updates. */
10812 if (old_other_state->fb->format != new_other_state->fb->format)
10813 return true;
10814
10815 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10816 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10817
10818 /* Tiling and DCC changes also require bandwidth updates. */
10819 if (old_afb->tiling_flags != new_afb->tiling_flags ||
10820 old_afb->base.modifier != new_afb->base.modifier)
10821 return true;
10822 }
10823
10824 return false;
10825}
10826
10827static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10828 struct drm_plane_state *new_plane_state,
10829 struct drm_framebuffer *fb)
10830{
10831 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10832 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10833 unsigned int pitch;
10834 bool linear;
10835
10836 if (fb->width > new_acrtc->max_cursor_width ||
10837 fb->height > new_acrtc->max_cursor_height) {
10838 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10839 new_plane_state->fb->width,
10840 new_plane_state->fb->height);
10841 return -EINVAL;
10842 }
10843 if (new_plane_state->src_w != fb->width << 16 ||
10844 new_plane_state->src_h != fb->height << 16) {
10845 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10846 return -EINVAL;
10847 }
10848
10849 /* Pitch in pixels */
10850 pitch = fb->pitches[0] / fb->format->cpp[0];
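/*
 * Example with hypothetical values: an ARGB8888 cursor FB with
 * pitches[0] = 1024 bytes and cpp[0] = 4 bytes per pixel gives a
 * pitch of 256 pixels, which must then equal fb->width and be one of
 * the sizes accepted by the switch below.
 */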
10851
10852 if (fb->width != pitch) {
10853 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
10854 fb->width, pitch);
10855 return -EINVAL;
10856 }
10857
10858 switch (pitch) {
10859 case 64:
10860 case 128:
10861 case 256:
10862 /* FB pitch is supported by cursor plane */
10863 break;
10864 default:
10865 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10866 return -EINVAL;
10867 }
10868
10869 /* Core DRM takes care of checking FB modifiers, so we only need to
10870 * check tiling flags when the FB doesn't have a modifier. */
10871 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10872 if (adev->family < AMDGPU_FAMILY_AI) {
10873 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10874 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10875 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10876 } else {
10877 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10878 }
10879 if (!linear) {
10880 DRM_DEBUG_ATOMIC("Cursor FB not linear");
10881 return -EINVAL;
10882 }
10883 }
10884
10885 return 0;
10886}
10887
10888static int dm_update_plane_state(struct dc *dc,
10889 struct drm_atomic_state *state,
10890 struct drm_plane *plane,
10891 struct drm_plane_state *old_plane_state,
10892 struct drm_plane_state *new_plane_state,
10893 bool enable,
10894 bool *lock_and_validation_needed)
10895{
10896
10897 struct dm_atomic_state *dm_state = NULL;
10898 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10899 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10900 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10901 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10902 struct amdgpu_crtc *new_acrtc;
10903 bool needs_reset;
10904 int ret = 0;
10905
10906
10907 new_plane_crtc = new_plane_state->crtc;
10908 old_plane_crtc = old_plane_state->crtc;
10909 dm_new_plane_state = to_dm_plane_state(new_plane_state);
10910 dm_old_plane_state = to_dm_plane_state(old_plane_state);
10911
10912 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10913 if (!enable || !new_plane_crtc ||
10914 drm_atomic_plane_disabling(plane->state, new_plane_state))
10915 return 0;
10916
10917 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10918
10919 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10920 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10921 return -EINVAL;
10922 }
10923
10924 if (new_plane_state->fb) {
10925 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10926 new_plane_state->fb);
10927 if (ret)
10928 return ret;
10929 }
10930
10931 return 0;
10932 }
10933
10934 needs_reset = should_reset_plane(state, plane, old_plane_state,
10935 new_plane_state);
10936
10937 /* Remove any changed/removed planes */
10938 if (!enable) {
10939 if (!needs_reset)
10940 return 0;
10941
10942 if (!old_plane_crtc)
10943 return 0;
10944
10945 old_crtc_state = drm_atomic_get_old_crtc_state(
10946 state, old_plane_crtc);
10947 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10948
10949 if (!dm_old_crtc_state->stream)
10950 return 0;
10951
10952 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10953 plane->base.id, old_plane_crtc->base.id);
10954
10955 ret = dm_atomic_get_state(state, &dm_state);
10956 if (ret)
10957 return ret;
10958
10959 if (!dc_remove_plane_from_context(
10960 dc,
10961 dm_old_crtc_state->stream,
10962 dm_old_plane_state->dc_state,
10963 dm_state->context)) {
10964
10965 return -EINVAL;
10966 }
10967
10968
10969 dc_plane_state_release(dm_old_plane_state->dc_state);
10970 dm_new_plane_state->dc_state = NULL;
10971
10972 *lock_and_validation_needed = true;
10973
10974 } else { /* Add new planes */
10975 struct dc_plane_state *dc_new_plane_state;
10976
10977 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10978 return 0;
10979
10980 if (!new_plane_crtc)
10981 return 0;
10982
10983 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10984 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10985
10986 if (!dm_new_crtc_state->stream)
10987 return 0;
10988
10989 if (!needs_reset)
10990 return 0;
10991
10992 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10993 if (ret)
10994 return ret;
10995
9e869063 10996 WARN_ON(dm_new_plane_state->dc_state);
9b690ef3 10997
9e869063
LL
10998 dc_new_plane_state = dc_create_plane_state(dc);
10999 if (!dc_new_plane_state)
11000 return -ENOMEM;
11001
11002 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
11003 plane->base.id, new_plane_crtc->base.id);
11004
11005 ret = fill_dc_plane_attributes(
11006 drm_to_adev(new_plane_crtc->dev),
11007 dc_new_plane_state,
11008 new_plane_state,
11009 new_crtc_state);
11010 if (ret) {
11011 dc_plane_state_release(dc_new_plane_state);
11012 return ret;
11013 }
11014
11015 ret = dm_atomic_get_state(state, &dm_state);
11016 if (ret) {
11017 dc_plane_state_release(dc_new_plane_state);
11018 return ret;
11019 }
11020
11021 /*
11022 * Any atomic check errors that occur after this will
11023 * not need a release. The plane state will be attached
11024 * to the stream, and therefore part of the atomic
11025 * state. It'll be released when the atomic state is
11026 * cleaned.
11027 */
11028 if (!dc_add_plane_to_context(
11029 dc,
11030 dm_new_crtc_state->stream,
11031 dc_new_plane_state,
11032 dm_state->context)) {
11033
11034 dc_plane_state_release(dc_new_plane_state);
11035 return -EINVAL;
11036 }
11037
11038 dm_new_plane_state->dc_state = dc_new_plane_state;
11039
11040 dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
11041
11042 /* Tell DC to do a full surface update every time there
11043 * is a plane change. Inefficient, but works for now.
11044 */
11045 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
11046
11047 *lock_and_validation_needed = true;
11048 }
11049
11050
11051 return ret;
11052}
11053
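/*
 * For 90/270 degree rotations the source rectangle is scanned
 * sideways, so width and height swap; e.g. a (hypothetical) 1920x1080
 * source rotated 90 degrees presents as 1080x1920 to the scaling
 * checks in dm_check_crtc_cursor() below.
 */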
11054static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
11055 int *src_w, int *src_h)
11056{
11057 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
11058 case DRM_MODE_ROTATE_90:
11059 case DRM_MODE_ROTATE_270:
11060 *src_w = plane_state->src_h >> 16;
11061 *src_h = plane_state->src_w >> 16;
11062 break;
11063 case DRM_MODE_ROTATE_0:
11064 case DRM_MODE_ROTATE_180:
11065 default:
11066 *src_w = plane_state->src_w >> 16;
11067 *src_h = plane_state->src_h >> 16;
11068 break;
11069 }
11070}
11071
11072static int dm_check_crtc_cursor(struct drm_atomic_state *state,
11073 struct drm_crtc *crtc,
11074 struct drm_crtc_state *new_crtc_state)
11075{
11076 struct drm_plane *cursor = crtc->cursor, *underlying;
11077 struct drm_plane_state *new_cursor_state, *new_underlying_state;
11078 int i;
11079 int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
11080 int cursor_src_w, cursor_src_h;
11081 int underlying_src_w, underlying_src_h;
11082
11083 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
11084 * cursor per pipe but it's going to inherit the scaling and
11085 * positioning from the underlying pipe. Check the cursor plane's
11086 * blending properties match the underlying planes'. */
11087
11088 new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
11089 if (!new_cursor_state || !new_cursor_state->fb) {
11090 return 0;
11091 }
11092
11093 dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
11094 cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
11095 cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
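/*
 * Example with hypothetical values: a 64x64 cursor shown at 64x64
 * yields a scale factor of 1000 (1.000x); an underlying 1920x1080
 * plane stretched to 3840x2160 yields 2000, and that mismatch makes
 * the loop below reject the commit with -EINVAL.
 */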
11096
11097 for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
11098 /* Narrow down to non-cursor planes on the same CRTC as the cursor */
11099 if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
11100 continue;
12f4849a 11101
11102 /* Ignore disabled planes */
11103 if (!new_underlying_state->fb)
11104 continue;
11105
11106 dm_get_oriented_plane_size(new_underlying_state,
11107 &underlying_src_w, &underlying_src_h);
11108 underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
11109 underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
11110
11111 if (cursor_scale_w != underlying_scale_w ||
11112 cursor_scale_h != underlying_scale_h) {
11113 drm_dbg_atomic(crtc->dev,
11114 "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
11115 cursor->base.id, cursor->name, underlying->base.id, underlying->name);
11116 return -EINVAL;
11117 }
11118
11119 /* If this plane covers the whole CRTC, no need to check planes underneath */
11120 if (new_underlying_state->crtc_x <= 0 &&
11121 new_underlying_state->crtc_y <= 0 &&
11122 new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
11123 new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
11124 break;
12f4849a
SS
11125 }
11126
11127 return 0;
11128}
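/*
 * Worked example for the scale check above (illustrative numbers, not
 * from the driver): a 64x64 cursor displayed at 64x64 gives
 * cursor_scale_w = 64 * 1000 / 64 = 1000, while a 1920x1080 surface
 * upscaled to 3840x2160 gives underlying_scale_w = 3840 * 1000 / 1920
 * = 2000. Since 1000 != 2000 the check returns -EINVAL: the shared
 * cursor pipe would otherwise inherit the 2x scale and render the
 * cursor at the wrong size.
 */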
11129
e10517b3 11130#if defined(CONFIG_DRM_AMD_DC_DCN)
44be939f
ML
11131static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
11132{
11133 struct drm_connector *connector;
128f8ed5 11134 struct drm_connector_state *conn_state, *old_conn_state;
44be939f
ML
11135 struct amdgpu_dm_connector *aconnector = NULL;
11136 int i;
128f8ed5
RL
11137 for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
11138 if (!conn_state->crtc)
11139 conn_state = old_conn_state;
11140
44be939f
ML
11141 if (conn_state->crtc != crtc)
11142 continue;
11143
11144 aconnector = to_amdgpu_dm_connector(connector);
11145 if (!aconnector->port || !aconnector->mst_port)
11146 aconnector = NULL;
11147 else
11148 break;
11149 }
11150
11151 if (!aconnector)
11152 return 0;
11153
11154 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
11155}
e10517b3 11156#endif
44be939f 11157
b8592b48
LL
11158/**
11159 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
11160 * @dev: The DRM device
11161 * @state: The atomic state to commit
11162 *
11163 * Validate that the given atomic state is programmable by DC into hardware.
11164 * This involves constructing a &struct dc_state reflecting the new hardware
11165 * state we wish to commit, then querying DC to see if it is programmable. It's
11166 * important not to modify the existing DC state. Otherwise, atomic_check
11167 * may unexpectedly commit hardware changes.
11168 *
11169 * When validating the DC state, it's important that the right locks are
 11170 * acquired. For the full update case, which removes/adds/updates streams on
 11171 * one CRTC while flipping on another, acquiring the global lock guarantees
 11172 * that any such full update commit will wait for completion of any outstanding
f6d7c7fa 11173 * flip using DRM's synchronization events.
b8592b48
LL
11174 *
11175 * Note that DM adds the affected connectors for all CRTCs in state, when that
11176 * might not seem necessary. This is because DC stream creation requires the
11177 * DC sink, which is tied to the DRM connector state. Cleaning this up should
11178 * be possible but non-trivial - a possible TODO item.
11179 *
 11180 * Return: 0 on success, negative error code on failure.
11181 */
7578ecda
AD
11182static int amdgpu_dm_atomic_check(struct drm_device *dev,
11183 struct drm_atomic_state *state)
62f55537 11184{
1348969a 11185 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897 11186 struct dm_atomic_state *dm_state = NULL;
62f55537 11187 struct dc *dc = adev->dm.dc;
62f55537 11188 struct drm_connector *connector;
c2cea706 11189 struct drm_connector_state *old_con_state, *new_con_state;
62f55537 11190 struct drm_crtc *crtc;
fc9e9920 11191 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9e869063
LL
11192 struct drm_plane *plane;
11193 struct drm_plane_state *old_plane_state, *new_plane_state;
74a16675 11194 enum dc_status status;
1e88ad0a 11195 int ret, i;
62f55537 11196 bool lock_and_validation_needed = false;
214993e1 11197 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
6513104b
HW
11198#if defined(CONFIG_DRM_AMD_DC_DCN)
11199 struct dsc_mst_fairness_vars vars[MAX_PIPES];
41724ea2
BL
11200 struct drm_dp_mst_topology_state *mst_state;
11201 struct drm_dp_mst_topology_mgr *mgr;
6513104b 11202#endif
62f55537 11203
e8a98235 11204 trace_amdgpu_dm_atomic_check_begin(state);
c44a22b3 11205
62f55537 11206 ret = drm_atomic_helper_check_modeset(dev, state);
68ca1c3e
S
11207 if (ret) {
11208 DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
01e28f9c 11209 goto fail;
68ca1c3e 11210 }
62f55537 11211
c5892a10
SW
11212 /* Check connector changes */
11213 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11214 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11215 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11216
11217 /* Skip connectors that are disabled or part of modeset already. */
11218 if (!old_con_state->crtc && !new_con_state->crtc)
11219 continue;
11220
11221 if (!new_con_state->crtc)
11222 continue;
11223
11224 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
11225 if (IS_ERR(new_crtc_state)) {
68ca1c3e 11226 DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
c5892a10
SW
11227 ret = PTR_ERR(new_crtc_state);
11228 goto fail;
11229 }
11230
11231 if (dm_old_con_state->abm_level !=
11232 dm_new_con_state->abm_level)
11233 new_crtc_state->connectors_changed = true;
11234 }
11235
e10517b3 11236#if defined(CONFIG_DRM_AMD_DC_DCN)
349a19b2 11237 if (dc_resource_is_dsc_encoding_supported(dc)) {
44be939f
ML
11238 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11239 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
11240 ret = add_affected_mst_dsc_crtcs(state, crtc);
68ca1c3e
S
11241 if (ret) {
11242 DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
44be939f 11243 goto fail;
68ca1c3e 11244 }
44be939f
ML
11245 }
11246 }
71be4b16 11247 if (!pre_validate_dsc(state, &dm_state, vars)) {
11248 ret = -EINVAL;
11249 goto fail;
11250 }
44be939f 11251 }
e10517b3 11252#endif
1e88ad0a 11253 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
886876ec
EB
11254 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
11255
1e88ad0a 11256 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
98e6436d 11257 !new_crtc_state->color_mgmt_changed &&
886876ec
EB
11258 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
11259 dm_old_crtc_state->dsc_force_changed == false)
1e88ad0a 11260 continue;
7bef1af3 11261
03fc4cf4 11262 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
68ca1c3e
S
11263 if (ret) {
11264 DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
03fc4cf4 11265 goto fail;
68ca1c3e 11266 }
03fc4cf4 11267
1e88ad0a
S
11268 if (!new_crtc_state->enable)
11269 continue;
fc9e9920 11270
1e88ad0a 11271 ret = drm_atomic_add_affected_connectors(state, crtc);
68ca1c3e
S
11272 if (ret) {
11273 DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
706bc8c5 11274 goto fail;
68ca1c3e 11275 }
fc9e9920 11276
1e88ad0a 11277 ret = drm_atomic_add_affected_planes(state, crtc);
68ca1c3e
S
11278 if (ret) {
11279 DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
1e88ad0a 11280 goto fail;
68ca1c3e 11281 }
115a385c 11282
cbac53f7 11283 if (dm_old_crtc_state->dsc_force_changed)
115a385c 11284 new_crtc_state->mode_changed = true;
e7b07cee
HW
11285 }
11286
2d9e6431
NK
11287 /*
11288 * Add all primary and overlay planes on the CRTC to the state
11289 * whenever a plane is enabled to maintain correct z-ordering
11290 * and to enable fast surface updates.
11291 */
11292 drm_for_each_crtc(crtc, dev) {
11293 bool modified = false;
11294
11295 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
11296 if (plane->type == DRM_PLANE_TYPE_CURSOR)
11297 continue;
11298
11299 if (new_plane_state->crtc == crtc ||
11300 old_plane_state->crtc == crtc) {
11301 modified = true;
11302 break;
11303 }
11304 }
11305
11306 if (!modified)
11307 continue;
11308
11309 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
11310 if (plane->type == DRM_PLANE_TYPE_CURSOR)
11311 continue;
11312
11313 new_plane_state =
11314 drm_atomic_get_plane_state(state, plane);
11315
11316 if (IS_ERR(new_plane_state)) {
11317 ret = PTR_ERR(new_plane_state);
68ca1c3e 11318 DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
2d9e6431
NK
11319 goto fail;
11320 }
11321 }
11322 }
11323
62f55537 11324	/* Remove existing planes if they are modified */
9e869063
LL
11325 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11326 ret = dm_update_plane_state(dc, state, plane,
11327 old_plane_state,
11328 new_plane_state,
11329 false,
11330 &lock_and_validation_needed);
68ca1c3e
S
11331 if (ret) {
11332 DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
9e869063 11333 goto fail;
68ca1c3e 11334 }
62f55537
AG
11335 }
11336
11337 /* Disable all crtcs which require disable */
4b9674e5
LL
11338 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11339 ret = dm_update_crtc_state(&adev->dm, state, crtc,
11340 old_crtc_state,
11341 new_crtc_state,
11342 false,
11343 &lock_and_validation_needed);
68ca1c3e
S
11344 if (ret) {
11345 DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
4b9674e5 11346 goto fail;
68ca1c3e 11347 }
62f55537
AG
11348 }
11349
11350 /* Enable all crtcs which require enable */
4b9674e5
LL
11351 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11352 ret = dm_update_crtc_state(&adev->dm, state, crtc,
11353 old_crtc_state,
11354 new_crtc_state,
11355 true,
11356 &lock_and_validation_needed);
68ca1c3e
S
11357 if (ret) {
11358 DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
4b9674e5 11359 goto fail;
68ca1c3e 11360 }
62f55537
AG
11361 }
11362
11363 /* Add new/modified planes */
9e869063
LL
11364 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11365 ret = dm_update_plane_state(dc, state, plane,
11366 old_plane_state,
11367 new_plane_state,
11368 true,
11369 &lock_and_validation_needed);
68ca1c3e
S
11370 if (ret) {
11371 DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
9e869063 11372 goto fail;
68ca1c3e 11373 }
62f55537
AG
11374 }
11375
b349f76e
ES
11376 /* Run this here since we want to validate the streams we created */
11377 ret = drm_atomic_helper_check_planes(dev, state);
68ca1c3e
S
11378 if (ret) {
11379 DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
b349f76e 11380 goto fail;
68ca1c3e 11381 }
62f55537 11382
214993e1
ML
11383 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11384 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
11385 if (dm_new_crtc_state->mpo_requested)
11386 DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
11387 }
11388
12f4849a
SS
11389 /* Check cursor planes scaling */
11390 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11391 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
68ca1c3e
S
11392 if (ret) {
11393 DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
12f4849a 11394 goto fail;
68ca1c3e 11395 }
12f4849a
SS
11396 }
11397
43d10d30
NK
11398 if (state->legacy_cursor_update) {
11399 /*
11400 * This is a fast cursor update coming from the plane update
11401 * helper, check if it can be done asynchronously for better
11402 * performance.
11403 */
11404 state->async_update =
11405 !drm_atomic_helper_async_check(dev, state);
11406
11407 /*
11408 * Skip the remaining global validation if this is an async
11409 * update. Cursor updates can be done without affecting
11410 * state or bandwidth calcs and this avoids the performance
11411 * penalty of locking the private state object and
11412 * allocating a new dc_state.
11413 */
11414 if (state->async_update)
11415 return 0;
11416 }
11417
ebdd27e1 11418	/* Check scaling and underscan changes */
1f6010a9 11419 /* TODO Removed scaling changes validation due to inability to commit
e7b07cee
HW
 11420	 * new stream into context w/o causing full reset. Need to
11421 * decide how to handle.
11422 */
c2cea706 11423 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
54d76575
LSL
11424 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11425 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11426 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
e7b07cee
HW
11427
11428 /* Skip any modesets/resets */
0bc9706d
LSL
11429 if (!acrtc || drm_atomic_crtc_needs_modeset(
11430 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
e7b07cee
HW
11431 continue;
11432
b830ebc9 11433 /* Skip any thing not scale or underscan changes */
54d76575 11434 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
e7b07cee
HW
11435 continue;
11436
11437 lock_and_validation_needed = true;
11438 }
11439
41724ea2
BL
11440#if defined(CONFIG_DRM_AMD_DC_DCN)
11441 /* set the slot info for each mst_state based on the link encoding format */
11442 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11443 struct amdgpu_dm_connector *aconnector;
11444 struct drm_connector *connector;
11445 struct drm_connector_list_iter iter;
11446 u8 link_coding_cap;
11447
 11448		if (!mgr->mst_state)
11449 continue;
11450
11451 drm_connector_list_iter_begin(dev, &iter);
11452 drm_for_each_connector_iter(connector, &iter) {
11453 int id = connector->index;
11454
11455 if (id == mst_state->mgr->conn_base_id) {
11456 aconnector = to_amdgpu_dm_connector(connector);
11457 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11458 drm_dp_mst_update_slots(mst_state, link_coding_cap);
11459
11460 break;
11461 }
11462 }
11463 drm_connector_list_iter_end(&iter);
 11465	}
11466#endif
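/*
 * For reference (a summary of the DRM MST helper behaviour, hedged, not
 * code from this file): with 8b/10b link encoding
 * drm_dp_mst_update_slots() leaves 63 usable time slots, reserving one
 * for the MTP header, while 128b/132b encoding exposes all 64. Deciding
 * the encoding per link before VCPI allocation therefore matters for
 * the bandwidth math done during validation.
 */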
f6d7c7fa
NK
 11467	/*
11468 * Streams and planes are reset when there are changes that affect
11469 * bandwidth. Anything that affects bandwidth needs to go through
11470 * DC global validation to ensure that the configuration can be applied
11471 * to hardware.
11472 *
11473 * We have to currently stall out here in atomic_check for outstanding
11474 * commits to finish in this case because our IRQ handlers reference
11475 * DRM state directly - we can end up disabling interrupts too early
11476 * if we don't.
11477 *
11478 * TODO: Remove this stall and drop DM state private objects.
a87fa993 11479 */
f6d7c7fa 11480 if (lock_and_validation_needed) {
eb3dc897 11481 ret = dm_atomic_get_state(state, &dm_state);
68ca1c3e
S
11482 if (ret) {
11483 DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
eb3dc897 11484 goto fail;
68ca1c3e 11485 }
e7b07cee
HW
11486
11487 ret = do_aquire_global_lock(dev, state);
68ca1c3e
S
11488 if (ret) {
11489 DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
e7b07cee 11490 goto fail;
68ca1c3e 11491 }
1dc90497 11492
d9fe1a4c 11493#if defined(CONFIG_DRM_AMD_DC_DCN)
68ca1c3e
S
11494 if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
11495 DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
71be4b16 11496 ret = -EINVAL;
8c20a1ed 11497 goto fail;
68ca1c3e 11498 }
8c20a1ed 11499
6513104b 11500 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
68ca1c3e
S
11501 if (ret) {
11502 DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
29b9ba74 11503 goto fail;
68ca1c3e 11504 }
d9fe1a4c 11505#endif
29b9ba74 11506
ded58c7b
ZL
11507 /*
11508 * Perform validation of MST topology in the state:
11509 * We need to perform MST atomic check before calling
11510 * dc_validate_global_state(), or there is a chance
11511 * to get stuck in an infinite loop and hang eventually.
11512 */
11513 ret = drm_dp_mst_atomic_check(state);
68ca1c3e
S
11514 if (ret) {
11515 DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
ded58c7b 11516 goto fail;
68ca1c3e 11517 }
85fb8bb9 11518 status = dc_validate_global_state(dc, dm_state->context, true);
74a16675 11519 if (status != DC_OK) {
68ca1c3e 11520 DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
74a16675 11521 dc_status_to_str(status), status);
e7b07cee
HW
11522 ret = -EINVAL;
11523 goto fail;
11524 }
bd200d19 11525 } else {
674e78ac 11526 /*
bd200d19
NK
11527 * The commit is a fast update. Fast updates shouldn't change
11528 * the DC context, affect global validation, and can have their
11529 * commit work done in parallel with other commits not touching
11530 * the same resource. If we have a new DC context as part of
11531 * the DM atomic state from validation we need to free it and
11532 * retain the existing one instead.
fde9f39a
MR
11533 *
11534 * Furthermore, since the DM atomic state only contains the DC
11535 * context and can safely be annulled, we can free the state
11536 * and clear the associated private object now to free
11537 * some memory and avoid a possible use-after-free later.
674e78ac 11538 */
bd200d19 11539
fde9f39a
MR
11540 for (i = 0; i < state->num_private_objs; i++) {
11541 struct drm_private_obj *obj = state->private_objs[i].ptr;
bd200d19 11542
fde9f39a
MR
11543 if (obj->funcs == adev->dm.atomic_obj.funcs) {
11544 int j = state->num_private_objs-1;
bd200d19 11545
fde9f39a
MR
11546 dm_atomic_destroy_state(obj,
11547 state->private_objs[i].state);
11548
11549 /* If i is not at the end of the array then the
11550 * last element needs to be moved to where i was
11551 * before the array can safely be truncated.
11552 */
11553 if (i != j)
11554 state->private_objs[i] =
11555 state->private_objs[j];
bd200d19 11556
fde9f39a
MR
11557 state->private_objs[j].ptr = NULL;
11558 state->private_objs[j].state = NULL;
11559 state->private_objs[j].old_state = NULL;
11560 state->private_objs[j].new_state = NULL;
11561
11562 state->num_private_objs = j;
11563 break;
11564 }
bd200d19 11565 }
e7b07cee
HW
11566 }
11567
caff0e66
NK
11568 /* Store the overall update type for use later in atomic check. */
11569 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
11570 struct dm_crtc_state *dm_new_crtc_state =
11571 to_dm_crtc_state(new_crtc_state);
11572
f6d7c7fa
NK
11573 dm_new_crtc_state->update_type = lock_and_validation_needed ?
11574 UPDATE_TYPE_FULL :
11575 UPDATE_TYPE_FAST;
e7b07cee
HW
11576 }
11577
 11578	/* ret must be 0 (success) at this point */
11579 WARN_ON(ret);
e8a98235
RS
11580
11581 trace_amdgpu_dm_atomic_check_finish(state, ret);
11582
e7b07cee
HW
11583 return ret;
11584
11585fail:
11586 if (ret == -EDEADLK)
01e28f9c 11587 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
e7b07cee 11588 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
01e28f9c 11589 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
e7b07cee 11590 else
01e28f9c 11591		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
e7b07cee 11592
e8a98235
RS
11593 trace_amdgpu_dm_atomic_check_finish(state, ret);
11594
e7b07cee
HW
11595 return ret;
11596}
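/*
 * Illustrative userspace counterpart (a sketch, not driver code): the
 * check above runs for any atomic commit, and can be exercised without
 * changing hardware state by passing DRM_MODE_ATOMIC_TEST_ONLY.
 * crtc_id and active_prop_id are placeholders a real client would look
 * up via drmModeObjectGetProperties().
 */
#if 0
#include <errno.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static int example_test_only_commit(int fd, uint32_t crtc_id,
				    uint32_t active_prop_id)
{
	drmModeAtomicReqPtr req = drmModeAtomicAlloc();
	int ret;

	if (!req)
		return -ENOMEM;

	/* Stage a trivial change: keep the CRTC active. */
	ret = drmModeAtomicAddProperty(req, crtc_id, active_prop_id, 1);
	if (ret < 0)
		goto out;

	/* TEST_ONLY runs the driver's atomic check and then unwinds. */
	ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_TEST_ONLY, NULL);
out:
	drmModeAtomicFree(req);
	return ret;
}
#endif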
11597
3ee6b26b
AD
11598static bool is_dp_capable_without_timing_msa(struct dc *dc,
11599 struct amdgpu_dm_connector *amdgpu_dm_connector)
e7b07cee
HW
11600{
11601 uint8_t dpcd_data;
11602 bool capable = false;
11603
c84dec2f 11604 if (amdgpu_dm_connector->dc_link &&
e7b07cee
HW
11605 dm_helpers_dp_read_dpcd(
11606 NULL,
c84dec2f 11607 amdgpu_dm_connector->dc_link,
e7b07cee
HW
11608 DP_DOWN_STREAM_PORT_COUNT,
11609 &dpcd_data,
11610 sizeof(dpcd_data))) {
 11611		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
11612 }
11613
11614 return capable;
11615}
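/*
 * Background for the helper above: DP_DOWN_STREAM_PORT_COUNT (DPCD
 * 0x007) carries the MSA_TIMING_PAR_IGNORED bit, which advertises that
 * the sink can render the stream without the Main Stream Attribute
 * timing parameters. The FreeSync path below treats this as the
 * prerequisite for parsing the EDID range descriptor on DP/eDP sinks.
 */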
f9b4f20c 11616
46db138d
SW
11617static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11618 unsigned int offset,
11619 unsigned int total_length,
11620 uint8_t *data,
11621 unsigned int length,
11622 struct amdgpu_hdmi_vsdb_info *vsdb)
11623{
11624 bool res;
11625 union dmub_rb_cmd cmd;
11626 struct dmub_cmd_send_edid_cea *input;
11627 struct dmub_cmd_edid_cea_output *output;
11628
11629 if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11630 return false;
11631
11632 memset(&cmd, 0, sizeof(cmd));
11633
11634 input = &cmd.edid_cea.data.input;
11635
11636 cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11637 cmd.edid_cea.header.sub_type = 0;
11638 cmd.edid_cea.header.payload_bytes =
11639 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11640 input->offset = offset;
11641 input->length = length;
eb9e59eb 11642 input->cea_total_length = total_length;
46db138d
SW
11643 memcpy(input->payload, data, length);
11644
11645 res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11646 if (!res) {
11647 DRM_ERROR("EDID CEA parser failed\n");
11648 return false;
11649 }
11650
11651 output = &cmd.edid_cea.data.output;
11652
11653 if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11654 if (!output->ack.success) {
11655 DRM_ERROR("EDID CEA ack failed at offset %d\n",
11656 output->ack.offset);
11657 }
11658 } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11659 if (!output->amd_vsdb.vsdb_found)
11660 return false;
11661
11662 vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11663 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11664 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11665 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11666 } else {
b76a8062 11667 DRM_WARN("Unknown EDID CEA parser results\n");
46db138d
SW
11668 return false;
11669 }
11670
11671 return true;
11672}
11673
11674static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
f9b4f20c
SW
11675 uint8_t *edid_ext, int len,
11676 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11677{
11678 int i;
f9b4f20c
SW
11679
11680 /* send extension block to DMCU for parsing */
11681 for (i = 0; i < len; i += 8) {
11682 bool res;
11683 int offset;
11684
11685 /* send 8 bytes a time */
46db138d 11686 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
f9b4f20c
SW
11687 return false;
11688
11689 if (i+8 == len) {
11690 /* EDID block sent completed, expect result */
11691 int version, min_rate, max_rate;
11692
46db138d 11693 res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
f9b4f20c
SW
11694 if (res) {
11695 /* amd vsdb found */
11696 vsdb_info->freesync_supported = 1;
11697 vsdb_info->amd_vsdb_version = version;
11698 vsdb_info->min_refresh_rate_hz = min_rate;
11699 vsdb_info->max_refresh_rate_hz = max_rate;
11700 return true;
11701 }
11702 /* not amd vsdb */
11703 return false;
11704 }
11705
 11706		/* check for ack */
46db138d 11707 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
f9b4f20c
SW
11708 if (!res)
11709 return false;
11710 }
11711
11712 return false;
11713}
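/*
 * Chunking example for the loop above (assuming a standard CEA
 * extension block): len is EDID_LENGTH (128), so the block is streamed
 * to the firmware in sixteen 8-byte writes at offsets 0, 8, ..., 120.
 * Only the final chunk (i + 8 == len) produces a parse result; every
 * earlier chunk is merely acknowledged.
 */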
11714
46db138d
SW
11715static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11716 uint8_t *edid_ext, int len,
11717 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11718{
11719 int i;
11720
 11721	/* send extension block to DMUB for parsing */
11722 for (i = 0; i < len; i += 8) {
11723 /* send 8 bytes a time */
11724 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11725 return false;
11726 }
11727
11728 return vsdb_info->freesync_supported;
11729}
11730
11731static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11732 uint8_t *edid_ext, int len,
11733 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11734{
11735 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11736
11737 if (adev->dm.dmub_srv)
11738 return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11739 else
11740 return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11741}
11742
7c7dd774 11743static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
f9b4f20c
SW
11744 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11745{
11746 uint8_t *edid_ext = NULL;
11747 int i;
11748 bool valid_vsdb_found = false;
11749
11750 /*----- drm_find_cea_extension() -----*/
11751 /* No EDID or EDID extensions */
11752 if (edid == NULL || edid->extensions == 0)
7c7dd774 11753 return -ENODEV;
f9b4f20c
SW
11754
11755 /* Find CEA extension */
11756 for (i = 0; i < edid->extensions; i++) {
11757 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11758 if (edid_ext[0] == CEA_EXT)
11759 break;
11760 }
11761
11762 if (i == edid->extensions)
7c7dd774 11763 return -ENODEV;
f9b4f20c
SW
11764
11765 /*----- cea_db_offsets() -----*/
11766 if (edid_ext[0] != CEA_EXT)
7c7dd774 11767 return -ENODEV;
f9b4f20c
SW
11768
11769 valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
7c7dd774
AB
11770
11771 return valid_vsdb_found ? i : -ENODEV;
f9b4f20c
SW
11772}
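/*
 * EDID layout assumed above: the 128-byte base block is followed by
 * edid->extensions further 128-byte blocks, so extension i starts at
 * byte offset EDID_LENGTH * (i + 1), and a CEA-861 extension is
 * identified by its first byte equalling CEA_EXT (0x02).
 */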
11773
98e6436d
AK
11774void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11775 struct edid *edid)
e7b07cee 11776{
eb0709ba 11777 int i = 0;
e7b07cee
HW
11778 struct detailed_timing *timing;
11779 struct detailed_non_pixel *data;
11780 struct detailed_data_monitor_range *range;
c84dec2f
HW
11781 struct amdgpu_dm_connector *amdgpu_dm_connector =
11782 to_amdgpu_dm_connector(connector);
bb47de73 11783 struct dm_connector_state *dm_con_state = NULL;
9ad54467 11784 struct dc_sink *sink;
e7b07cee
HW
11785
11786 struct drm_device *dev = connector->dev;
1348969a 11787 struct amdgpu_device *adev = drm_to_adev(dev);
bb47de73 11788 bool freesync_capable = false;
f9b4f20c 11789 struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
b830ebc9 11790
8218d7f1
HW
11791 if (!connector->state) {
 11792		DRM_ERROR("%s - Connector has no state\n", __func__);
bb47de73 11793 goto update;
8218d7f1
HW
11794 }
11795
9b2fdc33
AP
11796 sink = amdgpu_dm_connector->dc_sink ?
11797 amdgpu_dm_connector->dc_sink :
11798 amdgpu_dm_connector->dc_em_sink;
11799
11800 if (!edid || !sink) {
98e6436d
AK
11801 dm_con_state = to_dm_connector_state(connector->state);
11802
11803 amdgpu_dm_connector->min_vfreq = 0;
11804 amdgpu_dm_connector->max_vfreq = 0;
11805 amdgpu_dm_connector->pixel_clock_mhz = 0;
9b2fdc33
AP
11806 connector->display_info.monitor_range.min_vfreq = 0;
11807 connector->display_info.monitor_range.max_vfreq = 0;
11808 freesync_capable = false;
98e6436d 11809
bb47de73 11810 goto update;
98e6436d
AK
11811 }
11812
8218d7f1
HW
11813 dm_con_state = to_dm_connector_state(connector->state);
11814
e7b07cee 11815 if (!adev->dm.freesync_module)
bb47de73 11816 goto update;
f9b4f20c
SW
 11817
9b2fdc33
AP
11819 if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11820 || sink->sink_signal == SIGNAL_TYPE_EDP) {
f9b4f20c
SW
11821 bool edid_check_required = false;
11822
11823 if (edid) {
e7b07cee
HW
11824 edid_check_required = is_dp_capable_without_timing_msa(
11825 adev->dm.dc,
c84dec2f 11826 amdgpu_dm_connector);
e7b07cee 11827 }
e7b07cee 11828
f9b4f20c
SW
 11829		if (edid_check_required && (edid->version > 1 ||
11830 (edid->version == 1 && edid->revision > 1))) {
11831 for (i = 0; i < 4; i++) {
e7b07cee 11832
f9b4f20c
SW
11833 timing = &edid->detailed_timings[i];
11834 data = &timing->data.other_data;
11835 range = &data->data.range;
11836 /*
11837 * Check if monitor has continuous frequency mode
11838 */
11839 if (data->type != EDID_DETAIL_MONITOR_RANGE)
11840 continue;
11841 /*
 11842			 * Check for flagged range limits only. If flags == 1,
 11843			 * no additional timing information is provided.
 11844			 * Default GTF, GTF secondary curve and CVT are not
 11845			 * supported.
11846 */
11847 if (range->flags != 1)
11848 continue;
a0ffc3fd 11849
f9b4f20c
SW
11850 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11851 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11852 amdgpu_dm_connector->pixel_clock_mhz =
11853 range->pixel_clock_mhz * 10;
a0ffc3fd 11854
f9b4f20c
SW
11855 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11856 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
e7b07cee 11857
f9b4f20c
SW
11858 break;
11859 }
98e6436d 11860
f9b4f20c
SW
11861 if (amdgpu_dm_connector->max_vfreq -
11862 amdgpu_dm_connector->min_vfreq > 10) {
98e6436d
f9b4f20c
SW
11864 freesync_capable = true;
11865 }
11866 }
9b2fdc33 11867 } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
7c7dd774
AB
11868 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11869 if (i >= 0 && vsdb_info.freesync_supported) {
f9b4f20c
SW
11870 timing = &edid->detailed_timings[i];
11871 data = &timing->data.other_data;
11872
11873 amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11874 amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11875 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11876 freesync_capable = true;
11877
11878 connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11879 connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
e7b07cee
HW
11880 }
11881 }
bb47de73
NK
11882
11883update:
11884 if (dm_con_state)
11885 dm_con_state->freesync_capable = freesync_capable;
11886
11887 if (connector->vrr_capable_property)
11888 drm_connector_set_vrr_capable_property(connector,
11889 freesync_capable);
e7b07cee
HW
11890}
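/*
 * Worked example (illustrative): a monitor advertising a continuous
 * 48-144 Hz range yields max_vfreq - min_vfreq = 96 > 10, so
 * freesync_capable is set and the connector's vrr_capable property is
 * exposed to userspace. A fixed 60-60 Hz range (delta 0) fails the
 * check and is not treated as variable-refresh capable.
 */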
11891
3d4e52d0
VL
11892void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11893{
1348969a 11894 struct amdgpu_device *adev = drm_to_adev(dev);
3d4e52d0
VL
11895 struct dc *dc = adev->dm.dc;
11896 int i;
11897
11898 mutex_lock(&adev->dm.dc_lock);
11899 if (dc->current_state) {
11900 for (i = 0; i < dc->current_state->stream_count; ++i)
11901 dc->current_state->streams[i]
11902 ->triggered_crtc_reset.enabled =
11903 adev->dm.force_timing_sync;
11904
11905 dm_enable_per_frame_crtc_master_sync(dc->current_state);
11906 dc_trigger_sync(dc, dc->current_state);
11907 }
11908 mutex_unlock(&adev->dm.dc_lock);
11909}
9d83722d
RS
11910
11911void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11912 uint32_t value, const char *func_name)
11913{
11914#ifdef DM_CHECK_ADDR_0
11915 if (address == 0) {
 11916		DC_ERR("invalid register write. address = 0\n");
11917 return;
11918 }
11919#endif
11920 cgs_write_register(ctx->cgs_device, address, value);
11921 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11922}
11923
11924uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11925 const char *func_name)
11926{
11927 uint32_t value;
11928#ifdef DM_CHECK_ADDR_0
11929 if (address == 0) {
11930 DC_ERR("invalid register read; address = 0\n");
11931 return 0;
11932 }
11933#endif
11934
11935 if (ctx->dmub_srv &&
11936 ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11937 !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11938 ASSERT(false);
11939 return 0;
11940 }
11941
11942 value = cgs_read_register(ctx->cgs_device, address);
11943
11944 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11945
11946 return value;
11947}
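/*
 * The ASSERT above guards a real hazard: while the DMUB register helper
 * is gathering writes for offload, a read here would presumably return
 * the hardware value without the still-queued writes applied, so the
 * path fails loudly and returns 0 instead of handing back stale data.
 */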
81927e28 11948
240e6d25
IB
11949static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
11950 struct dc_context *ctx,
11951 uint8_t status_type,
11952 uint32_t *operation_result)
88f52b1f
JS
11953{
11954 struct amdgpu_device *adev = ctx->driver_context;
11955 int return_status = -1;
11956 struct dmub_notification *p_notify = adev->dm.dmub_notify;
11957
11958 if (is_cmd_aux) {
11959 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11960 return_status = p_notify->aux_reply.length;
11961 *operation_result = p_notify->result;
11962 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
11963 *operation_result = AUX_RET_ERROR_TIMEOUT;
11964 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
11965 *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
11966 } else {
11967 *operation_result = AUX_RET_ERROR_UNKNOWN;
11968 }
11969 } else {
11970 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11971 return_status = 0;
11972 *operation_result = p_notify->sc_status;
11973 } else {
11974 *operation_result = SET_CONFIG_UNKNOWN_ERROR;
11975 }
11976 }
11977
11978 return return_status;
11979}
11980
11981int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
11982 unsigned int link_index, void *cmd_payload, void *operation_result)
81927e28
JS
11983{
11984 struct amdgpu_device *adev = ctx->driver_context;
11985 int ret = 0;
11986
88f52b1f
JS
11987 if (is_cmd_aux) {
11988 dc_process_dmub_aux_transfer_async(ctx->dc,
11989 link_index, (struct aux_payload *)cmd_payload);
11990 } else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
11991 (struct set_config_cmd_payload *)cmd_payload,
11992 adev->dm.dmub_notify)) {
11993 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11994 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11995 (uint32_t *)operation_result);
11996 }
11997
9e3a50d2 11998 ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
81927e28 11999 if (ret == 0) {
9e3a50d2 12000		DRM_ERROR("wait_for_completion_timeout timeout!\n");
88f52b1f
JS
12001 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
12002 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
12003 (uint32_t *)operation_result);
81927e28 12004 }
81927e28 12005
88f52b1f
JS
12006 if (is_cmd_aux) {
12007 if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
12008 struct aux_payload *payload = (struct aux_payload *)cmd_payload;
81927e28 12009
88f52b1f
JS
12010 payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
12011 if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
12012 payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
12013 memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
12014 adev->dm.dmub_notify->aux_reply.length);
12015 }
12016 }
81927e28
JS
12017 }
12018
88f52b1f
JS
12019 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
12020 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
12021 (uint32_t *)operation_result);
81927e28 12022}
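/*
 * The helper above wraps an asynchronous DMUB request in a synchronous
 * wait on adev->dm.dmub_aux_transfer_done. A minimal generic sketch of
 * the same async-to-sync pattern (hypothetical names, illustration
 * only, not driver code):
 */
#if 0
static int example_wait_for_async_reply(struct completion *done)
{
	unsigned long left;

	reinit_completion(done);
	/* ...fire the asynchronous request here... */
	left = wait_for_completion_timeout(done, 10 * HZ);

	/*
	 * A return of 0 means the 10 second budget expired before the
	 * notification handler called complete(done).
	 */
	return left ? 0 : -ETIMEDOUT;
}
#endif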
1edf5ae1
ZL
12023
12024/*
12025 * Check whether seamless boot is supported.
12026 *
12027 * So far we only support seamless boot on CHIP_VANGOGH.
12028 * If everything goes well, we may consider expanding
12029 * seamless boot to other ASICs.
12030 */
12031bool check_seamless_boot_capability(struct amdgpu_device *adev)
12032{
12033 switch (adev->asic_type) {
12034 case CHIP_VANGOGH:
12035 if (!adev->mman.keep_stolen_vga_memory)
12036 return true;
12037 break;
12038 default:
12039 break;
12040 }
12041
12042 return false;
12043}
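/*
 * Hypothetical caller sketch (field name is an assumption, not verified
 * against this tree): early in DM init the driver can do
 *
 *	init_data.flags.seamless_boot_edp_requested =
 *		check_seamless_boot_capability(adev);
 *
 * so that DC preserves the firmware-programmed timing on the boot
 * display instead of doing a full modeset at handoff.
 */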