/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS
#include "dm_services_types.h"
#include "dc.h"
#include "link_enc_cfg.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"
#include "dpcd_defs.h"
#include "link/protocols/link_dpcd.h"
#include "link_service_types.h"
#include "link/protocols/link_dp_capability.h"
#include "link/protocols/link_ddc.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_plane.h"
#include "amdgpu_dm_crtc.h"
#include "amdgpu_dm_hdcp.h"
#include <drm/display/drm_hdcp_helper.h>
#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/backlight.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>
#include <linux/dmi.h>

#include <drm/display/drm_dp_mst_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_plane_helper.h>

#include <acpi/video.h>

#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "soc15_common.h"
#include "vega10_ip_offset.h"

#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
#define FIRMWARE_DCN_314_DMUB "amdgpu/dcn_3_1_4_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_314_DMUB);
#define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
#define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);

#define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB);
#define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100
/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    u32 link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static void handle_hpd_rx_irq(void *param);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state);
/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] index of the CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
	}
}
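
/*
 * Usage sketch (illustrative, not part of the driver): the counter above
 * reaches the rest of amdgpu through the display-function table that DM
 * registers, roughly:
 *
 *	static const struct amdgpu_display_funcs example_funcs = {
 *		.vblank_get_counter = dm_vblank_get_counter,
 *	};
 *
 * after which core code reads it as
 *	adev->mode_info.funcs->vblank_get_counter(adev, crtc_index);
 * The struct name here is hypothetical; see the dm_display_funcs
 * definition later in this file for the real wiring.
 */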

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	u32 v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (WARN_ON(otg_inst == -1))
		return adev->mode_info.crtcs[0];

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
					      struct dm_crtc_state *new_state)
{
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
		return true;
	else if (amdgpu_dm_crtc_vrr_active(old_state) != amdgpu_dm_crtc_vrr_active(new_state))
		return true;
	else
		return false;
}

static inline void reverse_planes_order(struct dc_surface_update *array_of_surface_update,
					int planes_count)
{
	int i, j;

	for (i = 0, j = planes_count - 1; i < j; i++, j--)
		swap(array_of_surface_update[i], array_of_surface_update[j]);
}

/**
 * update_planes_and_stream_adapter() - Send planes to be updated in DC
 *
 * DC has a generic way to update planes and stream via
 * dc_update_planes_and_stream function; however, DM might need some
 * adjustments and preparation before calling it. This function is a wrapper
 * for the dc_update_planes_and_stream that does any required configuration
 * before passing control to DC.
 */
static inline bool update_planes_and_stream_adapter(struct dc *dc,
						    int update_type,
						    int planes_count,
						    struct dc_stream_state *stream,
						    struct dc_stream_update *stream_update,
						    struct dc_surface_update *array_of_surface_update)
{
	reverse_planes_order(array_of_surface_update, planes_count);

	/*
	 * Previous frame finished and HW is ready for optimization.
	 */
	if (update_type == UPDATE_TYPE_FAST)
		dc_post_update_surfaces_to_stream(dc);

	return dc_update_planes_and_stream(dc,
					   array_of_surface_update,
					   planes_count,
					   stream,
					   stream_update);
}
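
/*
 * Worked example (illustrative only): DM builds array_of_surface_update in
 * the opposite order from what DC consumes, so with three planes
 *
 *	updates[] = { primary, overlay0, overlay1 }
 *
 * the array becomes { overlay1, overlay0, primary } before it reaches
 * dc_update_planes_and_stream(). Callers therefore always go through the
 * wrapper, e.g.
 *
 *	update_planes_and_stream_adapter(dm->dc, UPDATE_TYPE_FAST,
 *					 planes_count, stream,
 *					 &stream_update, updates);
 */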

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: interrupt parameters
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	u32 vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DC_LOG_PFLIP("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
			     amdgpu_crtc->pflip_status,
			     AMDGPU_FLIP_SUBMITTED,
			     amdgpu_crtc->crtc_id,
			     amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	WARN_ON(!e);

	vrr_active = amdgpu_dm_crtc_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
		     amdgpu_crtc->crtc_id, amdgpu_crtc,
		     vrr_active, (int)!e);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct drm_device *drm_dev;
	struct drm_vblank_crtc *vblank;
	ktime_t frame_duration_ns, previous_timestamp;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);
		drm_dev = acrtc->base.dev;
		vblank = &drm_dev->vblank[acrtc->base.index];
		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
		frame_duration_ns = vblank->time - previous_timestamp;

		if (frame_duration_ns > 0) {
			trace_amdgpu_refresh_rate_track(acrtc->base.index,
						frame_duration_ns,
						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
			atomic64_set(&irq_params->previous_timestamp, vblank->time);
		}

		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			amdgpu_dm_crtc_handle_vblank(acrtc);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}
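
/*
 * Worked example for the refresh-rate trace above (a reading aid): at a
 * fixed 60 Hz, consecutive vblank timestamps are ~16666667 ns apart, so
 * ktime_divns(NSEC_PER_SEC, frame_duration_ns) reports 60. Under VRR, a
 * 25 ms frame yields 1000000000 / 25000000 = 40 (Hz).
 */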

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);

	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/**
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		amdgpu_dm_crtc_handle_vblank(acrtc);

	/**
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

	if (!acrtc)
		return;

	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */

/**
 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub AUX or SET_CONFIG command completion processing callback.
 * Copies the dmub notification to DM, to be read by the AUX command
 * issuing thread, and signals the event to wake up that thread.
 */
static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
					struct dmub_notification *notify)
{
	if (adev->dm.dmub_notify)
		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
		complete(&adev->dm.dmub_aux_transfer_done);
}
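
/*
 * Counterpart sketch (assumed shape of the waiter, which lives elsewhere
 * in DM): the thread that issued the AUX transfer blocks on the same
 * completion and reads the copied notification once woken, roughly:
 *
 *	if (wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done,
 *					msecs_to_jiffies(50)))
 *		reply = adev->dm.dmub_notify->aux_reply;
 *
 * The timeout value and field access here are illustrative.
 */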

/**
 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub Hpd interrupt processing callback. Gets the display index
 * through the link index and calls the helper to do the processing.
 */
static void dmub_hpd_callback(struct amdgpu_device *adev,
			      struct dmub_notification *notify)
{
	struct amdgpu_dm_connector *aconnector;
	struct amdgpu_dm_connector *hpd_aconnector = NULL;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct dc_link *link;
	u8 link_index = 0;
	struct drm_device *dev;

	if (adev == NULL)
		return;

	if (notify == NULL) {
		DRM_ERROR("DMUB HPD callback notification was NULL");
		return;
	}

	if (notify->link_index > adev->dm.dc->link_count) {
		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
		return;
	}

	link_index = notify->link_index;
	link = adev->dm.dc->links[link_index];
	dev = adev->dm.ddev;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (link && aconnector->dc_link == link) {
			if (notify->type == DMUB_NOTIFICATION_HPD)
				DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
			else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
				DRM_INFO("DMUB HPD IRQ callback: link_index=%u\n", link_index);
			else
				DRM_WARN("DMUB Unknown HPD callback type %d, link_index=%u\n",
					 notify->type, link_index);

			hpd_aconnector = aconnector;
			break;
		}
	}
	drm_connector_list_iter_end(&iter);

	if (hpd_aconnector) {
		if (notify->type == DMUB_NOTIFICATION_HPD)
			handle_hpd_irq_helper(hpd_aconnector);
		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
			handle_hpd_rx_irq(hpd_aconnector);
	}
}

/**
 * register_dmub_notify_callback - Sets callback for DMUB notify
 * @adev: amdgpu_device pointer
 * @type: Type of dmub notification
 * @callback: Dmub interrupt callback function
 * @dmub_int_thread_offload: offload indicator
 *
 * API to register a dmub callback handler for a dmub notification.
 * Also sets an indicator of whether the callback processing is to be
 * offloaded to the dmub interrupt handling thread.
 * Return: true if successfully registered, false if there is existing registration
 */
static bool register_dmub_notify_callback(struct amdgpu_device *adev,
					  enum dmub_notification_type type,
					  dmub_notify_interrupt_callback_t callback,
					  bool dmub_int_thread_offload)
{
	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
		adev->dm.dmub_callback[type] = callback;
		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
	} else
		return false;

	return true;
}
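
/*
 * Usage sketch (illustrative; the actual registrations are made during DM
 * init later in this file): HPD notifications are offloaded to a worker
 * because connector detection can sleep, while lightweight AUX replies
 * are handled directly:
 *
 *	register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD,
 *				      dmub_hpd_callback, true);
 *	register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
 *				      dmub_aux_setconfig_callback, false);
 */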

static void dm_handle_hpd_work(struct work_struct *work)
{
	struct dmub_hpd_work *dmub_hpd_wrk;

	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);

	if (!dmub_hpd_wrk->dmub_notify) {
		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
		return;
	}

	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
										      dmub_hpd_wrk->dmub_notify);
	}

	kfree(dmub_hpd_wrk->dmub_notify);
	kfree(dmub_hpd_wrk);

}

#define DMUB_TRACE_MAX_READ 64
/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox interrupt events.
 */
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
	struct dmub_notification notify;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dmcub_trace_buf_entry entry = { 0 };
	u32 count = 0;
	struct dmub_hpd_work *dmub_hpd_wrk;
	struct dc_link *plink = NULL;

	if (dc_enable_dmub_notifications(adev->dm.dc) &&
	    irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {

		do {
			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
				DRM_ERROR("DM: notify type %d invalid!", notify.type);
				continue;
			}
			if (!dm->dmub_callback[notify.type]) {
				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
				continue;
			}
			if (dm->dmub_thread_offload[notify.type] == true) {
				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
				if (!dmub_hpd_wrk) {
					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
					return;
				}
				dmub_hpd_wrk->dmub_notify = kmemdup(&notify, sizeof(struct dmub_notification),
								    GFP_ATOMIC);
				if (!dmub_hpd_wrk->dmub_notify) {
					kfree(dmub_hpd_wrk);
					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
					return;
				}
				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
				dmub_hpd_wrk->adev = adev;
				if (notify.type == DMUB_NOTIFICATION_HPD) {
					plink = adev->dm.dc->links[notify.link_index];
					if (plink) {
						plink->hpd_status =
							notify.hpd_status == DP_HPD_PLUG;
					}
				}
				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
			} else {
				dm->dmub_callback[notify.type](adev, &notify);
			}
		} while (notify.pending_notification);
	}


	do {
		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
							 entry.param0, entry.param1);

			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
					 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
		} else
			break;

		count++;

	} while (count <= DMUB_TRACE_MAX_READ);

	if (count > DMUB_TRACE_MAX_READ)
		DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
}

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;


	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}

	}

}
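
/*
 * Sizing example for the FBC buffer above (a reading aid): the allocation
 * is 4 bytes per pixel of the largest full timing in the mode list. For a
 * 1920x1080 mode with htotal = 2200 and vtotal = 1125, max_size * 4 =
 * 2200 * 1125 * 4 = 9900000 bytes, i.e. roughly 9.5 MiB of GTT.
 */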

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind = amdgpu_dm_audio_component_bind,
	.unbind = amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	u32 i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	/* Reset DMCUB if it was previously running - before we overwrite its memory. */
	status = dmub_srv_hw_reset(dmub_srv);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Error resetting DMUB HW: %d\n", status);

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->vm_manager.vram_base_offset;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	switch (adev->ip_versions[DCE_HWIP][0]) {
	case IP_VERSION(3, 1, 3):
	case IP_VERSION(3, 1, 4):
		hw_params.dpia_supported = true;
		hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
		break;
	default:
		break;
	}

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	if (!adev->dm.dc->ctx->dmub_srv)
		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}
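
/*
 * Layout of the DMUB firmware image as consumed by dm_dmub_hw_init()
 * (derived from the offset math above; only the 0x100-byte PSP header and
 * footer sizes are fixed, the other sizes come from the firmware header):
 *
 *	dmub_fw->data + ucode_array_offset_bytes:
 *		PSP header          (PSP_HEADER_BYTES = 0x100)
 *		inst/const payload  (inst_const_bytes - 0x200) -> window 0
 *		PSP footer          (PSP_FOOTER_BYTES = 0x100)
 *		bss/data            (bss_data_bytes)           -> window 2
 *
 * The VBIOS image is copied to window 3, and the mailbox, trace-buffer
 * and firmware-state windows (4-6) are zeroed before hardware init.
 */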

static void dm_dmub_hw_resume(struct amdgpu_device *adev)
{
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	enum dmub_status status;
	bool init;

	if (!dmub_srv) {
		/* DMUB isn't supported on the ASIC. */
		return;
	}

	status = dmub_srv_is_hw_init(dmub_srv, &init);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("DMUB hardware init check failed: %d\n", status);

	if (status == DMUB_STATUS_OK && init) {
		/* Wait for firmware load to finish. */
		status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
		if (status != DMUB_STATUS_OK)
			DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
	} else {
		/* Perform the full hardware initialization. */
		dm_dmub_hw_init(adev);
	}
}

static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	u64 pt_base;
	u32 logical_addr_low;
	u32 logical_addr_high;
	u32 agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	memset(pa_config, 0, sizeof(*pa_config));

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

	/* AGP aperture is disabled */
	if (agp_bot == agp_top) {
		logical_addr_low = adev->gmc.fb_start >> 18;
		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			/*
			 * Raven2 has a HW issue that it is unable to use the vram which
			 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
			 * workaround that increase system aperture high address (add 1)
			 * to get rid of the VM fault and hardware hang.
			 */
			logical_addr_high = (adev->gmc.fb_end >> 18) + 0x1;
		else
			logical_addr_high = adev->gmc.fb_end >> 18;
	} else {
		logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			/*
			 * Raven2 has a HW issue that it is unable to use the vram which
			 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
			 * workaround that increase system aperture high address (add 1)
			 * to get rid of the VM fault and hardware hang.
			 */
			logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
		else
			logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
	}

	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->vm_manager.vram_base_offset;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = adev->mode_info.gpu_vm_support;

}
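
/*
 * Note on the shifts above (a reading aid; the granularities follow from
 * packing the values and expanding them back): system-aperture bounds are
 * carried in 256 KiB units (>> 18, later << 18), the AGP aperture in
 * 16 MiB units (>> 24, later << 24), and GART page-table addresses in
 * 4 KiB pages (>> 12 / << 12). For example, an fb_start of 0xF400000000
 * gives logical_addr_low = 0xF400000000 >> 18 = 0x3D0000.
 */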

static void force_connector_state(
	struct amdgpu_dm_connector *aconnector,
	enum drm_connector_force force_state)
{
	struct drm_connector *connector = &aconnector->base;

	mutex_lock(&connector->dev->mode_config.mutex);
	aconnector->base.force = force_state;
	mutex_unlock(&connector->dev->mode_config.mutex);

	mutex_lock(&aconnector->hpd_lock);
	drm_kms_helper_connector_hotplug_event(connector);
	mutex_unlock(&aconnector->hpd_lock);
}

static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
{
	struct hpd_rx_irq_offload_work *offload_work;
	struct amdgpu_dm_connector *aconnector;
	struct dc_link *dc_link;
	struct amdgpu_device *adev;
	enum dc_connection_type new_connection_type = dc_connection_none;
	unsigned long flags;
	union test_response test_response;

	memset(&test_response, 0, sizeof(test_response));

	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
	aconnector = offload_work->offload_wq->aconnector;

	if (!aconnector) {
		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
		goto skip;
	}

	adev = drm_to_adev(aconnector->base.dev);
	dc_link = aconnector->dc_link;

	mutex_lock(&aconnector->hpd_lock);
	if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");
	mutex_unlock(&aconnector->hpd_lock);

	if (new_connection_type == dc_connection_none)
		goto skip;

	if (amdgpu_in_reset(adev))
		goto skip;

	mutex_lock(&adev->dm.dc_lock);
	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
		dc_link_dp_handle_automated_test(dc_link);

		if (aconnector->timing_changed) {
			/* force connector disconnect and reconnect */
			force_connector_state(aconnector, DRM_FORCE_OFF);
			msleep(100);
			force_connector_state(aconnector, DRM_FORCE_UNSPECIFIED);
		}

		test_response.bits.ACK = 1;

		core_link_write_dpcd(
			dc_link,
			DP_TEST_RESPONSE,
			&test_response.raw,
			sizeof(test_response));
	} else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
		   dc_link_check_link_loss_status(dc_link, &offload_work->data) &&
		   dc_link_dp_allow_hpd_rx_irq(dc_link)) {
		/* offload_work->data comes from handle_hpd_rx_irq->
		 * schedule_hpd_rx_offload_work. This is the deferred handling
		 * for an hpd short pulse. By this point the link status may
		 * have changed, so read the latest link status from the dpcd
		 * registers. If the link status is good, skip running link
		 * training again.
		 */
		union hpd_irq_data irq_data;

		memset(&irq_data, 0, sizeof(irq_data));

		/* before dc_link_dp_handle_link_loss, allow new link lost handle
		 * request be added to work queue if link lost at end of dc_link_
		 * dp_handle_link_loss
		 */
		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
		offload_work->offload_wq->is_handling_link_loss = false;
		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);

		if ((dc_link_dp_read_hpd_rx_irq_data(dc_link, &irq_data) == DC_OK) &&
		    dc_link_check_link_loss_status(dc_link, &irq_data))
			dc_link_dp_handle_link_loss(dc_link);
	}
	mutex_unlock(&adev->dm.dc_lock);

skip:
	kfree(offload_work);

}

static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
{
	int max_caps = dc->caps.max_links;
	int i = 0;
	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;

	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);

	if (!hpd_rx_offload_wq)
		return NULL;


	for (i = 0; i < max_caps; i++) {
		hpd_rx_offload_wq[i].wq =
			create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");

		if (hpd_rx_offload_wq[i].wq == NULL) {
			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
			goto out_err;
		}

		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
	}

	return hpd_rx_offload_wq;

out_err:
	for (i = 0; i < max_caps; i++) {
		if (hpd_rx_offload_wq[i].wq)
			destroy_workqueue(hpd_rx_offload_wq[i].wq);
	}
	kfree(hpd_rx_offload_wq);
	return NULL;
}

struct amdgpu_stutter_quirk {
	u16 chip_vendor;
	u16 chip_device;
	u16 subsys_vendor;
	u16 subsys_device;
	u8 revision;
};

static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
	{ 0, 0, 0, 0, 0 },
};

static bool dm_should_disable_stutter(struct pci_dev *pdev)
{
	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;

	while (p && p->chip_device != 0) {
		if (pdev->vendor == p->chip_vendor &&
		    pdev->device == p->chip_device &&
		    pdev->subsystem_vendor == p->subsys_vendor &&
		    pdev->subsystem_device == p->subsys_device &&
		    pdev->revision == p->revision) {
			return true;
		}
		++p;
	}
	return false;
}

static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"),
		},
	},
	{}
	/* TODO: refactor this from a fixed table to a dynamic option */
};

static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
{
	const struct dmi_system_id *dmi_id;

	dm->aux_hpd_discon_quirk = false;

	dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
	if (dmi_id) {
		dm->aux_hpd_discon_quirk = true;
		DRM_INFO("aux_hpd_discon_quirk attached\n");
	}
}
1531
7578ecda 1532static int amdgpu_dm_init(struct amdgpu_device *adev)
4562236b
HW
1533{
1534 struct dc_init_data init_data;
52704fca 1535 struct dc_callback_init init_params;
743b9786 1536 int r;
52704fca 1537
4a580877 1538 adev->dm.ddev = adev_to_drm(adev);
4562236b
HW
1539 adev->dm.adev = adev;
1540
4562236b
HW
1541 /* Zero all the fields */
1542 memset(&init_data, 0, sizeof(init_data));
52704fca 1543 memset(&init_params, 0, sizeof(init_params));
4562236b 1544
ead08b95 1545 mutex_init(&adev->dm.dpia_aux_lock);
674e78ac 1546 mutex_init(&adev->dm.dc_lock);
6ce8f316 1547 mutex_init(&adev->dm.audio_lock);
674e78ac 1548
4562236b
HW
1549 if(amdgpu_dm_irq_init(adev)) {
1550 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1551 goto error;
1552 }
1553
1554 init_data.asic_id.chip_family = adev->family;
1555
2dc31ca1 1556 init_data.asic_id.pci_revision_id = adev->pdev->revision;
4562236b 1557 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
dae66a04 1558 init_data.asic_id.chip_id = adev->pdev->device;
4562236b 1559
770d13b1 1560 init_data.asic_id.vram_width = adev->gmc.vram_width;
4562236b
HW
1561 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1562 init_data.asic_id.atombios_base_address =
1563 adev->mode_info.atom_context->bios;
1564
1565 init_data.driver = adev;
1566
1567 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1568
1569 if (!adev->dm.cgs_device) {
1570 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1571 goto error;
1572 }
1573
1574 init_data.cgs_device = adev->dm.cgs_device;
1575
4562236b
HW
1576 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1577
fd546bc5
AD
1578 switch (adev->ip_versions[DCE_HWIP][0]) {
1579 case IP_VERSION(2, 1, 0):
1580 switch (adev->dm.dmcub_fw_version) {
1581 case 0: /* development */
1582 case 0x1: /* linux-firmware.git hash 6d9f399 */
1583 case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1584 init_data.flags.disable_dmcu = false;
1585 break;
1586 default:
1587 init_data.flags.disable_dmcu = true;
1588 }
1589 break;
1590 case IP_VERSION(2, 0, 3):
1591 init_data.flags.disable_dmcu = true;
1592 break;
1593 default:
1594 break;
1595 }
1596
1597 switch (adev->asic_type) {
1598 case CHIP_CARRIZO:
1599 case CHIP_STONEY:
1600 init_data.flags.gpu_vm_support = true;
1601 break;
1602 default:
1603 switch (adev->ip_versions[DCE_HWIP][0]) {
1604 case IP_VERSION(1, 0, 0):
1605 case IP_VERSION(1, 0, 1):
1606 /* enable S/G on PCO and RV2 */
1607 if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1608 (adev->apu_flags & AMD_APU_IS_PICASSO))
1609 init_data.flags.gpu_vm_support = true;
1610 break;
1611 case IP_VERSION(2, 1, 0):
1612 case IP_VERSION(3, 0, 1):
1613 case IP_VERSION(3, 1, 2):
1614 case IP_VERSION(3, 1, 3):
1615 case IP_VERSION(3, 1, 4):
1616 case IP_VERSION(3, 1, 5):
1617 case IP_VERSION(3, 1, 6):
1618 init_data.flags.gpu_vm_support = true;
1619 break;
1620 default:
1621 break;
1622 }
1623 break;
1624 }
1625 if (init_data.flags.gpu_vm_support &&
1626 (amdgpu_sg_display == 0))
1627 init_data.flags.gpu_vm_support = false;
1628
1629 if (init_data.flags.gpu_vm_support)
1630 adev->mode_info.gpu_vm_support = true;
1631
1632 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1633 init_data.flags.fbc_support = true;
1634
1635 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1636 init_data.flags.multi_mon_pp_mclk_switch = true;
1637
1638 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1639 init_data.flags.disable_fractional_pwm = true;
1640
1641 if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1642 init_data.flags.edp_no_power_sequencing = true;
1643
1644 if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1645 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1646 if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1647 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
1648
1649 init_data.flags.seamless_boot_edp_requested = false;
1650
1651 if (check_seamless_boot_capability(adev)) {
1652 init_data.flags.seamless_boot_edp_requested = true;
1653 init_data.flags.allow_seamless_boot_optimization = true;
1654 DRM_INFO("Seamless boot condition check passed\n");
1655 }
1656
1657 init_data.flags.enable_mipi_converter_optimization = true;
1658
1659 init_data.dcn_reg_offsets = adev->reg_offset[DCE_HWIP][0];
1660 init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0];
1661
1662 INIT_LIST_HEAD(&adev->dm.da_list);
1663
1664 retrieve_dmi_info(&adev->dm);
1665
1666 /* Display Core create. */
1667 adev->dm.dc = dc_create(&init_data);
1668
1669 if (adev->dm.dc) {
1670 DRM_INFO("Display Core v%s initialized on %s\n", DC_VER,
1671 dce_version_to_string(adev->dm.dc->ctx->dce_version));
1672 } else {
1673 /* adev->dm.dc is NULL here, so it must not be dereferenced for the version string */
1674 DRM_INFO("Display Core v%s failed to initialize!\n", DC_VER);
1675 goto error;
1676 }
1677
1678 if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1679 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1680 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1681 }
1682
1683 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1684 adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
1685 if (dm_should_disable_stutter(adev->pdev))
1686 adev->dm.dc->debug.disable_stutter = true;
1687
1688 if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1689 adev->dm.dc->debug.disable_stutter = true;
1690
1691 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
1692 adev->dm.dc->debug.disable_dsc = true;
1693 }
1694
1695 if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1696 adev->dm.dc->debug.disable_clock_gate = true;
1697
1698 if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
1699 adev->dm.dc->debug.force_subvp_mclk_switch = true;
1700
1701 adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;
1702
1703 /* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */
1704 adev->dm.dc->debug.ignore_cable_id = true;
1705
1706 /* TODO: There is a new drm mst change where the freedom of
1707 * vc_next_start_slot update is revoked/moved into drm, instead of the
1708 * driver. This forces us to make sure to get vc_next_start_slot updated
1709 * in the drm function each time, without considering if mst_state is active
1710 * or not. Otherwise, the next hotplug will give a wrong start_slot
1711 * number. We are implementing a temporary solution to also notify drm of
1712 * mst deallocation when the link is no longer of MST type when uncommitting
1713 * the stream, so we will have more time to work on a proper solution.
1714 * Ideally when the dm_helpers_dp_mst_stop_top_mgr message is triggered, we
1715 * should notify drm to do a complete "reset" of its states and stop
1716 * calling further drm mst functions when the link is no longer of an MST
1717 * type. This could happen when we unplug MST hubs/displays. When the
1718 * uncommit stream comes later after unplug, we should just reset
1719 * hardware states only.
1720 */
1721 adev->dm.dc->debug.temp_mst_deallocation_sequence = true;
1722
1723 if (adev->dm.dc->caps.dp_hdmi21_pcon_support)
1724 DRM_INFO("DP-HDMI FRL PCON supported\n");
1725
1726 r = dm_dmub_hw_init(adev);
1727 if (r) {
1728 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1729 goto error;
1730 }
1731
1732 dc_hardware_init(adev->dm.dc);
1733
1734 adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1735 if (!adev->dm.hpd_rx_offload_wq) {
1736 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1737 goto error;
1738 }
1739
1740 if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1741 struct dc_phy_addr_space_config pa_config;
1742
1743 mmhub_read_system_context(adev, &pa_config);
1744
1745 // Call the DC init_memory func
1746 dc_setup_system_context(adev->dm.dc, &pa_config);
1747 }
1748
1749 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1750 if (!adev->dm.freesync_module) {
1751 DRM_ERROR(
1752 "amdgpu: failed to initialize freesync_module.\n");
1753 } else
1754 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1755 adev->dm.freesync_module);
1756
1757 amdgpu_dm_init_color_mod();
1758
1759 if (adev->dm.dc->caps.max_links > 0) {
1760 adev->dm.vblank_control_workqueue =
1761 create_singlethread_workqueue("dm_vblank_control_workqueue");
1762 if (!adev->dm.vblank_control_workqueue)
1763 DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1764 }
1765
1766 if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1767 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1768
1769 if (!adev->dm.hdcp_workqueue)
1770 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1771 else
1772 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1773
1774 dc_init_callbacks(adev->dm.dc, &init_params);
1775 }
1776 if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
1777 init_completion(&adev->dm.dmub_aux_transfer_done);
1778 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1779 if (!adev->dm.dmub_notify) {
1780 DRM_INFO("amdgpu: failed to allocate adev->dm.dmub_notify\n");
1781 goto error;
1782 }
1783
1784 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1785 if (!adev->dm.delayed_hpd_wq) {
1786 DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1787 goto error;
1788 }
1789
1790 amdgpu_dm_outbox_init(adev);
1791 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1792 dmub_aux_setconfig_callback, false)) {
1793 DRM_ERROR("amdgpu: failed to register dmub aux callback\n");
1794 goto error;
1795 }
1796 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1797 DRM_ERROR("amdgpu: failed to register dmub hpd callback\n");
1798 goto error;
1799 }
1800 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1801 DRM_ERROR("amdgpu: failed to register dmub hpd irq callback\n");
1802 goto error;
1803 }
1804 }
1805
1806 /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
1807 * It is expected that DMUB will resend any pending notifications at this point, for
1808 * example HPD from DPIA.
1809 */
1810 if (dc_is_dmub_outbox_supported(adev->dm.dc))
1811 dc_enable_dmub_outbox(adev->dm.dc);
1812
1813 if (amdgpu_dm_initialize_drm_device(adev)) {
1814 DRM_ERROR(
1815 "amdgpu: failed to initialize sw for display support.\n");
1816 goto error;
1817 }
1818
1819 /* create fake encoders for MST */
1820 dm_dp_create_fake_mst_encoders(adev);
1821
1822 /* TODO: Add_display_info? */
1823
1824 /* TODO use dynamic cursor width */
1825 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1826 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1827
1828 if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1829 DRM_ERROR(
1830 "amdgpu: failed to initialize sw for display support.\n");
1831 goto error;
1832 }
1833
1834#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1835 adev->dm.secure_display_ctxs = amdgpu_dm_crtc_secure_display_create_contexts(adev);
1836 if (!adev->dm.secure_display_ctxs)
1837 DRM_ERROR("amdgpu: failed to initialize secure display contexts.\n");
1838#endif
1839
1840 DRM_DEBUG_DRIVER("KMS initialized.\n");
1841
1842 return 0;
1843error:
1844 amdgpu_dm_fini(adev);
1845
1846 return -EINVAL;
1847}
1848
1849static int amdgpu_dm_early_fini(void *handle)
1850{
1851 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1852
1853 amdgpu_dm_audio_fini(adev);
1854
1855 return 0;
1856}
1857
1858 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1859{
1860 int i;
1861
1862 if (adev->dm.vblank_control_workqueue) {
1863 destroy_workqueue(adev->dm.vblank_control_workqueue);
1864 adev->dm.vblank_control_workqueue = NULL;
1865 }
1866
1867 amdgpu_dm_destroy_drm_device(&adev->dm);
1868
1869#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1870 if (adev->dm.secure_display_ctxs) {
1871 for (i = 0; i < adev->mode_info.num_crtc; i++) {
1872 if (adev->dm.secure_display_ctxs[i].crtc) {
1873 flush_work(&adev->dm.secure_display_ctxs[i].notify_ta_work);
1874 flush_work(&adev->dm.secure_display_ctxs[i].forward_roi_work);
1875 }
1876 }
1877 kfree(adev->dm.secure_display_ctxs);
1878 adev->dm.secure_display_ctxs = NULL;
1879 }
1880#endif
1881 if (adev->dm.hdcp_workqueue) {
1882 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1883 adev->dm.hdcp_workqueue = NULL;
1884 }
1885
1886 if (adev->dm.dc)
1887 dc_deinit_callbacks(adev->dm.dc);
1888
1889 if (adev->dm.dc)
1890 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1891
1892 if (dc_enable_dmub_notifications(adev->dm.dc)) {
1893 kfree(adev->dm.dmub_notify);
1894 adev->dm.dmub_notify = NULL;
1895 destroy_workqueue(adev->dm.delayed_hpd_wq);
1896 adev->dm.delayed_hpd_wq = NULL;
1897 }
1898
1899 if (adev->dm.dmub_bo)
1900 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1901 &adev->dm.dmub_bo_gpu_addr,
1902 &adev->dm.dmub_bo_cpu_addr);
1903
1904 if (adev->dm.hpd_rx_offload_wq) {
1905 for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1906 if (adev->dm.hpd_rx_offload_wq[i].wq) {
1907 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1908 adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1909 }
1910 }
1911
1912 kfree(adev->dm.hpd_rx_offload_wq);
1913 adev->dm.hpd_rx_offload_wq = NULL;
1914 }
1915
1916 /* DC Destroy TODO: Replace destroy DAL */
1917 if (adev->dm.dc)
1918 dc_destroy(&adev->dm.dc);
1919 /*
1920 * TODO: pageflip, vblank interrupt
1921 *
1922 * amdgpu_dm_irq_fini(adev);
1923 */
1924
1925 if (adev->dm.cgs_device) {
1926 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1927 adev->dm.cgs_device = NULL;
1928 }
1929 if (adev->dm.freesync_module) {
1930 mod_freesync_destroy(adev->dm.freesync_module);
1931 adev->dm.freesync_module = NULL;
1932 }
1933
1934 mutex_destroy(&adev->dm.audio_lock);
1935 mutex_destroy(&adev->dm.dc_lock);
1936 mutex_destroy(&adev->dm.dpia_aux_lock);
1937
1938 return;
1939}
1940
1941 static int load_dmcu_fw(struct amdgpu_device *adev)
1942{
1943 const char *fw_name_dmcu = NULL;
1944 int r;
1945 const struct dmcu_firmware_header_v1_0 *hdr;
1946
1947 switch (adev->asic_type) {
1948#if defined(CONFIG_DRM_AMD_DC_SI)
1949 case CHIP_TAHITI:
1950 case CHIP_PITCAIRN:
1951 case CHIP_VERDE:
1952 case CHIP_OLAND:
1953#endif
1954 case CHIP_BONAIRE:
1955 case CHIP_HAWAII:
1956 case CHIP_KAVERI:
1957 case CHIP_KABINI:
1958 case CHIP_MULLINS:
1959 case CHIP_TONGA:
1960 case CHIP_FIJI:
1961 case CHIP_CARRIZO:
1962 case CHIP_STONEY:
1963 case CHIP_POLARIS11:
1964 case CHIP_POLARIS10:
1965 case CHIP_POLARIS12:
1966 case CHIP_VEGAM:
1967 case CHIP_VEGA10:
1968 case CHIP_VEGA12:
1969 case CHIP_VEGA20:
1970 return 0;
1971 case CHIP_NAVI12:
1972 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1973 break;
1974 case CHIP_RAVEN:
1975 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1976 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1977 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1978 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1979 else
1980 return 0;
1981 break;
1982 default:
1983 switch (adev->ip_versions[DCE_HWIP][0]) {
1984 case IP_VERSION(2, 0, 2):
1985 case IP_VERSION(2, 0, 3):
1986 case IP_VERSION(2, 0, 0):
1987 case IP_VERSION(2, 1, 0):
1988 case IP_VERSION(3, 0, 0):
1989 case IP_VERSION(3, 0, 2):
1990 case IP_VERSION(3, 0, 3):
1991 case IP_VERSION(3, 0, 1):
1992 case IP_VERSION(3, 1, 2):
1993 case IP_VERSION(3, 1, 3):
1994 case IP_VERSION(3, 1, 4):
1995 case IP_VERSION(3, 1, 5):
1996 case IP_VERSION(3, 1, 6):
1997 case IP_VERSION(3, 2, 0):
1998 case IP_VERSION(3, 2, 1):
1999 return 0;
2000 default:
2001 break;
2002 }
2003 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
2004 return -EINVAL;
2005 }
2006
2007 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
2008 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
2009 return 0;
2010 }
2011
2012 r = amdgpu_ucode_request(adev, &adev->dm.fw_dmcu, fw_name_dmcu);
2013 if (r == -ENODEV) {
2014 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
2015 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
2016 adev->dm.fw_dmcu = NULL;
2017 return 0;
2018 }
2019 if (r) {
2020 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
2021 fw_name_dmcu);
2022 amdgpu_ucode_release(&adev->dm.fw_dmcu);
2023 return r;
2024 }
2025
2026 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
2027 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
2028 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
2029 adev->firmware.fw_size +=
2030 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
2031
2032 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
2033 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
2034 adev->firmware.fw_size +=
2035 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
2036
2037 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
2038
2039 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
2040
2041 return 0;
2042}
2043
2044static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
2045{
2046 struct amdgpu_device *adev = ctx;
2047
2048 return dm_read_reg(adev->dm.dc->ctx, address);
2049}
2050
2051static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
2052 uint32_t value)
2053{
2054 struct amdgpu_device *adev = ctx;
2055
2056 return dm_write_reg(adev->dm.dc->ctx, address, value);
2057}
2058
2059static int dm_dmub_sw_init(struct amdgpu_device *adev)
2060{
2061 struct dmub_srv_create_params create_params;
2062 struct dmub_srv_region_params region_params;
2063 struct dmub_srv_region_info region_info;
2064 struct dmub_srv_fb_params fb_params;
2065 struct dmub_srv_fb_info *fb_info;
2066 struct dmub_srv *dmub_srv;
2067 const struct dmcub_firmware_header_v1_0 *hdr;
2068 enum dmub_asic dmub_asic;
2069 enum dmub_status status;
2070 int r;
2071
2072 switch (adev->ip_versions[DCE_HWIP][0]) {
2073 case IP_VERSION(2, 1, 0):
2074 dmub_asic = DMUB_ASIC_DCN21;
2075 break;
2076 case IP_VERSION(3, 0, 0):
2077 dmub_asic = DMUB_ASIC_DCN30;
2078 break;
2079 case IP_VERSION(3, 0, 1):
2080 dmub_asic = DMUB_ASIC_DCN301;
2081 break;
2082 case IP_VERSION(3, 0, 2):
2083 dmub_asic = DMUB_ASIC_DCN302;
2084 break;
2085 case IP_VERSION(3, 0, 3):
2086 dmub_asic = DMUB_ASIC_DCN303;
2087 break;
2088 case IP_VERSION(3, 1, 2):
2089 case IP_VERSION(3, 1, 3):
2090 dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
2091 break;
2092 case IP_VERSION(3, 1, 4):
2093 dmub_asic = DMUB_ASIC_DCN314;
2094 break;
2095 case IP_VERSION(3, 1, 5):
2096 dmub_asic = DMUB_ASIC_DCN315;
2097 break;
2098 case IP_VERSION(3, 1, 6):
2099 dmub_asic = DMUB_ASIC_DCN316;
2100 break;
2101 case IP_VERSION(3, 2, 0):
2102 dmub_asic = DMUB_ASIC_DCN32;
2103 break;
2104 case IP_VERSION(3, 2, 1):
2105 dmub_asic = DMUB_ASIC_DCN321;
2106 break;
2107 default:
2108 /* ASIC doesn't support DMUB. */
2109 return 0;
2110 }
2111
2112 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
2113 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
2114
2115 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
2116 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
2117 AMDGPU_UCODE_ID_DMCUB;
2118 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
2119 adev->dm.dmub_fw;
2120 adev->firmware.fw_size +=
2121 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
2122
2123 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
2124 adev->dm.dmcub_fw_version);
2125 }
2126
2127
2128 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
2129 dmub_srv = adev->dm.dmub_srv;
2130
2131 if (!dmub_srv) {
2132 DRM_ERROR("Failed to allocate DMUB service!\n");
2133 return -ENOMEM;
2134 }
2135
2136 memset(&create_params, 0, sizeof(create_params));
2137 create_params.user_ctx = adev;
2138 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
2139 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
2140 create_params.asic = dmub_asic;
2141
2142 /* Create the DMUB service. */
2143 status = dmub_srv_create(dmub_srv, &create_params);
2144 if (status != DMUB_STATUS_OK) {
2145 DRM_ERROR("Error creating DMUB service: %d\n", status);
2146 return -EINVAL;
2147 }
2148
2149 /* Calculate the size of all the regions for the DMUB service. */
2150 memset(&region_params, 0, sizeof(region_params));
2151
2152 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
2153 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
2154 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
2155 region_params.vbios_size = adev->bios_size;
2156 region_params.fw_bss_data = region_params.bss_data_size ?
2157 adev->dm.dmub_fw->data +
2158 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2159 le32_to_cpu(hdr->inst_const_bytes) : NULL;
2160 region_params.fw_inst_const =
2161 adev->dm.dmub_fw->data +
2162 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2163 PSP_HEADER_BYTES;
2164
2165 status = dmub_srv_calc_region_info(dmub_srv, &region_params,
2166 &region_info);
2167
2168 if (status != DMUB_STATUS_OK) {
2169 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
2170 return -EINVAL;
2171 }
2172
2173 /*
2174 * Allocate a framebuffer based on the total size of all the regions.
2175 * TODO: Move this into GART.
2176 */
2177 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2178 AMDGPU_GEM_DOMAIN_VRAM |
2179 AMDGPU_GEM_DOMAIN_GTT,
2180 &adev->dm.dmub_bo,
2181 &adev->dm.dmub_bo_gpu_addr,
2182 &adev->dm.dmub_bo_cpu_addr);
2183 if (r)
2184 return r;
2185
2186 /* Rebase the regions on the framebuffer address. */
2187 memset(&fb_params, 0, sizeof(fb_params));
2188 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
2189 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
2190 fb_params.region_info = &region_info;
2191
2192 adev->dm.dmub_fb_info =
2193 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2194 fb_info = adev->dm.dmub_fb_info;
2195
2196 if (!fb_info) {
2197 DRM_ERROR(
2198 "Failed to allocate framebuffer info for DMUB service!\n");
2199 return -ENOMEM;
2200 }
2201
2202 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
2203 if (status != DMUB_STATUS_OK) {
2204 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2205 return -EINVAL;
2206 }
2207
2208 return 0;
2209}
2210
2211static int dm_sw_init(void *handle)
2212{
2213 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2214 int r;
2215
2216 r = dm_dmub_sw_init(adev);
2217 if (r)
2218 return r;
2219
2220 return load_dmcu_fw(adev);
2221}
2222
2223static int dm_sw_fini(void *handle)
2224{
2225 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2226
2227 kfree(adev->dm.dmub_fb_info);
2228 adev->dm.dmub_fb_info = NULL;
2229
2230 if (adev->dm.dmub_srv) {
2231 dmub_srv_destroy(adev->dm.dmub_srv);
2232 adev->dm.dmub_srv = NULL;
2233 }
2234
2235 amdgpu_ucode_release(&adev->dm.dmub_fw);
2236 amdgpu_ucode_release(&adev->dm.fw_dmcu);
2237
2238 return 0;
2239}
2240
2241 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2242{
2243 struct amdgpu_dm_connector *aconnector;
2244 struct drm_connector *connector;
2245 struct drm_connector_list_iter iter;
2246 int ret = 0;
2247
2248 drm_connector_list_iter_begin(dev, &iter);
2249 drm_for_each_connector_iter(connector, &iter) {
2250 aconnector = to_amdgpu_dm_connector(connector);
2251 if (aconnector->dc_link->type == dc_connection_mst_branch &&
2252 aconnector->mst_mgr.aux) {
2253 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2254 aconnector,
2255 aconnector->base.base.id);
2256
2257 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2258 if (ret < 0) {
2259 DRM_ERROR("DM_MST: Failed to start MST\n");
2260 aconnector->dc_link->type =
2261 dc_connection_single;
2262 ret = dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
2263 aconnector->dc_link);
2264 break;
2265 }
2266 }
2267 }
2268 drm_connector_list_iter_end(&iter);
2269
2270 return ret;
2271}
2272
2273static int dm_late_init(void *handle)
2274{
2275 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2276
2277 struct dmcu_iram_parameters params;
2278 unsigned int linear_lut[16];
2279 int i;
2280 struct dmcu *dmcu = NULL;
2281
2282 dmcu = adev->dm.dc->res_pool->dmcu;
2283
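/* Build a 16-point linear LUT: entries evenly spaced from 0x0000 to 0xFFFF. */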
2284 for (i = 0; i < 16; i++)
2285 linear_lut[i] = 0xFFFF * i / 15;
2286
2287 params.set = 0;
2288 params.backlight_ramping_override = false;
2289 params.backlight_ramping_start = 0xCCCC;
2290 params.backlight_ramping_reduction = 0xCCCCCCCC;
2291 params.backlight_lut_array_size = 16;
2292 params.backlight_lut_array = linear_lut;
2293
2294 /* Min backlight level after ABM reduction; don't allow below 1%:
2295 * 0xFFFF * 0.01 = 0x28F (655 decimal)
2296 */
2297 params.min_abm_backlight = 0x28F;
2298 /* In the case where abm is implemented on dmcub,
2299 * dmcu object will be null.
2300 * ABM 2.4 and up are implemented on dmcub.
2301 */
2302 if (dmcu) {
2303 if (!dmcu_load_iram(dmcu, params))
2304 return -EINVAL;
2305 } else if (adev->dm.dc->ctx->dmub_srv) {
2306 struct dc_link *edp_links[MAX_NUM_EDP];
2307 int edp_num;
2308
2309 dc_get_edp_links(adev->dm.dc, edp_links, &edp_num);
2310 for (i = 0; i < edp_num; i++) {
2311 if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2312 return -EINVAL;
2313 }
2314 }
2315
2316 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2317}
2318
2319static void s3_handle_mst(struct drm_device *dev, bool suspend)
2320{
2321 struct amdgpu_dm_connector *aconnector;
2322 struct drm_connector *connector;
2323 struct drm_connector_list_iter iter;
2324 struct drm_dp_mst_topology_mgr *mgr;
2325 int ret;
2326 bool need_hotplug = false;
2327
2328 drm_connector_list_iter_begin(dev, &iter);
2329 drm_for_each_connector_iter(connector, &iter) {
2330 aconnector = to_amdgpu_dm_connector(connector);
2331 if (aconnector->dc_link->type != dc_connection_mst_branch ||
2332 aconnector->mst_root)
2333 continue;
2334
2335 mgr = &aconnector->mst_mgr;
2336
2337 if (suspend) {
2338 drm_dp_mst_topology_mgr_suspend(mgr);
2339 } else {
2340 /* if extended timeout is supported in hardware,
2341 * default to LTTPR timeout (3.2ms) first as a W/A for DP link layer
2342 * CTS 4.2.1.1 regression introduced by CTS specs requirement update.
2343 */
2344 try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD);
2345 if (!dp_is_lttpr_present(aconnector->dc_link))
2346 try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
2347
2348 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2349 if (ret < 0) {
2350 dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
2351 aconnector->dc_link);
2352 need_hotplug = true;
2353 }
2354 }
2355 }
2356 drm_connector_list_iter_end(&iter);
2357
2358 if (need_hotplug)
2359 drm_kms_helper_hotplug_event(dev);
2360}
2361
2362static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2363{
2364 int ret = 0;
2365
2366 /* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
2367 * on the Windows driver dc implementation.
2368 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
2369 * should be passed to smu during boot up and resume from s3.
2370 * boot up: dc calculate dcn watermark clock settings within dc_create,
2371 * dcn20_resource_construct
2372 * then call pplib functions below to pass the settings to smu:
2373 * smu_set_watermarks_for_clock_ranges
2374 * smu_set_watermarks_table
2375 * navi10_set_watermarks_table
2376 * smu_write_watermarks_table
2377 *
2378 * For Renoir, clock settings of dcn watermark are also fixed values.
2379 * dc has implemented different flow for window driver:
2380 * dc_hardware_init / dc_set_power_state
2381 * dcn10_init_hw
2382 * notify_wm_ranges
2383 * set_wm_ranges
2384 * -- Linux
2385 * smu_set_watermarks_for_clock_ranges
2386 * renoir_set_watermarks_table
2387 * smu_write_watermarks_table
2388 *
2389 * For Linux,
2390 * dc_hardware_init -> amdgpu_dm_init
2391 * dc_set_power_state --> dm_resume
2392 *
2393 * therefore, this function applies to navi10/12/14 but not Renoir
2394 *
2395 */
2396 switch (adev->ip_versions[DCE_HWIP][0]) {
2397 case IP_VERSION(2, 0, 2):
2398 case IP_VERSION(2, 0, 0):
2399 break;
2400 default:
2401 return 0;
2402 }
2403
2404 ret = amdgpu_dpm_write_watermarks_table(adev);
2405 if (ret) {
2406 DRM_ERROR("Failed to update WMTABLE!\n");
2407 return ret;
2408 }
2409
2410 return 0;
2411}
2412
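
For reference, a minimal sketch of the Linux resume ordering described in the comment above; it mirrors the tail of dm_resume() later in this file, and the wrapper function itself is illustrative, not a driver symbol:

/* Illustrative only: dm_resume() ends with this exact call sequence. */
static int example_resume_tail(struct amdgpu_device *adev)
{
	amdgpu_dm_irq_resume_late(adev);

	amdgpu_dm_smu_write_watermarks_table(adev);

	return 0;
}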
2413/**
2414 * dm_hw_init() - Initialize DC device
2415 * @handle: The base driver device containing the amdgpu_dm device.
2416 *
2417 * Initialize the &struct amdgpu_display_manager device. This involves calling
2418 * the initializers of each DM component, then populating the struct with them.
2419 *
2420 * Although the function implies hardware initialization, both hardware and
2421 * software are initialized here. Splitting them out to their relevant init
2422 * hooks is a future TODO item.
2423 *
2424 * Some notable things that are initialized here:
2425 *
2426 * - Display Core, both software and hardware
2427 * - DC modules that we need (freesync and color management)
2428 * - DRM software states
2429 * - Interrupt sources and handlers
2430 * - Vblank support
2431 * - Debug FS entries, if enabled
2432 */
2433static int dm_hw_init(void *handle)
2434{
2435 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2436 /* Create DAL display manager */
2437 amdgpu_dm_init(adev);
2438 amdgpu_dm_hpd_init(adev);
2439
2440 return 0;
2441}
2442
2443/**
2444 * dm_hw_fini() - Teardown DC device
2445 * @handle: The base driver device containing the amdgpu_dm device.
2446 *
2447 * Teardown components within &struct amdgpu_display_manager that require
2448 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2449 * were loaded. Also flush IRQ workqueues and disable them.
2450 */
2451static int dm_hw_fini(void *handle)
2452{
2453 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2454
2455 amdgpu_dm_hpd_fini(adev);
2456
2457 amdgpu_dm_irq_fini(adev);
2458 amdgpu_dm_fini(adev);
2459 return 0;
2460}
2461
2462
2463static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2464 struct dc_state *state, bool enable)
2465{
2466 enum dc_irq_source irq_source;
2467 struct amdgpu_crtc *acrtc;
2468 int rc = -EBUSY;
2469 int i = 0;
2470
2471 for (i = 0; i < state->stream_count; i++) {
2472 acrtc = get_crtc_by_otg_inst(
2473 adev, state->stream_status[i].primary_otg_inst);
2474
2475 if (acrtc && state->stream_status[i].plane_count != 0) {
2476 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2477 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2478 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
2479 acrtc->crtc_id, enable ? "en" : "dis", rc);
2480 if (rc)
2481 DRM_WARN("Failed to %s pflip interrupts\n",
2482 enable ? "enable" : "disable");
2483
2484 if (enable) {
2485 rc = amdgpu_dm_crtc_enable_vblank(&acrtc->base);
2486 if (rc)
2487 DRM_WARN("Failed to enable vblank interrupts\n");
2488 } else {
2489 amdgpu_dm_crtc_disable_vblank(&acrtc->base);
2490 }
2491
2492 }
2493 }
2494
2495}
2496
2497 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2498{
2499 struct dc_state *context = NULL;
2500 enum dc_status res = DC_ERROR_UNEXPECTED;
2501 int i;
2502 struct dc_stream_state *del_streams[MAX_PIPES];
2503 int del_streams_count = 0;
2504
2505 memset(del_streams, 0, sizeof(del_streams));
2506
2507 context = dc_create_state(dc);
2508 if (context == NULL)
2509 goto context_alloc_fail;
2510
2511 dc_resource_state_copy_construct_current(dc, context);
2512
2513 /* First remove from context all streams */
2514 for (i = 0; i < context->stream_count; i++) {
2515 struct dc_stream_state *stream = context->streams[i];
2516
2517 del_streams[del_streams_count++] = stream;
2518 }
2519
2520 /* Remove all planes for removed streams and then remove the streams */
2521 for (i = 0; i < del_streams_count; i++) {
2522 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2523 res = DC_FAIL_DETACH_SURFACES;
2524 goto fail;
2525 }
2526
2527 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2528 if (res != DC_OK)
2529 goto fail;
2530 }
2531
2532 res = dc_commit_streams(dc, context->streams, context->stream_count);
2533
2534fail:
2535 dc_release_state(context);
2536
2537context_alloc_fail:
2538 return res;
2539}
2540
2541static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2542{
2543 int i;
2544
2545 if (dm->hpd_rx_offload_wq) {
2546 for (i = 0; i < dm->dc->caps.max_links; i++)
2547 flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2548 }
2549}
2550
2551static int dm_suspend(void *handle)
2552{
2553 struct amdgpu_device *adev = handle;
2554 struct amdgpu_display_manager *dm = &adev->dm;
2555 int ret = 0;
2556
2557 if (amdgpu_in_reset(adev)) {
2558 mutex_lock(&dm->dc_lock);
2559
2560 dc_allow_idle_optimizations(adev->dm.dc, false);
2561
2562 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2563
2564 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2565
2566 amdgpu_dm_commit_zero_streams(dm->dc);
2567
2568 amdgpu_dm_irq_suspend(adev);
2569
2570 hpd_rx_irq_work_suspend(dm);
2571
2572 return ret;
2573 }
2574
2575 WARN_ON(adev->dm.cached_state);
2576 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2577
2578 s3_handle_mst(adev_to_drm(adev), true);
2579
2580 amdgpu_dm_irq_suspend(adev);
2581
2582 hpd_rx_irq_work_suspend(dm);
2583
2584 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2585
2586 return 0;
2587}
2588
2589 struct amdgpu_dm_connector *
2590 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2591 struct drm_crtc *crtc)
2592{
2593 u32 i;
2594 struct drm_connector_state *new_con_state;
2595 struct drm_connector *connector;
2596 struct drm_crtc *crtc_from_state;
2597
2598 for_each_new_connector_in_state(state, connector, new_con_state, i) {
2599 crtc_from_state = new_con_state->crtc;
2600
2601 if (crtc_from_state == crtc)
2602 return to_amdgpu_dm_connector(connector);
2603 }
2604
2605 return NULL;
2606}
2607
2608static void emulated_link_detect(struct dc_link *link)
2609{
2610 struct dc_sink_init_data sink_init_data = { 0 };
2611 struct display_sink_capability sink_caps = { 0 };
2612 enum dc_edid_status edid_status;
2613 struct dc_context *dc_ctx = link->ctx;
2614 struct dc_sink *sink = NULL;
2615 struct dc_sink *prev_sink = NULL;
2616
2617 link->type = dc_connection_none;
2618 prev_sink = link->local_sink;
2619
30164a16
VL
2620 if (prev_sink)
2621 dc_sink_release(prev_sink);
fbbdadf2
BL
2622
2623 switch (link->connector_signal) {
2624 case SIGNAL_TYPE_HDMI_TYPE_A: {
2625 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2626 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2627 break;
2628 }
2629
2630 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2631 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2632 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2633 break;
2634 }
2635
2636 case SIGNAL_TYPE_DVI_DUAL_LINK: {
2637 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2638 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2639 break;
2640 }
2641
2642 case SIGNAL_TYPE_LVDS: {
2643 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2644 sink_caps.signal = SIGNAL_TYPE_LVDS;
2645 break;
2646 }
2647
2648 case SIGNAL_TYPE_EDP: {
2649 sink_caps.transaction_type =
2650 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2651 sink_caps.signal = SIGNAL_TYPE_EDP;
2652 break;
2653 }
2654
2655 case SIGNAL_TYPE_DISPLAY_PORT: {
2656 sink_caps.transaction_type =
2657 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2658 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2659 break;
2660 }
2661
2662 default:
2663 DC_ERROR("Invalid connector type! signal:%d\n",
2664 link->connector_signal);
2665 return;
2666 }
2667
2668 sink_init_data.link = link;
2669 sink_init_data.sink_signal = sink_caps.signal;
2670
2671 sink = dc_sink_create(&sink_init_data);
2672 if (!sink) {
2673 DC_ERROR("Failed to create sink!\n");
2674 return;
2675 }
2676
2677 /* dc_sink_create returns a new reference */
2678 link->local_sink = sink;
2679
2680 edid_status = dm_helpers_read_local_edid(
2681 link->ctx,
2682 link,
2683 sink);
2684
2685 if (edid_status != EDID_OK)
2686 DC_ERROR("Failed to read EDID");
2687
2688}
2689
2690static void dm_gpureset_commit_state(struct dc_state *dc_state,
2691 struct amdgpu_display_manager *dm)
2692{
2693 struct {
2694 struct dc_surface_update surface_updates[MAX_SURFACES];
2695 struct dc_plane_info plane_infos[MAX_SURFACES];
2696 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2697 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2698 struct dc_stream_update stream_update;
2699 } *bundle;
2700 int k, m;
2701
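/* The bundle is heap-allocated: its MAX_SURFACES-sized arrays are too large for the kernel stack. */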
2702 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2703
2704 if (!bundle) {
2705 dm_error("Failed to allocate update bundle\n");
2706 goto cleanup;
2707 }
2708
2709 for (k = 0; k < dc_state->stream_count; k++) {
2710 bundle->stream_update.stream = dc_state->streams[k];
2711
2712 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2713 bundle->surface_updates[m].surface =
2714 dc_state->stream_status->plane_states[m];
2715 bundle->surface_updates[m].surface->force_full_update =
2716 true;
2717 }
2718
2719 update_planes_and_stream_adapter(dm->dc,
2720 UPDATE_TYPE_FULL,
2721 dc_state->stream_status->plane_count,
2722 dc_state->streams[k],
2723 &bundle->stream_update,
2724 bundle->surface_updates);
2725 }
2726
2727cleanup:
2728 kfree(bundle);
2729
2730 return;
2731}
2732
2733static int dm_resume(void *handle)
2734{
2735 struct amdgpu_device *adev = handle;
2736 struct drm_device *ddev = adev_to_drm(adev);
2737 struct amdgpu_display_manager *dm = &adev->dm;
2738 struct amdgpu_dm_connector *aconnector;
2739 struct drm_connector *connector;
2740 struct drm_connector_list_iter iter;
2741 struct drm_crtc *crtc;
2742 struct drm_crtc_state *new_crtc_state;
2743 struct dm_crtc_state *dm_new_crtc_state;
2744 struct drm_plane *plane;
2745 struct drm_plane_state *new_plane_state;
2746 struct dm_plane_state *dm_new_plane_state;
2747 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2748 enum dc_connection_type new_connection_type = dc_connection_none;
2749 struct dc_state *dc_state;
2750 int i, r, j;
4562236b 2751
2752 if (amdgpu_in_reset(adev)) {
2753 dc_state = dm->cached_dc_state;
2754
2755 /*
2756 * The dc->current_state is backed up into dm->cached_dc_state
2757 * before we commit 0 streams.
2758 *
2759 * DC will clear link encoder assignments on the real state
2760 * but the changes won't propagate over to the copy we made
2761 * before the 0 streams commit.
2762 *
2763 * DC expects that link encoder assignments are *not* valid
2764 * when committing a state, so as a workaround we can copy
2765 * off of the current state.
2766 *
2767 * We lose the previous assignments, but we had already
2768 * commit 0 streams anyway.
2769 */
2770 link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
2771
2772 r = dm_dmub_hw_init(adev);
2773 if (r)
2774 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2775
2776 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2777 dc_resume(dm->dc);
2778
2779 amdgpu_dm_irq_resume_early(adev);
2780
2781 for (i = 0; i < dc_state->stream_count; i++) {
2782 dc_state->streams[i]->mode_changed = true;
2783 for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2784 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2785 = 0xffffffff;
2786 }
2787 }
2788
2789 if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
2790 amdgpu_dm_outbox_init(adev);
2791 dc_enable_dmub_outbox(adev->dm.dc);
2792 }
2793
2794 WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count));
2795
2796 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2797
2798 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2799
2800 dc_release_state(dm->cached_dc_state);
2801 dm->cached_dc_state = NULL;
2802
2803 amdgpu_dm_irq_resume_late(adev);
2804
2805 mutex_unlock(&dm->dc_lock);
2806
2807 return 0;
2808 }
2809 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2810 dc_release_state(dm_state->context);
2811 dm_state->context = dc_create_state(dm->dc);
2812 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2813 dc_resource_state_construct(dm->dc, dm_state->context);
2814
2815 /* Before powering on DC we need to re-initialize DMUB. */
2816 dm_dmub_hw_resume(adev);
2817
2818 /* Re-enable outbox interrupts for DPIA. */
2819 if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
2820 amdgpu_dm_outbox_init(adev);
2821 dc_enable_dmub_outbox(adev->dm.dc);
2822 }
2823
2824 /* power on hardware */
2825 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2826
2827 /* program HPD filter */
2828 dc_resume(dm->dc);
2829
2830 /*
2831 * early enable HPD Rx IRQ, should be done before set mode as short
2832 * pulse interrupts are used for MST
2833 */
2834 amdgpu_dm_irq_resume_early(adev);
2835
2836 /* On resume we need to rewrite the MSTM control bits to enable MST */
2837 s3_handle_mst(ddev, false);
2838
2839 /* Do detection */
2840 drm_connector_list_iter_begin(ddev, &iter);
2841 drm_for_each_connector_iter(connector, &iter) {
2842 aconnector = to_amdgpu_dm_connector(connector);
2843
2844 if (!aconnector->dc_link)
2845 continue;
2846
2847 /*
2848 * this is the case when traversing through already created
2849 * MST connectors, should be skipped
2850 */
2851 if (aconnector->dc_link->type == dc_connection_mst_branch)
2852 continue;
2853
2854 mutex_lock(&aconnector->hpd_lock);
2855 if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
2856 DRM_ERROR("KMS: Failed to detect connector\n");
2857
2858 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2859 emulated_link_detect(aconnector->dc_link);
2860 } else {
2861 mutex_lock(&dm->dc_lock);
2862 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2863 mutex_unlock(&dm->dc_lock);
2864 }
2865
2866 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2867 aconnector->fake_enable = false;
2868
2869 if (aconnector->dc_sink)
2870 dc_sink_release(aconnector->dc_sink);
2871 aconnector->dc_sink = NULL;
2872 amdgpu_dm_update_connector_after_detect(aconnector);
2873 mutex_unlock(&aconnector->hpd_lock);
2874 }
2875 drm_connector_list_iter_end(&iter);
2876
2877 /* Force mode set in atomic commit */
2878 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2879 new_crtc_state->active_changed = true;
2880
2881 /*
2882 * atomic_check is expected to create the dc states. We need to release
2883 * them here, since they were duplicated as part of the suspend
2884 * procedure.
2885 */
2886 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2887 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2888 if (dm_new_crtc_state->stream) {
2889 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2890 dc_stream_release(dm_new_crtc_state->stream);
2891 dm_new_crtc_state->stream = NULL;
2892 }
2893 }
2894
2895 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2896 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2897 if (dm_new_plane_state->dc_state) {
2898 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2899 dc_plane_state_release(dm_new_plane_state->dc_state);
2900 dm_new_plane_state->dc_state = NULL;
2901 }
2902 }
2903
2904 drm_atomic_helper_resume(ddev, dm->cached_state);
2905
2906 dm->cached_state = NULL;
2907
2908 amdgpu_dm_irq_resume_late(adev);
2909
2910 amdgpu_dm_smu_write_watermarks_table(adev);
2911
2912 return 0;
2913}
2914
2915/**
2916 * DOC: DM Lifecycle
2917 *
2918 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2919 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2920 * the base driver's device list to be initialized and torn down accordingly.
2921 *
2922 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2923 */
2924
2925static const struct amd_ip_funcs amdgpu_dm_funcs = {
2926 .name = "dm",
2927 .early_init = dm_early_init,
2928 .late_init = dm_late_init,
2929 .sw_init = dm_sw_init,
2930 .sw_fini = dm_sw_fini,
2931 .early_fini = amdgpu_dm_early_fini,
2932 .hw_init = dm_hw_init,
2933 .hw_fini = dm_hw_fini,
2934 .suspend = dm_suspend,
2935 .resume = dm_resume,
2936 .is_idle = dm_is_idle,
2937 .wait_for_idle = dm_wait_for_idle,
2938 .check_soft_reset = dm_check_soft_reset,
2939 .soft_reset = dm_soft_reset,
2940 .set_clockgating_state = dm_set_clockgating_state,
2941 .set_powergating_state = dm_set_powergating_state,
2942};
2943
2944const struct amdgpu_ip_block_version dm_ip_block =
2945{
2946 .type = AMD_IP_BLOCK_TYPE_DCE,
2947 .major = 1,
2948 .minor = 0,
2949 .rev = 0,
2950 .funcs = &amdgpu_dm_funcs,
2951};
2952
2953
2954/**
2955 * DOC: atomic
2956 *
2957 * *WIP*
2958 */
2959
2960 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2961 .fb_create = amdgpu_display_user_framebuffer_create,
2962 .get_format_info = amdgpu_dm_plane_get_format_info,
2963 .atomic_check = amdgpu_dm_atomic_check,
2964 .atomic_commit = drm_atomic_helper_commit,
2965};
2966
2967static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2968 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail,
2969 .atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
2970};
2971
2972static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2973{
2974 struct amdgpu_dm_backlight_caps *caps;
2975 struct drm_connector *conn_base;
2976 struct amdgpu_device *adev;
2977 struct drm_luminance_range_info *luminance_range;
2978
2979 if (aconnector->bl_idx == -1 ||
2980 aconnector->dc_link->connector_signal != SIGNAL_TYPE_EDP)
2981 return;
2982
2983 conn_base = &aconnector->base;
2984 adev = drm_to_adev(conn_base->dev);
2985
2986 caps = &adev->dm.backlight_caps[aconnector->bl_idx];
2987 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2988 caps->aux_support = false;
2989
2990 if (caps->ext_caps->bits.oled == 1 /*||
2991 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2992 caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2993 caps->aux_support = true;
2994
2995 if (amdgpu_backlight == 0)
2996 caps->aux_support = false;
2997 else if (amdgpu_backlight == 1)
2998 caps->aux_support = true;
2999
3000 luminance_range = &conn_base->display_info.luminance_range;
3001
3002 if (luminance_range->max_luminance) {
3003 caps->aux_min_input_signal = luminance_range->min_luminance;
3004 caps->aux_max_input_signal = luminance_range->max_luminance;
3005 } else {
3006 caps->aux_min_input_signal = 0;
3007 caps->aux_max_input_signal = 512;
3008 }
3009}
3010
3011void amdgpu_dm_update_connector_after_detect(
3012 struct amdgpu_dm_connector *aconnector)
3013{
3014 struct drm_connector *connector = &aconnector->base;
3015 struct drm_device *dev = connector->dev;
3016 struct dc_sink *sink;
3017
3018 /* MST handled by drm_mst framework */
3019 if (aconnector->mst_mgr.mst_state == true)
3020 return;
3021
3022 sink = aconnector->dc_link->local_sink;
3023 if (sink)
3024 dc_sink_retain(sink);
3025
3026 /*
3027 * Edid mgmt connector gets first update only in mode_valid hook and then
3028 * the connector sink is set to either fake or physical sink depending on link status.
3029 * Skip if already done during boot.
3030 */
3031 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
3032 && aconnector->dc_em_sink) {
3033
3034 /*
3035 * For S3 resume with headless use eml_sink to fake stream
3036 * because on resume connector->sink is set to NULL
3037 */
3038 mutex_lock(&dev->mode_config.mutex);
3039
3040 if (sink) {
3041 if (aconnector->dc_sink) {
3042 amdgpu_dm_update_freesync_caps(connector, NULL);
3043 /*
3044 * retain and release below are used to
3045 * bump up refcount for sink because the link doesn't point
3046 * to it anymore after disconnect, so on next crtc to connector
3047 * reshuffle by UMD we will get into unwanted dc_sink release
3048 */
3049 dc_sink_release(aconnector->dc_sink);
3050 }
3051 aconnector->dc_sink = sink;
3052 dc_sink_retain(aconnector->dc_sink);
3053 amdgpu_dm_update_freesync_caps(connector,
3054 aconnector->edid);
3055 } else {
3056 amdgpu_dm_update_freesync_caps(connector, NULL);
3057 if (!aconnector->dc_sink) {
3058 aconnector->dc_sink = aconnector->dc_em_sink;
3059 dc_sink_retain(aconnector->dc_sink);
3060 }
3061 }
3062
3063 mutex_unlock(&dev->mode_config.mutex);
3064
3064
3065 if (sink)
3066 dc_sink_release(sink);
4562236b
HW
3067 return;
3068 }
3069
3070 /*
3071 * TODO: temporary guard to look for proper fix
3072 * if this sink is MST sink, we should not do anything
3073 */
dcd5fb82
MF
3074 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
3075 dc_sink_release(sink);
4562236b 3076 return;
dcd5fb82 3077 }
4562236b
HW
3078
3079 if (aconnector->dc_sink == sink) {
3080 /*
3081 * We got a DP short pulse (Link Loss, DP CTS, etc...).
3082 * Do nothing!!
3083 */
3084 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
3085 aconnector->connector_id);
3086 if (sink)
3087 dc_sink_release(sink);
3088 return;
3089 }
3090
3091 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
3092 aconnector->connector_id, aconnector->dc_sink, sink);
3093
3094 mutex_lock(&dev->mode_config.mutex);
3095
3096 /*
3097 * 1. Update status of the drm connector
3098 * 2. Send an event and let userspace tell us what to do
3099 */
4562236b 3100 if (sink) {
3101 /*
3102 * TODO: check if we still need the S3 mode update workaround.
3103 * If yes, put it here.
3104 */
3105 if (aconnector->dc_sink) {
3106 amdgpu_dm_update_freesync_caps(connector, NULL);
3107 dc_sink_release(aconnector->dc_sink);
3108 }
3109
3110 aconnector->dc_sink = sink;
3111 dc_sink_retain(aconnector->dc_sink);
3112 if (sink->dc_edid.length == 0) {
3113 aconnector->edid = NULL;
3114 if (aconnector->dc_link->aux_mode) {
3115 drm_dp_cec_unset_edid(
3116 &aconnector->dm_dp_aux.aux);
3117 }
3118 } else {
3119 aconnector->edid =
3120 (struct edid *)sink->dc_edid.raw_edid;
3121
3122 if (aconnector->dc_link->aux_mode)
3123 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
3124 aconnector->edid);
3125 }
3126
3127 if (!aconnector->timing_requested) {
3128 aconnector->timing_requested =
3129 kzalloc(sizeof(struct dc_crtc_timing), GFP_KERNEL);
3130 if (!aconnector->timing_requested)
3131 dm_error("failed to create aconnector->requested_timing\n");
3132 }
3133
3134 drm_connector_update_edid_property(connector, aconnector->edid);
3135 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
3136 update_connector_ext_caps(aconnector);
3137 } else {
3138 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
3139 amdgpu_dm_update_freesync_caps(connector, NULL);
3140 drm_connector_update_edid_property(connector, NULL);
3141 aconnector->num_modes = 0;
3142 dc_sink_release(aconnector->dc_sink);
3143 aconnector->dc_sink = NULL;
3144 aconnector->edid = NULL;
3145 kfree(aconnector->timing_requested);
3146 aconnector->timing_requested = NULL;
3147 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
3148 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3149 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3150 }
3151
3152 mutex_unlock(&dev->mode_config.mutex);
3153
3154 update_subconnector_property(aconnector);
3155
3156 if (sink)
3157 dc_sink_release(sink);
3158}
3159
3160 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
3161{
3162 struct drm_connector *connector = &aconnector->base;
3163 struct drm_device *dev = connector->dev;
3164 enum dc_connection_type new_connection_type = dc_connection_none;
3165 struct amdgpu_device *adev = drm_to_adev(dev);
3166 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3167 bool ret = false;
3168
3169 if (adev->dm.disable_hpd_irq)
3170 return;
3171
3172 /*
3173 * In case of failure or MST no need to update connector status or notify the OS
3174 * since (for MST case) MST does this in its own context.
3175 */
3176 mutex_lock(&aconnector->hpd_lock);
3177
3178 if (adev->dm.hdcp_workqueue) {
3179 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3180 dm_con_state->update_hdcp = true;
3181 }
3182 if (aconnector->fake_enable)
3183 aconnector->fake_enable = false;
3184
3185 aconnector->timing_changed = false;
3186
3187 if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
3188 DRM_ERROR("KMS: Failed to detect connector\n");
3189
3190 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3191 emulated_link_detect(aconnector->dc_link);
3192
3193 drm_modeset_lock_all(dev);
3194 dm_restore_drm_connector_state(dev, connector);
3195 drm_modeset_unlock_all(dev);
3196
3197 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3198 drm_kms_helper_connector_hotplug_event(connector);
3199 } else {
3200 mutex_lock(&adev->dm.dc_lock);
3201 ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
3202 mutex_unlock(&adev->dm.dc_lock);
3203 if (ret) {
3204 amdgpu_dm_update_connector_after_detect(aconnector);
3205
3206 drm_modeset_lock_all(dev);
3207 dm_restore_drm_connector_state(dev, connector);
3208 drm_modeset_unlock_all(dev);
4562236b 3209
15c735e7
WL
3210 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3211 drm_kms_helper_connector_hotplug_event(connector);
3212 }
4562236b
HW
3213 }
3214 mutex_unlock(&aconnector->hpd_lock);
3215
3216}
3217
e27c41d5
JS
3218static void handle_hpd_irq(void *param)
3219{
3220 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3221
3222 handle_hpd_irq_helper(aconnector);
3223
3224}
3225
8e794421 3226static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
4562236b 3227{
ae67558b
SS
3228 u8 esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3229 u8 dret;
4562236b
HW
3230 bool new_irq_handled = false;
3231 int dpcd_addr;
3232 int dpcd_bytes_to_read;
3233
3234 const int max_process_count = 30;
3235 int process_count = 0;
3236
3237 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3238
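	/* Sinks with DPCD rev >= 1.2 expose the Event Status Indicator (ESI)
	 * block at 0x2002; older sinks only provide the legacy status block
	 * starting at DP_SINK_COUNT (0x200).
	 */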
3239 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3240 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3241 /* DPCD 0x200 - 0x201 for downstream IRQ */
3242 dpcd_addr = DP_SINK_COUNT;
3243 } else {
3244 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3245 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
3246 dpcd_addr = DP_SINK_COUNT_ESI;
3247 }
3248
3249 dret = drm_dp_dpcd_read(
3250 &aconnector->dm_dp_aux.aux,
3251 dpcd_addr,
3252 esi,
3253 dpcd_bytes_to_read);
3254
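	/* Keep servicing the ESI block while the sink reports pending sideband
	 * IRQs, ACKing each batch so the sink can raise the next one. Bail out
	 * after max_process_count iterations to avoid spinning forever.
	 */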
3255 while (dret == dpcd_bytes_to_read &&
3256 process_count < max_process_count) {
ae67558b 3257 u8 retry;
4562236b
HW
3258 dret = 0;
3259
3260 process_count++;
3261
f1ad2f5e 3262 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
4562236b
HW
3263 /* handle HPD short pulse irq */
3264 if (aconnector->mst_mgr.mst_state)
3265 drm_dp_mst_hpd_irq(
3266 &aconnector->mst_mgr,
3267 esi,
3268 &new_irq_handled);
4562236b
HW
3269
3270 if (new_irq_handled) {
 3271 			/* ACK at DPCD to notify the downstream device */
3272 const int ack_dpcd_bytes_to_write =
3273 dpcd_bytes_to_read - 1;
3274
3275 for (retry = 0; retry < 3; retry++) {
ae67558b 3276 u8 wret;
4562236b
HW
3277
3278 wret = drm_dp_dpcd_write(
3279 &aconnector->dm_dp_aux.aux,
3280 dpcd_addr + 1,
3281 &esi[1],
3282 ack_dpcd_bytes_to_write);
3283 if (wret == ack_dpcd_bytes_to_write)
3284 break;
3285 }
3286
1f6010a9 3287 /* check if there is new irq to be handled */
4562236b
HW
3288 dret = drm_dp_dpcd_read(
3289 &aconnector->dm_dp_aux.aux,
3290 dpcd_addr,
3291 esi,
3292 dpcd_bytes_to_read);
3293
3294 new_irq_handled = false;
d4a6e8a9 3295 } else {
4562236b 3296 break;
d4a6e8a9 3297 }
4562236b
HW
3298 }
3299
3300 if (process_count == max_process_count)
f1ad2f5e 3301 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
4562236b
HW
3302}
3303
8e794421
WL
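/* HPD RX handling (e.g. link-loss recovery and automated test requests)
 * may sleep and can take a long time, so defer it to a workqueue instead
 * of running it directly in the interrupt path.
 */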
3304static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3305 union hpd_irq_data hpd_irq_data)
3306{
3307 struct hpd_rx_irq_offload_work *offload_work =
3308 kzalloc(sizeof(*offload_work), GFP_KERNEL);
3309
3310 if (!offload_work) {
3311 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3312 return;
3313 }
3314
3315 INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3316 offload_work->data = hpd_irq_data;
3317 offload_work->offload_wq = offload_wq;
3318
3319 queue_work(offload_wq->wq, &offload_work->work);
 3320 	DRM_DEBUG_KMS("queue work to handle hpd_rx offload work\n");
3321}
3322
4562236b
HW
3323static void handle_hpd_rx_irq(void *param)
3324{
c84dec2f 3325 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
4562236b
HW
3326 struct drm_connector *connector = &aconnector->base;
3327 struct drm_device *dev = connector->dev;
53cbf65c 3328 struct dc_link *dc_link = aconnector->dc_link;
4562236b 3329 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
c8ea79a8 3330 bool result = false;
fbbdadf2 3331 enum dc_connection_type new_connection_type = dc_connection_none;
c8ea79a8 3332 struct amdgpu_device *adev = drm_to_adev(dev);
2a0f9270 3333 union hpd_irq_data hpd_irq_data;
8e794421
WL
3334 bool link_loss = false;
3335 bool has_left_work = false;
e322843e 3336 int idx = dc_link->link_index;
8e794421 3337 struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
2a0f9270
BL
3338
3339 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
4562236b 3340
b972b4f9
HW
3341 if (adev->dm.disable_hpd_irq)
3342 return;
3343
1f6010a9
DF
3344 /*
 3345 	 * TODO: Temporarily add a mutex to protect the hpd interrupt from a gpio
4562236b
HW
 3346 	 * conflict; once an i2c helper is implemented, this mutex should be
 3347 	 * retired.
3348 */
b86e7eef 3349 mutex_lock(&aconnector->hpd_lock);
4562236b 3350
8e794421
WL
3351 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3352 &link_loss, true, &has_left_work);
3083a984 3353
8e794421
WL
3354 if (!has_left_work)
3355 goto out;
3356
3357 if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3358 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3359 goto out;
3360 }
3361
3362 if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3363 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3364 hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3365 dm_handle_mst_sideband_msg(aconnector);
3083a984
QZ
3366 goto out;
3367 }
3083a984 3368
8e794421
WL
3369 if (link_loss) {
3370 bool skip = false;
d2aa1356 3371
8e794421
WL
3372 spin_lock(&offload_wq->offload_lock);
3373 skip = offload_wq->is_handling_link_loss;
3374
3375 if (!skip)
3376 offload_wq->is_handling_link_loss = true;
3377
3378 spin_unlock(&offload_wq->offload_lock);
3379
3380 if (!skip)
3381 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3382
3383 goto out;
3384 }
3385 }
c8ea79a8 3386
3083a984 3387out:
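	/* On SST connectors a handled RX IRQ may indicate a downstream port
	 * status change, so re-run detection; MST root connectors are skipped
	 * here because the MST manager handles topology changes itself.
	 */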
c8ea79a8 3388 if (result && !is_mst_root_connector) {
4562236b 3389 /* Downstream Port status changed. */
54618888 3390 if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
fbbdadf2
BL
3391 DRM_ERROR("KMS: Failed to detect connector\n");
3392
3393 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3394 emulated_link_detect(dc_link);
3395
3396 if (aconnector->fake_enable)
3397 aconnector->fake_enable = false;
3398
3399 amdgpu_dm_update_connector_after_detect(aconnector);
3400
3401
3402 drm_modeset_lock_all(dev);
3403 dm_restore_drm_connector_state(dev, connector);
3404 drm_modeset_unlock_all(dev);
3405
fc320a6f 3406 drm_kms_helper_connector_hotplug_event(connector);
15c735e7
WL
3407 } else {
3408 bool ret = false;
88ac3dda 3409
15c735e7
WL
3410 mutex_lock(&adev->dm.dc_lock);
3411 ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX);
3412 mutex_unlock(&adev->dm.dc_lock);
88ac3dda 3413
15c735e7
WL
3414 if (ret) {
3415 if (aconnector->fake_enable)
3416 aconnector->fake_enable = false;
4562236b 3417
15c735e7 3418 amdgpu_dm_update_connector_after_detect(aconnector);
4562236b 3419
15c735e7
WL
3420 drm_modeset_lock_all(dev);
3421 dm_restore_drm_connector_state(dev, connector);
3422 drm_modeset_unlock_all(dev);
4562236b 3423
15c735e7
WL
3424 drm_kms_helper_connector_hotplug_event(connector);
3425 }
4562236b
HW
3426 }
3427 }
95f247e7
DC
3428 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3429 if (adev->dm.hdcp_workqueue)
3430 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
3431 }
4562236b 3432
b86e7eef 3433 if (dc_link->type != dc_connection_mst_branch)
e86e8947 3434 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
b86e7eef
NC
3435
3436 mutex_unlock(&aconnector->hpd_lock);
4562236b
HW
3437}
3438
3439static void register_hpd_handlers(struct amdgpu_device *adev)
3440{
4a580877 3441 struct drm_device *dev = adev_to_drm(adev);
4562236b 3442 struct drm_connector *connector;
c84dec2f 3443 struct amdgpu_dm_connector *aconnector;
4562236b
HW
3444 const struct dc_link *dc_link;
3445 struct dc_interrupt_params int_params = {0};
3446
3447 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3448 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3449
3450 list_for_each_entry(connector,
3451 &dev->mode_config.connector_list, head) {
3452
c84dec2f 3453 aconnector = to_amdgpu_dm_connector(connector);
4562236b
HW
3454 dc_link = aconnector->dc_link;
3455
 3456 		if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
3457 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3458 int_params.irq_source = dc_link->irq_source_hpd;
3459
3460 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3461 handle_hpd_irq,
3462 (void *) aconnector);
3463 }
3464
 3465 		if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
3466
3467 /* Also register for DP short pulse (hpd_rx). */
3468 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3469 int_params.irq_source = dc_link->irq_source_hpd_rx;
3470
3471 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3472 handle_hpd_rx_irq,
3473 (void *) aconnector);
8e794421
WL
3474
3475 if (adev->dm.hpd_rx_offload_wq)
e322843e 3476 adev->dm.hpd_rx_offload_wq[dc_link->link_index].aconnector =
8e794421 3477 aconnector;
4562236b
HW
3478 }
3479 }
3480}
3481
55e56389
MR
3482#if defined(CONFIG_DRM_AMD_DC_SI)
3483/* Register IRQ sources and initialize IRQ callbacks */
3484static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3485{
3486 struct dc *dc = adev->dm.dc;
3487 struct common_irq_params *c_irq_params;
3488 struct dc_interrupt_params int_params = {0};
3489 int r;
3490 int i;
 3491 	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3492
3493 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3494 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3495
3496 /*
3497 * Actions of amdgpu_irq_add_id():
3498 * 1. Register a set() function with base driver.
3499 * Base driver will call set() function to enable/disable an
3500 * interrupt in DC hardware.
3501 * 2. Register amdgpu_dm_irq_handler().
3502 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3503 * coming from DC hardware.
3504 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3505 * for acknowledging and handling. */
3506
3507 /* Use VBLANK interrupt */
3508 for (i = 0; i < adev->mode_info.num_crtc; i++) {
 3509 		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3510 if (r) {
3511 DRM_ERROR("Failed to add crtc irq id!\n");
3512 return r;
3513 }
3514
3515 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3516 int_params.irq_source =
 3517 			dc_interrupt_to_irq_source(dc, i + 1, 0);
3518
3519 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3520
3521 c_irq_params->adev = adev;
3522 c_irq_params->irq_src = int_params.irq_source;
3523
3524 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3525 dm_crtc_high_irq, c_irq_params);
3526 }
3527
3528 /* Use GRPH_PFLIP interrupt */
3529 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3530 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3531 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3532 if (r) {
3533 DRM_ERROR("Failed to add page flip irq id!\n");
3534 return r;
3535 }
3536
3537 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3538 int_params.irq_source =
3539 dc_interrupt_to_irq_source(dc, i, 0);
3540
3541 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3542
3543 c_irq_params->adev = adev;
3544 c_irq_params->irq_src = int_params.irq_source;
3545
3546 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3547 dm_pflip_high_irq, c_irq_params);
3548
3549 }
3550
3551 /* HPD */
3552 r = amdgpu_irq_add_id(adev, client_id,
3553 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3554 if (r) {
3555 DRM_ERROR("Failed to add hpd irq id!\n");
3556 return r;
3557 }
3558
3559 register_hpd_handlers(adev);
3560
3561 return 0;
3562}
3563#endif
3564
4562236b
HW
3565/* Register IRQ sources and initialize IRQ callbacks */
3566static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3567{
3568 struct dc *dc = adev->dm.dc;
3569 struct common_irq_params *c_irq_params;
3570 struct dc_interrupt_params int_params = {0};
3571 int r;
3572 int i;
1ffdeca6 3573 	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2c8ad2d5 3574
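	/* Vega (FAMILY_AI) and newer route DCE interrupts through the SOC15 IH client. */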
c08182f2 3575 if (adev->family >= AMDGPU_FAMILY_AI)
3760f76c 3576 client_id = SOC15_IH_CLIENTID_DCE;
4562236b
HW
3577
3578 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3579 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3580
1f6010a9
DF
3581 /*
3582 * Actions of amdgpu_irq_add_id():
4562236b
HW
3583 * 1. Register a set() function with base driver.
3584 * Base driver will call set() function to enable/disable an
3585 * interrupt in DC hardware.
3586 * 2. Register amdgpu_dm_irq_handler().
3587 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3588 * coming from DC hardware.
3589 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3590 * for acknowledging and handling. */
3591
b57de80a 3592 /* Use VBLANK interrupt */
e9029155 3593 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2c8ad2d5 3594 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
4562236b
HW
3595 if (r) {
3596 DRM_ERROR("Failed to add crtc irq id!\n");
3597 return r;
3598 }
3599
3600 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3601 int_params.irq_source =
3d761e79 3602 dc_interrupt_to_irq_source(dc, i, 0);
4562236b 3603
b57de80a 3604 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
4562236b
HW
3605
3606 c_irq_params->adev = adev;
3607 c_irq_params->irq_src = int_params.irq_source;
3608
3609 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3610 dm_crtc_high_irq, c_irq_params);
3611 }
3612
d2574c33
MK
3613 /* Use VUPDATE interrupt */
3614 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3615 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3616 if (r) {
3617 DRM_ERROR("Failed to add vupdate irq id!\n");
3618 return r;
3619 }
3620
3621 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3622 int_params.irq_source =
3623 dc_interrupt_to_irq_source(dc, i, 0);
3624
3625 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3626
3627 c_irq_params->adev = adev;
3628 c_irq_params->irq_src = int_params.irq_source;
3629
3630 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3631 dm_vupdate_high_irq, c_irq_params);
3632 }
3633
3d761e79 3634 /* Use GRPH_PFLIP interrupt */
4562236b
HW
3635 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3636 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2c8ad2d5 3637 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
4562236b
HW
3638 if (r) {
3639 DRM_ERROR("Failed to add page flip irq id!\n");
3640 return r;
3641 }
3642
3643 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3644 int_params.irq_source =
3645 dc_interrupt_to_irq_source(dc, i, 0);
3646
3647 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3648
3649 c_irq_params->adev = adev;
3650 c_irq_params->irq_src = int_params.irq_source;
3651
3652 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3653 dm_pflip_high_irq, c_irq_params);
3654
3655 }
3656
3657 /* HPD */
2c8ad2d5
AD
3658 r = amdgpu_irq_add_id(adev, client_id,
3659 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
4562236b
HW
3660 if (r) {
3661 DRM_ERROR("Failed to add hpd irq id!\n");
3662 return r;
3663 }
3664
3665 register_hpd_handlers(adev);
3666
3667 return 0;
3668}
3669
ff5ef992
AD
3670/* Register IRQ sources and initialize IRQ callbacks */
3671static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3672{
3673 struct dc *dc = adev->dm.dc;
3674 struct common_irq_params *c_irq_params;
3675 struct dc_interrupt_params int_params = {0};
3676 int r;
3677 int i;
660d5406
WL
3678#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3679 static const unsigned int vrtl_int_srcid[] = {
3680 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3681 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3682 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3683 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3684 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3685 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3686 };
3687#endif
ff5ef992
AD
3688
3689 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3690 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3691
1f6010a9
DF
3692 /*
3693 * Actions of amdgpu_irq_add_id():
ff5ef992
AD
3694 * 1. Register a set() function with base driver.
3695 * Base driver will call set() function to enable/disable an
3696 * interrupt in DC hardware.
3697 * 2. Register amdgpu_dm_irq_handler().
3698 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3699 * coming from DC hardware.
3700 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3701 * for acknowledging and handling.
1f6010a9 3702 */
ff5ef992
AD
3703
3704 /* Use VSTARTUP interrupt */
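	/* On DCN the per-OTG VSTARTUP interrupt backs the DRM vblank event
	 * (dm_crtc_high_irq), taking the role the VBLANK interrupt has on DCE.
	 */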
3705 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3706 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3707 i++) {
3760f76c 3708 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
ff5ef992
AD
3709
3710 if (r) {
3711 DRM_ERROR("Failed to add crtc irq id!\n");
3712 return r;
3713 }
3714
3715 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3716 int_params.irq_source =
3717 dc_interrupt_to_irq_source(dc, i, 0);
3718
3719 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3720
3721 c_irq_params->adev = adev;
3722 c_irq_params->irq_src = int_params.irq_source;
3723
2346ef47
NK
3724 amdgpu_dm_irq_register_interrupt(
3725 adev, &int_params, dm_crtc_high_irq, c_irq_params);
3726 }
3727
86bc2219
WL
3728 /* Use otg vertical line interrupt */
3729#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
660d5406
WL
3730 for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3731 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3732 vrtl_int_srcid[i], &adev->vline0_irq);
86bc2219
WL
3733
3734 if (r) {
3735 DRM_ERROR("Failed to add vline0 irq id!\n");
3736 return r;
3737 }
3738
3739 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3740 int_params.irq_source =
660d5406
WL
3741 dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3742
3743 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3744 DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3745 break;
3746 }
86bc2219
WL
3747
3748 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3749 - DC_IRQ_SOURCE_DC1_VLINE0];
3750
3751 c_irq_params->adev = adev;
3752 c_irq_params->irq_src = int_params.irq_source;
3753
3754 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3755 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3756 }
3757#endif
3758
2346ef47
NK
3759 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3760 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3761 * to trigger at end of each vblank, regardless of state of the lock,
3762 * matching DCE behaviour.
3763 */
3764 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3765 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3766 i++) {
3767 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3768
3769 if (r) {
3770 DRM_ERROR("Failed to add vupdate irq id!\n");
3771 return r;
3772 }
3773
3774 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3775 int_params.irq_source =
3776 dc_interrupt_to_irq_source(dc, i, 0);
3777
3778 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3779
3780 c_irq_params->adev = adev;
3781 c_irq_params->irq_src = int_params.irq_source;
3782
ff5ef992 3783 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2346ef47 3784 dm_vupdate_high_irq, c_irq_params);
d2574c33
MK
3785 }
3786
ff5ef992
AD
3787 /* Use GRPH_PFLIP interrupt */
3788 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
de95753c 3789 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
ff5ef992 3790 i++) {
3760f76c 3791 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
ff5ef992
AD
3792 if (r) {
3793 DRM_ERROR("Failed to add page flip irq id!\n");
3794 return r;
3795 }
3796
3797 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3798 int_params.irq_source =
3799 dc_interrupt_to_irq_source(dc, i, 0);
3800
3801 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3802
3803 c_irq_params->adev = adev;
3804 c_irq_params->irq_src = int_params.irq_source;
3805
3806 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3807 dm_pflip_high_irq, c_irq_params);
3808
3809 }
3810
81927e28
JS
3811 /* HPD */
3812 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3813 &adev->hpd_irq);
3814 if (r) {
3815 DRM_ERROR("Failed to add hpd irq id!\n");
3816 return r;
3817 }
a08f16cf 3818
81927e28 3819 register_hpd_handlers(adev);
a08f16cf 3820
81927e28
JS
3821 return 0;
3822}
3823/* Register Outbox IRQ sources and initialize IRQ callbacks */
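/* The DMUB outbox carries notifications (e.g. AUX transaction results and
 * HPD events) from the display firmware back to the driver.
 */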
3824static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3825{
3826 struct dc *dc = adev->dm.dc;
3827 struct common_irq_params *c_irq_params;
3828 struct dc_interrupt_params int_params = {0};
3829 int r, i;
3830
3831 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3832 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3833
3834 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3835 &adev->dmub_outbox_irq);
3836 if (r) {
3837 DRM_ERROR("Failed to add outbox irq id!\n");
3838 return r;
3839 }
3840
3841 if (dc->ctx->dmub_srv) {
3842 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3843 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
a08f16cf 3844 int_params.irq_source =
81927e28 3845 dc_interrupt_to_irq_source(dc, i, 0);
a08f16cf 3846
81927e28 3847 c_irq_params = &adev->dm.dmub_outbox_params[0];
a08f16cf
LHM
3848
3849 c_irq_params->adev = adev;
3850 c_irq_params->irq_src = int_params.irq_source;
3851
3852 amdgpu_dm_irq_register_interrupt(adev, &int_params,
81927e28 3853 dm_dmub_outbox1_low_irq, c_irq_params);
ff5ef992
AD
3854 }
3855
ff5ef992
AD
3856 return 0;
3857}
ff5ef992 3858
eb3dc897
NK
3859/*
3860 * Acquires the lock for the atomic state object and returns
3861 * the new atomic state.
3862 *
3863 * This should only be called during atomic check.
3864 */
17ce8a69
RL
3865int dm_atomic_get_state(struct drm_atomic_state *state,
3866 struct dm_atomic_state **dm_state)
eb3dc897
NK
3867{
3868 struct drm_device *dev = state->dev;
1348969a 3869 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897
NK
3870 struct amdgpu_display_manager *dm = &adev->dm;
3871 struct drm_private_state *priv_state;
eb3dc897
NK
3872
3873 if (*dm_state)
3874 return 0;
3875
eb3dc897
NK
3876 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3877 if (IS_ERR(priv_state))
3878 return PTR_ERR(priv_state);
3879
3880 *dm_state = to_dm_atomic_state(priv_state);
3881
3882 return 0;
3883}
3884
dfd84d90 3885static struct dm_atomic_state *
eb3dc897
NK
3886dm_atomic_get_new_state(struct drm_atomic_state *state)
3887{
3888 struct drm_device *dev = state->dev;
1348969a 3889 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897
NK
3890 struct amdgpu_display_manager *dm = &adev->dm;
3891 struct drm_private_obj *obj;
3892 struct drm_private_state *new_obj_state;
3893 int i;
3894
3895 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3896 if (obj->funcs == dm->atomic_obj.funcs)
3897 return to_dm_atomic_state(new_obj_state);
3898 }
3899
3900 return NULL;
3901}
3902
eb3dc897
NK
3903static struct drm_private_state *
3904dm_atomic_duplicate_state(struct drm_private_obj *obj)
3905{
3906 struct dm_atomic_state *old_state, *new_state;
3907
3908 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3909 if (!new_state)
3910 return NULL;
3911
3912 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3913
813d20dc
AW
3914 old_state = to_dm_atomic_state(obj->state);
3915
3916 if (old_state && old_state->context)
3917 new_state->context = dc_copy_state(old_state->context);
3918
eb3dc897
NK
3919 if (!new_state->context) {
3920 kfree(new_state);
3921 return NULL;
3922 }
3923
eb3dc897
NK
3924 return &new_state->base;
3925}
3926
3927static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3928 struct drm_private_state *state)
3929{
3930 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3931
3932 if (dm_state && dm_state->context)
3933 dc_release_state(dm_state->context);
3934
3935 kfree(dm_state);
3936}
3937
3938static struct drm_private_state_funcs dm_atomic_state_funcs = {
3939 .atomic_duplicate_state = dm_atomic_duplicate_state,
3940 .atomic_destroy_state = dm_atomic_destroy_state,
3941};
3942
4562236b
HW
3943static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3944{
eb3dc897 3945 struct dm_atomic_state *state;
4562236b
HW
3946 int r;
3947
3948 adev->mode_info.mode_config_initialized = true;
3949
4a580877
LT
3950 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3951 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
4562236b 3952
4a580877
LT
3953 adev_to_drm(adev)->mode_config.max_width = 16384;
3954 adev_to_drm(adev)->mode_config.max_height = 16384;
4562236b 3955
4a580877 3956 adev_to_drm(adev)->mode_config.preferred_depth = 24;
a6250bdb
AD
3957 if (adev->asic_type == CHIP_HAWAII)
3958 /* disable prefer shadow for now due to hibernation issues */
3959 adev_to_drm(adev)->mode_config.prefer_shadow = 0;
3960 else
3961 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
1f6010a9 3962 /* indicates support for immediate flip */
4a580877 3963 adev_to_drm(adev)->mode_config.async_page_flip = true;
4562236b 3964
eb3dc897
NK
3965 state = kzalloc(sizeof(*state), GFP_KERNEL);
3966 if (!state)
3967 return -ENOMEM;
3968
813d20dc 3969 state->context = dc_create_state(adev->dm.dc);
eb3dc897
NK
3970 if (!state->context) {
3971 kfree(state);
3972 return -ENOMEM;
3973 }
3974
3975 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3976
4a580877 3977 drm_atomic_private_obj_init(adev_to_drm(adev),
8c1a765b 3978 &adev->dm.atomic_obj,
eb3dc897
NK
3979 &state->base,
3980 &dm_atomic_state_funcs);
3981
3dc9b1ce 3982 r = amdgpu_display_modeset_create_props(adev);
b67a468a
DL
3983 if (r) {
3984 dc_release_state(state->context);
3985 kfree(state);
4562236b 3986 return r;
b67a468a 3987 }
4562236b 3988
6ce8f316 3989 r = amdgpu_dm_audio_init(adev);
b67a468a
DL
3990 if (r) {
3991 dc_release_state(state->context);
3992 kfree(state);
6ce8f316 3993 return r;
b67a468a 3994 }
6ce8f316 3995
4562236b
HW
3996 return 0;
3997}
3998
206bbafe
DF
3999#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
4000#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
94562810 4001#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
206bbafe 4002
7fd13bae
AD
4003static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
4004 int bl_idx)
206bbafe
DF
4005{
4006#if defined(CONFIG_ACPI)
4007 struct amdgpu_dm_backlight_caps caps;
4008
58965855
FS
4009 memset(&caps, 0, sizeof(caps));
4010
7fd13bae 4011 if (dm->backlight_caps[bl_idx].caps_valid)
206bbafe
DF
4012 return;
4013
f9b7f370 4014 amdgpu_acpi_get_backlight_caps(&caps);
206bbafe 4015 if (caps.caps_valid) {
7fd13bae 4016 dm->backlight_caps[bl_idx].caps_valid = true;
94562810
RS
4017 if (caps.aux_support)
4018 return;
7fd13bae
AD
4019 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
4020 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
206bbafe 4021 } else {
7fd13bae 4022 dm->backlight_caps[bl_idx].min_input_signal =
206bbafe 4023 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
7fd13bae 4024 dm->backlight_caps[bl_idx].max_input_signal =
206bbafe
DF
4025 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
4026 }
4027#else
7fd13bae 4028 if (dm->backlight_caps[bl_idx].aux_support)
94562810
RS
4029 return;
4030
7fd13bae
AD
4031 dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
4032 dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
206bbafe
DF
4033#endif
4034}
4035
69d9f427
AM
4036static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
 4037 				unsigned int *min, unsigned int *max)
94562810 4038{
94562810 4039 if (!caps)
69d9f427 4040 return 0;
94562810 4041
69d9f427
AM
4042 if (caps->aux_support) {
4043 // Firmware limits are in nits, DC API wants millinits.
4044 *max = 1000 * caps->aux_max_input_signal;
4045 *min = 1000 * caps->aux_min_input_signal;
94562810 4046 } else {
69d9f427
AM
4047 // Firmware limits are 8-bit, PWM control is 16-bit.
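		// Multiplying by 0x101 replicates the byte into both halves:
		// 0xff * 0x101 == 0xffff, so the full 16-bit range is reached exactly.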
4048 *max = 0x101 * caps->max_input_signal;
4049 *min = 0x101 * caps->min_input_signal;
94562810 4050 }
69d9f427
AM
4051 return 1;
4052}
94562810 4053
69d9f427
AM
4054static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
4055 uint32_t brightness)
4056{
 4057 	unsigned int min, max;
94562810 4058
69d9f427
AM
4059 if (!get_brightness_range(caps, &min, &max))
4060 return brightness;
4061
4062 // Rescale 0..255 to min..max
4063 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
4064 AMDGPU_MAX_BL_LEVEL);
4065}
4066
4067static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
4068 uint32_t brightness)
4069{
 4070 	unsigned int min, max;
4071
4072 if (!get_brightness_range(caps, &min, &max))
4073 return brightness;
4074
4075 if (brightness < min)
4076 return 0;
4077 // Rescale min..max to 0..255
4078 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
4079 max - min);
94562810
RS
4080}
4081
4052287a 4082static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
7fd13bae 4083 int bl_idx,
3d6c9164 4084 u32 user_brightness)
4562236b 4085{
206bbafe 4086 struct amdgpu_dm_backlight_caps caps;
7fd13bae
AD
4087 struct dc_link *link;
4088 u32 brightness;
94562810 4089 bool rc;
4562236b 4090
7fd13bae
AD
4091 amdgpu_dm_update_backlight_caps(dm, bl_idx);
4092 caps = dm->backlight_caps[bl_idx];
94562810 4093
7fd13bae 4094 dm->brightness[bl_idx] = user_brightness;
1f579254
AD
4095 /* update scratch register */
4096 if (bl_idx == 0)
4097 amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
7fd13bae
AD
4098 brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
4099 link = (struct dc_link *)dm->backlight_link[bl_idx];
94562810 4100
3d6c9164 4101 /* Change brightness based on AUX property */
118b4627 4102 if (caps.aux_support) {
7fd13bae
AD
4103 rc = dc_link_set_backlight_level_nits(link, true, brightness,
4104 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
4105 if (!rc)
4106 DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
118b4627 4107 } else {
7fd13bae
AD
4108 rc = dc_link_set_backlight_level(link, brightness, 0);
4109 if (!rc)
4110 DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
118b4627 4111 }
94562810 4112
4052287a
S
4113 if (rc)
4114 dm->actual_brightness[bl_idx] = user_brightness;
4562236b
HW
4115}
4116
3d6c9164 4117static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
4562236b 4118{
620a0d27 4119 struct amdgpu_display_manager *dm = bl_get_data(bd);
7fd13bae 4120 int i;
3d6c9164 4121
7fd13bae
AD
4122 for (i = 0; i < dm->num_of_edps; i++) {
4123 if (bd == dm->backlight_dev[i])
4124 break;
4125 }
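	/* If bd was not found (should not happen), fall back to the first eDP. */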
4126 if (i >= AMDGPU_DM_MAX_NUM_EDP)
4127 i = 0;
4128 amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
3d6c9164
AD
4129
4130 return 0;
4131}
4132
7fd13bae
AD
4133static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
4134 int bl_idx)
3d6c9164 4135{
0ad3e64e 4136 struct amdgpu_dm_backlight_caps caps;
7fd13bae 4137 struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
0ad3e64e 4138
7fd13bae
AD
4139 amdgpu_dm_update_backlight_caps(dm, bl_idx);
4140 caps = dm->backlight_caps[bl_idx];
620a0d27 4141
0ad3e64e 4142 if (caps.aux_support) {
0ad3e64e
AD
4143 u32 avg, peak;
4144 bool rc;
4145
4146 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
4147 if (!rc)
7fd13bae 4148 return dm->brightness[bl_idx];
0ad3e64e
AD
4149 return convert_brightness_to_user(&caps, avg);
4150 } else {
7fd13bae 4151 int ret = dc_link_get_backlight_level(link);
0ad3e64e
AD
4152
4153 if (ret == DC_ERROR_UNEXPECTED)
7fd13bae 4154 return dm->brightness[bl_idx];
0ad3e64e
AD
4155 return convert_brightness_to_user(&caps, ret);
4156 }
4562236b
HW
4157}
4158
3d6c9164
AD
4159static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4160{
4161 struct amdgpu_display_manager *dm = bl_get_data(bd);
7fd13bae 4162 int i;
3d6c9164 4163
7fd13bae
AD
4164 for (i = 0; i < dm->num_of_edps; i++) {
4165 if (bd == dm->backlight_dev[i])
4166 break;
4167 }
4168 if (i >= AMDGPU_DM_MAX_NUM_EDP)
4169 i = 0;
4170 return amdgpu_dm_backlight_get_level(dm, i);
3d6c9164
AD
4171}
4172
4562236b 4173static const struct backlight_ops amdgpu_dm_backlight_ops = {
bb264220 4174 .options = BL_CORE_SUSPENDRESUME,
4562236b
HW
4175 .get_brightness = amdgpu_dm_backlight_get_brightness,
4176 .update_status = amdgpu_dm_backlight_update_status,
4177};
4178
7578ecda 4179static void
213eca2b 4180amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
4562236b 4181{
213eca2b
HG
4182 struct drm_device *drm = aconnector->base.dev;
4183 struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm;
4562236b 4184 struct backlight_properties props = { 0 };
213eca2b 4185 char bl_name[16];
4562236b 4186
62f03dad
HG
4187 if (aconnector->bl_idx == -1)
4188 return;
4189
da11ef83 4190 if (!acpi_video_backlight_use_native()) {
213eca2b 4191 drm_info(drm, "Skipping amdgpu DM backlight registration\n");
c0f50c5d
HG
4192 /* Try registering an ACPI video backlight device instead. */
4193 acpi_video_register_backlight();
da11ef83
HG
4194 return;
4195 }
4196
4562236b 4197 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
53a53f86 4198 props.brightness = AMDGPU_MAX_BL_LEVEL;
4562236b
HW
4199 props.type = BACKLIGHT_RAW;
4200
4201 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
213eca2b 4202 drm->primary->index + aconnector->bl_idx);
4562236b 4203
213eca2b 4204 dm->backlight_dev[aconnector->bl_idx] =
62f03dad 4205 backlight_device_register(bl_name, aconnector->base.kdev, dm,
213eca2b 4206 &amdgpu_dm_backlight_ops, &props);
4562236b 4207
213eca2b 4208 if (IS_ERR(dm->backlight_dev[aconnector->bl_idx])) {
4562236b 4209 DRM_ERROR("DM: Backlight registration failed!\n");
213eca2b 4210 dm->backlight_dev[aconnector->bl_idx] = NULL;
4db231d7 4211 } else
f1ad2f5e 4212 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4562236b 4213}
4562236b 4214
df534fff 4215static int initialize_plane(struct amdgpu_display_manager *dm,
b2fddb13 4216 struct amdgpu_mode_info *mode_info, int plane_id,
cc1fec57
NK
4217 enum drm_plane_type plane_type,
4218 const struct dc_plane_cap *plane_cap)
df534fff 4219{
f180b4bc 4220 struct drm_plane *plane;
df534fff
S
4221 unsigned long possible_crtcs;
4222 int ret = 0;
4223
f180b4bc 4224 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
df534fff
S
4225 if (!plane) {
4226 DRM_ERROR("KMS: Failed to allocate plane\n");
4227 return -ENOMEM;
4228 }
b2fddb13 4229 plane->type = plane_type;
df534fff
S
4230
4231 /*
b2fddb13
NK
4232 * HACK: IGT tests expect that the primary plane for a CRTC
4233 * can only have one possible CRTC. Only expose support for
4234 * any CRTC if they're not going to be used as a primary plane
4235 * for a CRTC - like overlay or underlay planes.
df534fff
S
4236 */
4237 possible_crtcs = 1 << plane_id;
4238 if (plane_id >= dm->dc->caps.max_streams)
4239 possible_crtcs = 0xff;
4240
cc1fec57 4241 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
df534fff
S
4242
4243 if (ret) {
4244 DRM_ERROR("KMS: Failed to initialize plane\n");
54087768 4245 kfree(plane);
df534fff
S
4246 return ret;
4247 }
4248
54087768
NK
4249 if (mode_info)
4250 mode_info->planes[plane_id] = plane;
4251
df534fff
S
4252 return ret;
4253}
4254
89fc8d4e 4255
618e51cd
HG
4256static void setup_backlight_device(struct amdgpu_display_manager *dm,
4257 struct amdgpu_dm_connector *aconnector)
89fc8d4e 4258{
f196198c 4259 struct dc_link *link = aconnector->dc_link;
ceb4a561 4260 int bl_idx = dm->num_of_edps;
89fc8d4e 4261
ceb4a561
HG
4262 if (!(link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) ||
4263 link->type == dc_connection_none)
4264 return;
4265
4266 if (dm->num_of_edps >= AMDGPU_DM_MAX_NUM_EDP) {
 4267 		drm_warn(adev_to_drm(dm->adev), "Too many eDP connections, skipping backlight setup for additional eDPs\n");
4268 return;
89fc8d4e 4269 }
ceb4a561 4270
f196198c
HG
4271 aconnector->bl_idx = bl_idx;
4272
618e51cd
HG
4273 amdgpu_dm_update_backlight_caps(dm, bl_idx);
4274 dm->brightness[bl_idx] = AMDGPU_MAX_BL_LEVEL;
ceb4a561
HG
4275 dm->backlight_link[bl_idx] = link;
4276 dm->num_of_edps++;
618e51cd
HG
4277
4278 update_connector_ext_caps(aconnector);
89fc8d4e
HW
4279}
4280
acc96ae0 4281static void amdgpu_set_panel_orientation(struct drm_connector *connector);
89fc8d4e 4282
1f6010a9
DF
4283/*
4284 * In this architecture, the association
4562236b
HW
4285 * connector -> encoder -> crtc
4286 * id not really requried. The crtc and connector will hold the
4287 * display_index as an abstraction to use with DAL component
4288 *
4289 * Returns 0 on success
4290 */
7578ecda 4291static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4562236b
HW
4292{
4293 struct amdgpu_display_manager *dm = &adev->dm;
ae67558b 4294 s32 i;
c84dec2f 4295 struct amdgpu_dm_connector *aconnector = NULL;
f2a0f5e6 4296 struct amdgpu_encoder *aencoder = NULL;
d4e13b0d 4297 struct amdgpu_mode_info *mode_info = &adev->mode_info;
ae67558b
SS
4298 u32 link_cnt;
4299 s32 primary_planes;
fbbdadf2 4300 enum dc_connection_type new_connection_type = dc_connection_none;
cc1fec57 4301 const struct dc_plane_cap *plane;
9470620e 4302 bool psr_feature_enabled = false;
35f33086 4303 int max_overlay = dm->dc->caps.max_slave_planes;
4562236b 4304
d58159de
AD
4305 dm->display_indexes_num = dm->dc->caps.max_streams;
4306 /* Update the actual used number of crtc */
4307 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4308
60971b20 4309 amdgpu_dm_set_irq_funcs(adev);
4310
4562236b 4311 link_cnt = dm->dc->caps.max_links;
4562236b
HW
4312 if (amdgpu_dm_mode_config_init(dm->adev)) {
4313 DRM_ERROR("DM: Failed to initialize mode config\n");
59d0f396 4314 return -EINVAL;
4562236b
HW
4315 }
4316
b2fddb13
NK
4317 /* There is one primary plane per CRTC */
4318 primary_planes = dm->dc->caps.max_streams;
54087768 4319 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
efa6a8b7 4320
b2fddb13
NK
4321 /*
4322 * Initialize primary planes, implicit planes for legacy IOCTLS.
4323 * Order is reversed to match iteration order in atomic check.
4324 */
4325 for (i = (primary_planes - 1); i >= 0; i--) {
cc1fec57
NK
4326 plane = &dm->dc->caps.planes[i];
4327
b2fddb13 4328 if (initialize_plane(dm, mode_info, i,
cc1fec57 4329 DRM_PLANE_TYPE_PRIMARY, plane)) {
df534fff 4330 DRM_ERROR("KMS: Failed to initialize primary plane\n");
cd8a2ae8 4331 goto fail;
d4e13b0d 4332 }
df534fff 4333 }
92f3ac40 4334
0d579c7e
NK
4335 /*
4336 * Initialize overlay planes, index starting after primary planes.
4337 * These planes have a higher DRM index than the primary planes since
4338 * they should be considered as having a higher z-order.
4339 * Order is reversed to match iteration order in atomic check.
cc1fec57
NK
4340 *
4341 * Only support DCN for now, and only expose one so we don't encourage
4342 * userspace to use up all the pipes.
0d579c7e 4343 */
cc1fec57
NK
4344 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4345 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4346
8813381a
LL
4347 /* Do not create overlay if MPO disabled */
4348 if (amdgpu_dc_debug_mask & DC_DISABLE_MPO)
4349 break;
4350
cc1fec57
NK
4351 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4352 continue;
4353
ea36ad34 4354 if (!plane->pixel_format_support.argb8888)
cc1fec57
NK
4355 continue;
4356
35f33086
BL
4357 if (max_overlay-- == 0)
4358 break;
4359
54087768 4360 if (initialize_plane(dm, NULL, primary_planes + i,
cc1fec57 4361 DRM_PLANE_TYPE_OVERLAY, plane)) {
0d579c7e 4362 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
cd8a2ae8 4363 goto fail;
d4e13b0d
AD
4364 }
4365 }
4562236b 4366
d4e13b0d 4367 for (i = 0; i < dm->dc->caps.max_streams; i++)
f180b4bc 4368 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4562236b 4369 DRM_ERROR("KMS: Failed to initialize crtc\n");
cd8a2ae8 4370 goto fail;
4562236b 4371 }
4562236b 4372
81927e28 4373 /* Use Outbox interrupt */
1d789535 4374 switch (adev->ip_versions[DCE_HWIP][0]) {
c08182f2
AD
4375 case IP_VERSION(3, 0, 0):
4376 case IP_VERSION(3, 1, 2):
4377 case IP_VERSION(3, 1, 3):
e850f6b1 4378 case IP_VERSION(3, 1, 4):
b5b8ed44 4379 case IP_VERSION(3, 1, 5):
de7cc1b4 4380 case IP_VERSION(3, 1, 6):
577359ca
AP
4381 case IP_VERSION(3, 2, 0):
4382 case IP_VERSION(3, 2, 1):
c08182f2 4383 case IP_VERSION(2, 1, 0):
81927e28
JS
4384 if (register_outbox_irq_handlers(dm->adev)) {
4385 DRM_ERROR("DM: Failed to initialize IRQ\n");
4386 goto fail;
4387 }
4388 break;
4389 default:
c08182f2 4390 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
1d789535 4391 adev->ip_versions[DCE_HWIP][0]);
81927e28 4392 }
9470620e
NK
4393
4394 /* Determine whether to enable PSR support by default. */
4395 if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4396 switch (adev->ip_versions[DCE_HWIP][0]) {
4397 case IP_VERSION(3, 1, 2):
4398 case IP_VERSION(3, 1, 3):
e850f6b1 4399 case IP_VERSION(3, 1, 4):
b5b8ed44 4400 case IP_VERSION(3, 1, 5):
de7cc1b4 4401 case IP_VERSION(3, 1, 6):
577359ca
AP
4402 case IP_VERSION(3, 2, 0):
4403 case IP_VERSION(3, 2, 1):
9470620e
NK
4404 psr_feature_enabled = true;
4405 break;
4406 default:
4407 psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4408 break;
4409 }
4410 }
81927e28 4411
4562236b
HW
4412 /* loops over all connectors on the board */
4413 for (i = 0; i < link_cnt; i++) {
89fc8d4e 4414 struct dc_link *link = NULL;
4562236b
HW
4415
4416 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4417 DRM_ERROR(
4418 "KMS: Cannot support more than %d display indexes\n",
4419 AMDGPU_DM_MAX_DISPLAY_INDEX);
4420 continue;
4421 }
4422
4423 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4424 if (!aconnector)
cd8a2ae8 4425 goto fail;
4562236b
HW
4426
4427 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
8440c304 4428 if (!aencoder)
cd8a2ae8 4429 goto fail;
4562236b
HW
4430
4431 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4432 DRM_ERROR("KMS: Failed to initialize encoder\n");
cd8a2ae8 4433 goto fail;
4562236b
HW
4434 }
4435
4436 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4437 DRM_ERROR("KMS: Failed to initialize connector\n");
cd8a2ae8 4438 goto fail;
4562236b
HW
4439 }
4440
89fc8d4e
HW
4441 link = dc_get_link_at_index(dm->dc, i);
4442
54618888 4443 if (!dc_link_detect_connection_type(link, &new_connection_type))
fbbdadf2
BL
4444 DRM_ERROR("KMS: Failed to detect connector\n");
4445
4446 if (aconnector->base.force && new_connection_type == dc_connection_none) {
4447 emulated_link_detect(link);
4448 amdgpu_dm_update_connector_after_detect(aconnector);
15c735e7
WL
4449 } else {
4450 bool ret = false;
fbbdadf2 4451
15c735e7
WL
4452 mutex_lock(&dm->dc_lock);
4453 ret = dc_link_detect(link, DETECT_REASON_BOOT);
4454 mutex_unlock(&dm->dc_lock);
4455
4456 if (ret) {
4457 amdgpu_dm_update_connector_after_detect(aconnector);
618e51cd 4458 setup_backlight_device(dm, aconnector);
89fc8d4e 4459
15c735e7
WL
4460 if (psr_feature_enabled)
4461 amdgpu_dm_set_psr_caps(link);
89fc8d4e 4462
15c735e7
WL
4463 /* TODO: Fix vblank control helpers to delay PSR entry to allow this when
4464 * PSR is also supported.
4465 */
4466 if (link->psr_settings.psr_feature_enabled)
4467 adev_to_drm(adev)->vblank_disable_immediate = false;
4468 }
4469 }
acc96ae0 4470 amdgpu_set_panel_orientation(&aconnector->base);
4562236b
HW
4471 }
4472
c573e240
ML
4473 /* If we didn't find a panel, notify the acpi video detection */
4474 if (dm->adev->flags & AMD_IS_APU && dm->num_of_edps == 0)
4475 acpi_video_report_nolcd();
4476
4562236b
HW
4477 /* Software is initialized. Now we can register interrupt handlers. */
4478 switch (adev->asic_type) {
55e56389
MR
4479#if defined(CONFIG_DRM_AMD_DC_SI)
4480 case CHIP_TAHITI:
4481 case CHIP_PITCAIRN:
4482 case CHIP_VERDE:
4483 case CHIP_OLAND:
4484 if (dce60_register_irq_handlers(dm->adev)) {
4485 DRM_ERROR("DM: Failed to initialize IRQ\n");
4486 goto fail;
4487 }
4488 break;
4489#endif
4562236b
HW
4490 case CHIP_BONAIRE:
4491 case CHIP_HAWAII:
cd4b356f
AD
4492 case CHIP_KAVERI:
4493 case CHIP_KABINI:
4494 case CHIP_MULLINS:
4562236b
HW
4495 case CHIP_TONGA:
4496 case CHIP_FIJI:
4497 case CHIP_CARRIZO:
4498 case CHIP_STONEY:
4499 case CHIP_POLARIS11:
4500 case CHIP_POLARIS10:
b264d345 4501 case CHIP_POLARIS12:
7737de91 4502 case CHIP_VEGAM:
2c8ad2d5 4503 case CHIP_VEGA10:
2325ff30 4504 case CHIP_VEGA12:
1fe6bf2f 4505 case CHIP_VEGA20:
4562236b
HW
4506 if (dce110_register_irq_handlers(dm->adev)) {
4507 DRM_ERROR("DM: Failed to initialize IRQ\n");
cd8a2ae8 4508 goto fail;
4562236b
HW
4509 }
4510 break;
4511 default:
1d789535 4512 switch (adev->ip_versions[DCE_HWIP][0]) {
559f591d
AD
4513 case IP_VERSION(1, 0, 0):
4514 case IP_VERSION(1, 0, 1):
c08182f2
AD
4515 case IP_VERSION(2, 0, 2):
4516 case IP_VERSION(2, 0, 3):
4517 case IP_VERSION(2, 0, 0):
4518 case IP_VERSION(2, 1, 0):
4519 case IP_VERSION(3, 0, 0):
4520 case IP_VERSION(3, 0, 2):
4521 case IP_VERSION(3, 0, 3):
4522 case IP_VERSION(3, 0, 1):
4523 case IP_VERSION(3, 1, 2):
4524 case IP_VERSION(3, 1, 3):
e850f6b1 4525 case IP_VERSION(3, 1, 4):
b5b8ed44 4526 case IP_VERSION(3, 1, 5):
de7cc1b4 4527 case IP_VERSION(3, 1, 6):
577359ca
AP
4528 case IP_VERSION(3, 2, 0):
4529 case IP_VERSION(3, 2, 1):
c08182f2
AD
4530 if (dcn10_register_irq_handlers(dm->adev)) {
4531 DRM_ERROR("DM: Failed to initialize IRQ\n");
4532 goto fail;
4533 }
4534 break;
4535 default:
2cbc6f42 4536 DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
1d789535 4537 adev->ip_versions[DCE_HWIP][0]);
2cbc6f42 4538 goto fail;
c08182f2 4539 }
2cbc6f42 4540 break;
4562236b
HW
4541 }
4542
4562236b 4543 return 0;
cd8a2ae8 4544fail:
4562236b 4545 kfree(aencoder);
4562236b 4546 kfree(aconnector);
54087768 4547
59d0f396 4548 return -EINVAL;
4562236b
HW
4549}
4550
7578ecda 4551static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4562236b 4552{
eb3dc897 4553 drm_atomic_private_obj_fini(&dm->atomic_obj);
4562236b
HW
4554 return;
4555}
4556
4557/******************************************************************************
4558 * amdgpu_display_funcs functions
4559 *****************************************************************************/
4560
1f6010a9 4561/*
4562236b
HW
4562 * dm_bandwidth_update - program display watermarks
4563 *
4564 * @adev: amdgpu_device pointer
4565 *
4566 * Calculate and program the display watermarks and line buffer allocation.
4567 */
4568static void dm_bandwidth_update(struct amdgpu_device *adev)
4569{
49c07a99 4570 /* TODO: implement later */
4562236b
HW
4571}
4572
39cc5be2 4573static const struct amdgpu_display_funcs dm_display_funcs = {
4562236b
HW
4574 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4575 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
7b42573b
HW
4576 .backlight_set_level = NULL, /* never called for DC */
4577 .backlight_get_level = NULL, /* never called for DC */
4562236b
HW
4578 .hpd_sense = NULL,/* called unconditionally */
4579 .hpd_set_polarity = NULL, /* called unconditionally */
4580 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4562236b
HW
4581 .page_flip_get_scanoutpos =
4582 dm_crtc_get_scanoutpos,/* called unconditionally */
4583 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4584 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4562236b
HW
4585};
4586
4587#if defined(CONFIG_DEBUG_KERNEL_DC)
4588
3ee6b26b
AD
4589static ssize_t s3_debug_store(struct device *device,
4590 struct device_attribute *attr,
4591 const char *buf,
4592 size_t count)
4562236b
HW
4593{
4594 int ret;
4595 int s3_state;
ef1de361 4596 struct drm_device *drm_dev = dev_get_drvdata(device);
1348969a 4597 struct amdgpu_device *adev = drm_to_adev(drm_dev);
4562236b
HW
4598
4599 ret = kstrtoint(buf, 0, &s3_state);
4600
4601 if (ret == 0) {
4602 if (s3_state) {
4603 dm_resume(adev);
4a580877 4604 drm_kms_helper_hotplug_event(adev_to_drm(adev));
4562236b
HW
4605 } else
4606 dm_suspend(adev);
4607 }
4608
4609 return ret == 0 ? count : 0;
4610}
4611
4612DEVICE_ATTR_WO(s3_debug);
4613
4614#endif
4615
a7ab3451
ML
4616static int dm_init_microcode(struct amdgpu_device *adev)
4617{
4618 char *fw_name_dmub;
4619 int r;
4620
4621 switch (adev->ip_versions[DCE_HWIP][0]) {
4622 case IP_VERSION(2, 1, 0):
4623 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
4624 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
4625 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
4626 break;
4627 case IP_VERSION(3, 0, 0):
4628 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
4629 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
4630 else
4631 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
4632 break;
4633 case IP_VERSION(3, 0, 1):
4634 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
4635 break;
4636 case IP_VERSION(3, 0, 2):
4637 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
4638 break;
4639 case IP_VERSION(3, 0, 3):
4640 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
4641 break;
4642 case IP_VERSION(3, 1, 2):
4643 case IP_VERSION(3, 1, 3):
4644 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
4645 break;
4646 case IP_VERSION(3, 1, 4):
4647 fw_name_dmub = FIRMWARE_DCN_314_DMUB;
4648 break;
4649 case IP_VERSION(3, 1, 5):
4650 fw_name_dmub = FIRMWARE_DCN_315_DMUB;
4651 break;
4652 case IP_VERSION(3, 1, 6):
4653 fw_name_dmub = FIRMWARE_DCN316_DMUB;
4654 break;
4655 case IP_VERSION(3, 2, 0):
4656 fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
4657 break;
4658 case IP_VERSION(3, 2, 1):
4659 fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
4660 break;
4661 default:
4662 /* ASIC doesn't support DMUB. */
4663 return 0;
4664 }
4665 r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, fw_name_dmub);
4666 if (r)
4667 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
4668 return r;
4669}
4670
4562236b
HW
4671static int dm_early_init(void *handle)
4672{
4673 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
44900af0
AD
4674 struct amdgpu_mode_info *mode_info = &adev->mode_info;
4675 struct atom_context *ctx = mode_info->atom_context;
4676 int index = GetIndexIntoMasterTable(DATA, Object_Header);
4677 u16 data_offset;
4678
4679 /* if there is no object header, skip DM */
4680 if (!amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
4681 adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
4682 dev_info(adev->dev, "No object header, skipping DM\n");
4683 return -ENOENT;
4684 }
4562236b 4685
4562236b 4686 switch (adev->asic_type) {
55e56389
MR
4687#if defined(CONFIG_DRM_AMD_DC_SI)
4688 case CHIP_TAHITI:
4689 case CHIP_PITCAIRN:
4690 case CHIP_VERDE:
4691 adev->mode_info.num_crtc = 6;
4692 adev->mode_info.num_hpd = 6;
4693 adev->mode_info.num_dig = 6;
4694 break;
4695 case CHIP_OLAND:
4696 adev->mode_info.num_crtc = 2;
4697 adev->mode_info.num_hpd = 2;
4698 adev->mode_info.num_dig = 2;
4699 break;
4700#endif
4562236b
HW
4701 case CHIP_BONAIRE:
4702 case CHIP_HAWAII:
4703 adev->mode_info.num_crtc = 6;
4704 adev->mode_info.num_hpd = 6;
4705 adev->mode_info.num_dig = 6;
4562236b 4706 break;
cd4b356f
AD
4707 case CHIP_KAVERI:
4708 adev->mode_info.num_crtc = 4;
4709 adev->mode_info.num_hpd = 6;
4710 adev->mode_info.num_dig = 7;
cd4b356f
AD
4711 break;
4712 case CHIP_KABINI:
4713 case CHIP_MULLINS:
4714 adev->mode_info.num_crtc = 2;
4715 adev->mode_info.num_hpd = 6;
4716 adev->mode_info.num_dig = 6;
cd4b356f 4717 break;
4562236b
HW
4718 case CHIP_FIJI:
4719 case CHIP_TONGA:
4720 adev->mode_info.num_crtc = 6;
4721 adev->mode_info.num_hpd = 6;
4722 adev->mode_info.num_dig = 7;
4562236b
HW
4723 break;
4724 case CHIP_CARRIZO:
4725 adev->mode_info.num_crtc = 3;
4726 adev->mode_info.num_hpd = 6;
4727 adev->mode_info.num_dig = 9;
4562236b
HW
4728 break;
4729 case CHIP_STONEY:
4730 adev->mode_info.num_crtc = 2;
4731 adev->mode_info.num_hpd = 6;
4732 adev->mode_info.num_dig = 9;
4562236b
HW
4733 break;
4734 case CHIP_POLARIS11:
b264d345 4735 case CHIP_POLARIS12:
4562236b
HW
4736 adev->mode_info.num_crtc = 5;
4737 adev->mode_info.num_hpd = 5;
4738 adev->mode_info.num_dig = 5;
4562236b
HW
4739 break;
4740 case CHIP_POLARIS10:
7737de91 4741 case CHIP_VEGAM:
4562236b
HW
4742 adev->mode_info.num_crtc = 6;
4743 adev->mode_info.num_hpd = 6;
4744 adev->mode_info.num_dig = 6;
4562236b 4745 break;
2c8ad2d5 4746 case CHIP_VEGA10:
2325ff30 4747 case CHIP_VEGA12:
1fe6bf2f 4748 case CHIP_VEGA20:
2c8ad2d5
AD
4749 adev->mode_info.num_crtc = 6;
4750 adev->mode_info.num_hpd = 6;
4751 adev->mode_info.num_dig = 6;
4752 break;
4562236b 4753 default:
cae5c1ab 4754
1d789535 4755 switch (adev->ip_versions[DCE_HWIP][0]) {
c08182f2
AD
4756 case IP_VERSION(2, 0, 2):
4757 case IP_VERSION(3, 0, 0):
4758 adev->mode_info.num_crtc = 6;
4759 adev->mode_info.num_hpd = 6;
4760 adev->mode_info.num_dig = 6;
4761 break;
4762 case IP_VERSION(2, 0, 0):
4763 case IP_VERSION(3, 0, 2):
4764 adev->mode_info.num_crtc = 5;
4765 adev->mode_info.num_hpd = 5;
4766 adev->mode_info.num_dig = 5;
4767 break;
4768 case IP_VERSION(2, 0, 3):
4769 case IP_VERSION(3, 0, 3):
4770 adev->mode_info.num_crtc = 2;
4771 adev->mode_info.num_hpd = 2;
4772 adev->mode_info.num_dig = 2;
4773 break;
559f591d
AD
4774 case IP_VERSION(1, 0, 0):
4775 case IP_VERSION(1, 0, 1):
c08182f2
AD
4776 case IP_VERSION(3, 0, 1):
4777 case IP_VERSION(2, 1, 0):
4778 case IP_VERSION(3, 1, 2):
4779 case IP_VERSION(3, 1, 3):
e850f6b1 4780 case IP_VERSION(3, 1, 4):
b5b8ed44 4781 case IP_VERSION(3, 1, 5):
de7cc1b4 4782 case IP_VERSION(3, 1, 6):
577359ca
AP
4783 case IP_VERSION(3, 2, 0):
4784 case IP_VERSION(3, 2, 1):
c08182f2
AD
4785 adev->mode_info.num_crtc = 4;
4786 adev->mode_info.num_hpd = 4;
4787 adev->mode_info.num_dig = 4;
4788 break;
4789 default:
2cbc6f42 4790 DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
1d789535 4791 adev->ip_versions[DCE_HWIP][0]);
2cbc6f42 4792 return -EINVAL;
c08182f2 4793 }
2cbc6f42 4794 break;
4562236b
HW
4795 }
4796
39cc5be2
AD
4797 if (adev->mode_info.funcs == NULL)
4798 adev->mode_info.funcs = &dm_display_funcs;
4799
1f6010a9
DF
4800 /*
4801 * Note: Do NOT change adev->audio_endpt_rreg and
4562236b 4802 * adev->audio_endpt_wreg because they are initialised in
1f6010a9
DF
4803 * amdgpu_device_init()
4804 */
4562236b
HW
4805#if defined(CONFIG_DEBUG_KERNEL_DC)
4806 device_create_file(
4a580877 4807 adev_to_drm(adev)->dev,
4562236b
HW
4808 &dev_attr_s3_debug);
4809#endif
d09ef243 4810 adev->dc_enabled = true;
4562236b 4811
a7ab3451 4812 return dm_init_microcode(adev);
4562236b
HW
4813}
4814
e7b07cee
HW
4815static bool modereset_required(struct drm_crtc_state *crtc_state)
4816{
2afda735 4817 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
e7b07cee
HW
4818}
4819
7578ecda 4820static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
e7b07cee
HW
4821{
4822 drm_encoder_cleanup(encoder);
4823 kfree(encoder);
4824}
4825
4826static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4827 .destroy = amdgpu_dm_encoder_destroy,
4828};
4829
5d945cbc
RS
4830static int
4831fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4832 const enum surface_pixel_format format,
4833 enum dc_color_space *color_space)
6300b3bd 4834{
5d945cbc 4835 bool full_range;
6300b3bd 4836
5d945cbc
RS
4837 *color_space = COLOR_SPACE_SRGB;
4838
4839 /* DRM color properties only affect non-RGB formats. */
4840 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4841 return 0;
4842
4843 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4844
4845 switch (plane_state->color_encoding) {
4846 case DRM_COLOR_YCBCR_BT601:
4847 if (full_range)
4848 *color_space = COLOR_SPACE_YCBCR601;
4849 else
4850 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
6300b3bd
MK
4851 break;
4852
5d945cbc
RS
4853 case DRM_COLOR_YCBCR_BT709:
4854 if (full_range)
4855 *color_space = COLOR_SPACE_YCBCR709;
4856 else
4857 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
6300b3bd
MK
4858 break;
4859
5d945cbc
RS
4860 case DRM_COLOR_YCBCR_BT2020:
4861 if (full_range)
4862 *color_space = COLOR_SPACE_2020_YCBCR;
4863 else
4864 return -EINVAL;
6300b3bd 4865 break;
6300b3bd 4866
5d945cbc
RS
4867 default:
4868 return -EINVAL;
4869 }
6300b3bd 4870
5d945cbc 4871 return 0;
6300b3bd
MK
4872}
4873
5d945cbc
RS
4874static int
4875fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4876 const struct drm_plane_state *plane_state,
ae67558b 4877 const u64 tiling_flags,
5d945cbc
RS
4878 struct dc_plane_info *plane_info,
4879 struct dc_plane_address *address,
4880 bool tmz_surface,
4881 bool force_disable_dcc)
e7b07cee 4882{
5d945cbc
RS
4883 const struct drm_framebuffer *fb = plane_state->fb;
4884 const struct amdgpu_framebuffer *afb =
4885 to_amdgpu_framebuffer(plane_state->fb);
4886 int ret;
e7b07cee 4887
5d945cbc 4888 memset(plane_info, 0, sizeof(*plane_info));
e7b07cee 4889
5d945cbc
RS
4890 switch (fb->format->format) {
4891 case DRM_FORMAT_C8:
4892 plane_info->format =
4893 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4894 break;
4895 case DRM_FORMAT_RGB565:
4896 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4897 break;
4898 case DRM_FORMAT_XRGB8888:
4899 case DRM_FORMAT_ARGB8888:
4900 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4901 break;
4902 case DRM_FORMAT_XRGB2101010:
4903 case DRM_FORMAT_ARGB2101010:
4904 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4905 break;
4906 case DRM_FORMAT_XBGR2101010:
4907 case DRM_FORMAT_ABGR2101010:
4908 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4909 break;
4910 case DRM_FORMAT_XBGR8888:
4911 case DRM_FORMAT_ABGR8888:
4912 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4913 break;
4914 case DRM_FORMAT_NV21:
4915 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4916 break;
4917 case DRM_FORMAT_NV12:
4918 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4919 break;
4920 case DRM_FORMAT_P010:
4921 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4922 break;
4923 case DRM_FORMAT_XRGB16161616F:
4924 case DRM_FORMAT_ARGB16161616F:
4925 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4926 break;
4927 case DRM_FORMAT_XBGR16161616F:
4928 case DRM_FORMAT_ABGR16161616F:
4929 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4930 break;
4931 case DRM_FORMAT_XRGB16161616:
4932 case DRM_FORMAT_ARGB16161616:
4933 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
4934 break;
4935 case DRM_FORMAT_XBGR16161616:
4936 case DRM_FORMAT_ABGR16161616:
4937 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
4938 break;
4939 default:
4940 DRM_ERROR(
4941 "Unsupported screen format %p4cc\n",
4942 &fb->format->format);
d89f6048 4943 return -EINVAL;
5d945cbc 4944 }
d89f6048 4945
5d945cbc
RS
4946 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4947 case DRM_MODE_ROTATE_0:
4948 plane_info->rotation = ROTATION_ANGLE_0;
4949 break;
4950 case DRM_MODE_ROTATE_90:
4951 plane_info->rotation = ROTATION_ANGLE_90;
4952 break;
4953 case DRM_MODE_ROTATE_180:
4954 plane_info->rotation = ROTATION_ANGLE_180;
4955 break;
4956 case DRM_MODE_ROTATE_270:
4957 plane_info->rotation = ROTATION_ANGLE_270;
4958 break;
4959 default:
4960 plane_info->rotation = ROTATION_ANGLE_0;
4961 break;
4962 }
695af5f9 4963
695af5f9 4964
5d945cbc
RS
4965 plane_info->visible = true;
4966 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
e7b07cee 4967
22c42b0e 4968 plane_info->layer_index = plane_state->normalized_zpos;
e7b07cee 4969
5d945cbc
RS
4970 ret = fill_plane_color_attributes(plane_state, plane_info->format,
4971 &plane_info->color_space);
4972 if (ret)
4973 return ret;
e7b07cee 4974
8bf0d9cd 4975 ret = amdgpu_dm_plane_fill_plane_buffer_attributes(adev, afb, plane_info->format,
5d945cbc
RS
4976 plane_info->rotation, tiling_flags,
4977 &plane_info->tiling_info,
4978 &plane_info->plane_size,
4979 &plane_info->dcc, address,
4980 tmz_surface, force_disable_dcc);
4981 if (ret)
4982 return ret;
e7b07cee 4983
8bf0d9cd 4984 amdgpu_dm_plane_fill_blending_from_plane_state(
5d945cbc
RS
4985 plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
4986 &plane_info->global_alpha, &plane_info->global_alpha_value);
e7b07cee 4987
5d945cbc
RS
4988 return 0;
4989}
e7b07cee 4990
5d945cbc
RS
4991static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4992 struct dc_plane_state *dc_plane_state,
4993 struct drm_plane_state *plane_state,
4994 struct drm_crtc_state *crtc_state)
4995{
4996 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4997 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4998 struct dc_scaling_info scaling_info;
4999 struct dc_plane_info plane_info;
5000 int ret;
5001 bool force_disable_dcc = false;
6300b3bd 5002
8bf0d9cd 5003 ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, plane_state, &scaling_info);
5d945cbc
RS
5004 if (ret)
5005 return ret;
e7b07cee 5006
5d945cbc
RS
5007 dc_plane_state->src_rect = scaling_info.src_rect;
5008 dc_plane_state->dst_rect = scaling_info.dst_rect;
5009 dc_plane_state->clip_rect = scaling_info.clip_rect;
5010 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
6491f0c0 5011
5d945cbc
RS
5012 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5013 ret = fill_dc_plane_info_and_addr(adev, plane_state,
5014 afb->tiling_flags,
5015 &plane_info,
5016 &dc_plane_state->address,
5017 afb->tmz_surface,
5018 force_disable_dcc);
5019 if (ret)
5020 return ret;
6491f0c0 5021
5d945cbc
RS
5022 dc_plane_state->format = plane_info.format;
5023 dc_plane_state->color_space = plane_info.color_space;
5025 dc_plane_state->plane_size = plane_info.plane_size;
5026 dc_plane_state->rotation = plane_info.rotation;
5027 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5028 dc_plane_state->stereo_format = plane_info.stereo_format;
5029 dc_plane_state->tiling_info = plane_info.tiling_info;
5030 dc_plane_state->visible = plane_info.visible;
5031 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5032 dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
5033 dc_plane_state->global_alpha = plane_info.global_alpha;
5034 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5035 dc_plane_state->dcc = plane_info.dcc;
22c42b0e 5036 dc_plane_state->layer_index = plane_info.layer_index;
5d945cbc 5037 dc_plane_state->flip_int_enabled = true;
6491f0c0 5038
695af5f9 5039 /*
5d945cbc
RS
5040 * Always set input transfer function, since plane state is refreshed
5041 * every time.
695af5f9 5042 */
5d945cbc
RS
5043 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5044 if (ret)
5045 return ret;
e7b07cee 5046
695af5f9 5047 return 0;
4562236b 5048}
695af5f9 5049
30ebe415
HM
5050static inline void fill_dc_dirty_rect(struct drm_plane *plane,
5051 struct rect *dirty_rect, s32 x,
ae67558b 5052 s32 y, s32 width, s32 height,
30ebe415
HM
5053 int *i, bool ffu)
5054{
5055 if (*i > DC_MAX_DIRTY_RECTS)
5056 return;
5057
5058 if (*i == DC_MAX_DIRTY_RECTS)
5059 goto out;
5060
5061 dirty_rect->x = x;
5062 dirty_rect->y = y;
5063 dirty_rect->width = width;
5064 dirty_rect->height = height;
5065
5066 if (ffu)
5067 drm_dbg(plane->dev,
5068 "[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
5069 plane->base.id, width, height);
5070 else
5071 drm_dbg(plane->dev,
5072 "[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)",
5073 plane->base.id, x, y, width, height);
5074
5075out:
5076 (*i)++;
5077}
5078
5d945cbc
RS
5079/**
5080 * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
5081 *
5082 * @plane: DRM plane containing dirty regions that need to be flushed to the eDP
5083 * remote fb
5084 * @old_plane_state: Old state of @plane
5085 * @new_plane_state: New state of @plane
5086 * @crtc_state: New state of CRTC connected to the @plane
5087 * @flip_addrs: DC flip tracking struct, which also tracks dirty rects
d6ed6d0d 5088 * @dirty_regions_changed: set to true when the dirty region bounds change
5d945cbc
RS
5089 *
5090 * For PSR SU, DC informs the DMUB uController of dirty rectangle regions
5091 * (referred to as "damage clips" in DRM nomenclature) that require updating on
5092 * the eDP remote buffer. The responsibility of specifying the dirty regions is
5093 * amdgpu_dm's.
5094 *
5095 * A damage-aware DRM client should fill the FB_DAMAGE_CLIPS property on the
5096 * plane with regions that require flushing to the eDP remote buffer. In
5097 * addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
5098 * implicitly provide damage clips without any client support via the plane
5099 * bounds.
5d945cbc
RS
5100 */
5101static void fill_dc_dirty_rects(struct drm_plane *plane,
5102 struct drm_plane_state *old_plane_state,
5103 struct drm_plane_state *new_plane_state,
5104 struct drm_crtc_state *crtc_state,
d6ed6d0d
TC
5105 struct dc_flip_addrs *flip_addrs,
5106 bool *dirty_regions_changed)
5d945cbc
RS
5107{
5108 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5109 struct rect *dirty_rects = flip_addrs->dirty_rects;
ae67558b 5110 u32 num_clips;
30ebe415 5111 struct drm_mode_rect *clips;
5d945cbc
RS
5112 bool bb_changed;
5113 bool fb_changed;
ae67558b 5114 u32 i = 0;
d6ed6d0d 5115 *dirty_regions_changed = false;
e7b07cee 5116
7cc191ee
LL
5117 /*
5118 * Cursor plane has its own dirty rect update interface. See
5119 * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data
5120 */
5121 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5122 return;
5123
30ebe415
HM
5124 num_clips = drm_plane_get_damage_clips_count(new_plane_state);
5125 clips = drm_plane_get_damage_clips(new_plane_state);
5126
7cc191ee 5127 if (!dm_crtc_state->mpo_requested) {
30ebe415
HM
5128 if (!num_clips || num_clips > DC_MAX_DIRTY_RECTS)
5129 goto ffu;
5130
5131 for (; flip_addrs->dirty_rect_count < num_clips; clips++)
5132 fill_dc_dirty_rect(new_plane_state->plane,
566b6577
BC
5133 &dirty_rects[flip_addrs->dirty_rect_count],
5134 clips->x1, clips->y1,
5135 clips->x2 - clips->x1, clips->y2 - clips->y1,
30ebe415
HM
5136 &flip_addrs->dirty_rect_count,
5137 false);
7cc191ee
LL
5138 return;
5139 }
5140
5141 /*
5142 * MPO is requested. Add entire plane bounding box to dirty rects if
5143 * flipped to or damaged.
5144 *
5145 * If plane is moved or resized, also add old bounding box to dirty
5146 * rects.
5147 */
7cc191ee
LL
5148 fb_changed = old_plane_state->fb->base.id !=
5149 new_plane_state->fb->base.id;
5150 bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x ||
5151 old_plane_state->crtc_y != new_plane_state->crtc_y ||
5152 old_plane_state->crtc_w != new_plane_state->crtc_w ||
5153 old_plane_state->crtc_h != new_plane_state->crtc_h);
5154
30ebe415
HM
5155 drm_dbg(plane->dev,
5156 "[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
5157 new_plane_state->plane->base.id,
5158 bb_changed, fb_changed, num_clips);
7cc191ee 5159
d6ed6d0d
TC
5160 *dirty_regions_changed = bb_changed;
5161
7cc191ee 5162 if (bb_changed) {
30ebe415
HM
5163 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
5164 new_plane_state->crtc_x,
5165 new_plane_state->crtc_y,
5166 new_plane_state->crtc_w,
5167 new_plane_state->crtc_h, &i, false);
5168
5169 /* Add old plane bounding-box if plane is moved or resized */
5170 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
5171 old_plane_state->crtc_x,
5172 old_plane_state->crtc_y,
5173 old_plane_state->crtc_w,
5174 old_plane_state->crtc_h, &i, false);
5175 }
5176
5177 if (num_clips) {
5178 for (; i < num_clips; clips++)
5179 fill_dc_dirty_rect(new_plane_state->plane,
5180 &dirty_rects[i], clips->x1,
5181 clips->y1, clips->x2 - clips->x1,
5182 clips->y2 - clips->y1, &i, false);
5183 } else if (fb_changed && !bb_changed) {
5184 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
5185 new_plane_state->crtc_x,
5186 new_plane_state->crtc_y,
5187 new_plane_state->crtc_w,
5188 new_plane_state->crtc_h, &i, false);
5189 }
5190
5191 if (i > DC_MAX_DIRTY_RECTS)
5192 goto ffu;
7cc191ee
LL
5193
5194 flip_addrs->dirty_rect_count = i;
30ebe415
HM
5195 return;
5196
5197ffu:
5198 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[0], 0, 0,
5199 dm_crtc_state->base.mode.crtc_hdisplay,
5200 dm_crtc_state->base.mode.crtc_vdisplay,
5201 &flip_addrs->dirty_rect_count, true);
7cc191ee
LL
5202}
5203
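/*
 * A minimal sketch, illustrative only (not driver code): DRM damage
 * clips are inclusive-exclusive corner pairs (x1, y1)-(x2, y2), while
 * the DC dirty rects filled above are position plus size. Per clip,
 * the conversion reduces to the following; example_rect is a
 * hypothetical stand-in for the DC rect layout.
 */
struct example_rect {
	int x, y, width, height;
};

static struct example_rect damage_clip_to_rect(int x1, int y1,
					       int x2, int y2)
{
	struct example_rect r = {
		.x = x1,
		.y = y1,
		.width = x2 - x1,	/* x2/y2 are exclusive edges */
		.height = y2 - y1,
	};
	return r;
}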
3ee6b26b
AD
5204static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5205 const struct dm_connector_state *dm_state,
5206 struct dc_stream_state *stream)
e7b07cee
HW
5207{
5208 enum amdgpu_rmx_type rmx_type;
5209
5210 struct rect src = { 0 }; /* viewport in composition space*/
5211 struct rect dst = { 0 }; /* stream addressable area */
5212
5213 /* No mode, nothing to be done */
5214 if (!mode)
5215 return;
5216
5217 /* Full screen scaling by default */
5218 src.width = mode->hdisplay;
5219 src.height = mode->vdisplay;
5220 dst.width = stream->timing.h_addressable;
5221 dst.height = stream->timing.v_addressable;
5222
f4791779
HW
5223 if (dm_state) {
5224 rmx_type = dm_state->scaling;
5225 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5226 if (src.width * dst.height <
5227 src.height * dst.width) {
5228 /* height needs less upscaling/more downscaling */
5229 dst.width = src.width *
5230 dst.height / src.height;
5231 } else {
5232 /* width needs less upscaling/more downscaling */
5233 dst.height = src.height *
5234 dst.width / src.width;
5235 }
5236 } else if (rmx_type == RMX_CENTER) {
5237 dst = src;
e7b07cee 5238 }
e7b07cee 5239
f4791779
HW
5240 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5241 dst.y = (stream->timing.v_addressable - dst.height) / 2;
e7b07cee 5242
f4791779
HW
5243 if (dm_state->underscan_enable) {
5244 dst.x += dm_state->underscan_hborder / 2;
5245 dst.y += dm_state->underscan_vborder / 2;
5246 dst.width -= dm_state->underscan_hborder;
5247 dst.height -= dm_state->underscan_vborder;
5248 }
e7b07cee
HW
5249 }
5250
5251 stream->src = src;
5252 stream->dst = dst;
5253
4711c033
LT
5254 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
5255 dst.x, dst.y, dst.width, dst.height);
e7b07cee
HW
5256
5257}
5258
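/*
 * A self-contained sketch of the RMX_ASPECT math above (illustrative,
 * not driver code): scale the source to fit the addressable area while
 * preserving aspect ratio, then center it. A 1280x1024 (5:4) source on
 * a 1920x1080 panel yields a centered 1350x1080 rect at x = 285.
 */
static void rmx_aspect_fit(int src_w, int src_h, int dst_w, int dst_h,
			   int *x, int *y, int *w, int *h)
{
	if (src_w * dst_h < src_h * dst_w) {
		/* Destination is wider than source: pillarbox. */
		*w = src_w * dst_h / src_h;
		*h = dst_h;
	} else {
		/* Destination is taller than source: letterbox. */
		*w = dst_w;
		*h = src_h * dst_w / src_w;
	}
	*x = (dst_w - *w) / 2;
	*y = (dst_h - *h) / 2;
}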
3ee6b26b 5259static enum dc_color_depth
42ba01fc 5260convert_color_depth_from_display_info(const struct drm_connector *connector,
cbd14ae7 5261 bool is_y420, int requested_bpc)
e7b07cee 5262{
ae67558b 5263 u8 bpc;
01c22997 5264
1bc22f20
SW
5265 if (is_y420) {
5266 bpc = 8;
5267
5268 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5269 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5270 bpc = 16;
5271 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5272 bpc = 12;
5273 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5274 bpc = 10;
5275 } else {
5276 bpc = (uint8_t)connector->display_info.bpc;
5277 /* Assume 8 bpc by default if no bpc is specified. */
5278 bpc = bpc ? bpc : 8;
5279 }
e7b07cee 5280
cbd14ae7 5281 if (requested_bpc > 0) {
01c22997
NK
5282 /*
5283 * Cap display bpc based on the user requested value.
5284 *
5285 * The value for state->max_bpc may not be correctly updated
5286 * depending on when the connector gets added to the state
5287 * or if this was called outside of atomic check, so it
5288 * can't be used directly.
5289 */
cbd14ae7 5290 bpc = min_t(u8, bpc, requested_bpc);
01c22997 5291
1825fd34
NK
5292 /* Round down to the nearest even number. */
5293 bpc = bpc - (bpc & 1);
5294 }
07e3a1cf 5295
e7b07cee
HW
5296 switch (bpc) {
5297 case 0:
1f6010a9
DF
5298 /*
5299 * Temporary workaround: DRM doesn't parse color depth for
e7b07cee
HW
5300 * EDID revisions before 1.4.
5301 * TODO: Fix edid parsing
5302 */
5303 return COLOR_DEPTH_888;
5304 case 6:
5305 return COLOR_DEPTH_666;
5306 case 8:
5307 return COLOR_DEPTH_888;
5308 case 10:
5309 return COLOR_DEPTH_101010;
5310 case 12:
5311 return COLOR_DEPTH_121212;
5312 case 14:
5313 return COLOR_DEPTH_141414;
5314 case 16:
5315 return COLOR_DEPTH_161616;
5316 default:
5317 return COLOR_DEPTH_UNDEFINED;
5318 }
5319}
5320
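/*
 * A sketch of the capping above (illustrative, not driver code): with
 * a positive requested bpc, clamp the sink's bpc to it and round down
 * to an even value, since odd component depths are not defined for the
 * formats handled here.
 */
static unsigned char cap_requested_bpc(unsigned char bpc, int requested_bpc)
{
	if (requested_bpc > 0) {
		if (bpc > requested_bpc)
			bpc = requested_bpc;
		bpc -= bpc & 1;	/* round down to even */
	}
	return bpc;
}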
3ee6b26b
AD
5321static enum dc_aspect_ratio
5322get_aspect_ratio(const struct drm_display_mode *mode_in)
e7b07cee 5323{
e11d4147
LSL
5324 /* 1-1 mapping, since both enums follow the HDMI spec. */
5325 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
e7b07cee
HW
5326}
5327
3ee6b26b
AD
5328static enum dc_color_space
5329get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
e7b07cee
HW
5330{
5331 enum dc_color_space color_space = COLOR_SPACE_SRGB;
5332
5333 switch (dc_crtc_timing->pixel_encoding) {
5334 case PIXEL_ENCODING_YCBCR422:
5335 case PIXEL_ENCODING_YCBCR444:
5336 case PIXEL_ENCODING_YCBCR420:
5337 {
5338 /*
5339 * 27030 kHz (27.03 MHz) is the separation point between HDTV and
5340 * SDTV according to the HDMI spec; we use YCbCr709 above it and
5341 * YCbCr601 below it.
5342 */
380604e2 5343 if (dc_crtc_timing->pix_clk_100hz > 270300) {
e7b07cee
HW
5344 if (dc_crtc_timing->flags.Y_ONLY)
5345 color_space =
5346 COLOR_SPACE_YCBCR709_LIMITED;
5347 else
5348 color_space = COLOR_SPACE_YCBCR709;
5349 } else {
5350 if (dc_crtc_timing->flags.Y_ONLY)
5351 color_space =
5352 COLOR_SPACE_YCBCR601_LIMITED;
5353 else
5354 color_space = COLOR_SPACE_YCBCR601;
5355 }
5356
5357 }
5358 break;
5359 case PIXEL_ENCODING_RGB:
5360 color_space = COLOR_SPACE_SRGB;
5361 break;
5362
5363 default:
5364 WARN_ON(1);
5365 break;
5366 }
5367
5368 return color_space;
5369}
5370
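/*
 * Unit note on the 270300 threshold above (a sketch, not driver code):
 * pix_clk_100hz counts 100 Hz units, so 270300 corresponds to
 * 27.03 MHz, just above the 27 MHz pixel clock used by SDTV timings.
 */
static unsigned int pix_clk_100hz_to_khz(unsigned int pix_clk_100hz)
{
	return pix_clk_100hz / 10;	/* 270300 -> 27030 kHz */
}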
ea117312
TA
5371static bool adjust_colour_depth_from_display_info(
5372 struct dc_crtc_timing *timing_out,
5373 const struct drm_display_info *info)
400443e8 5374{
ea117312 5375 enum dc_color_depth depth = timing_out->display_color_depth;
400443e8 5376 int normalized_clk;
400443e8 5377 do {
380604e2 5378 normalized_clk = timing_out->pix_clk_100hz / 10;
400443e8
ML
5379 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5380 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5381 normalized_clk /= 2;
5382 /* Adjust the pixel clock per the HDMI spec, based on colour depth */
ea117312
TA
5383 switch (depth) {
5384 case COLOR_DEPTH_888:
5385 break;
400443e8
ML
5386 case COLOR_DEPTH_101010:
5387 normalized_clk = (normalized_clk * 30) / 24;
5388 break;
5389 case COLOR_DEPTH_121212:
5390 normalized_clk = (normalized_clk * 36) / 24;
5391 break;
5392 case COLOR_DEPTH_161616:
5393 normalized_clk = (normalized_clk * 48) / 24;
5394 break;
5395 default:
ea117312
TA
5396 /* The above depths are the only ones valid for HDMI. */
5397 return false;
400443e8 5398 }
ea117312
TA
5399 if (normalized_clk <= info->max_tmds_clock) {
5400 timing_out->display_color_depth = depth;
5401 return true;
5402 }
5403 } while (--depth > COLOR_DEPTH_666);
5404 return false;
400443e8 5405}
e7b07cee 5406
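/*
 * The clock scaling above, in isolation (a sketch, not driver code):
 * deep colour inflates the TMDS clock by (3 * bpc) / 24, and YCbCr
 * 4:2:0 halves the pixel rate on the link. A 594000 kHz 4:2:0 stream
 * at 10 bpc therefore needs 594000 / 2 * 30 / 24 = 371250 kHz.
 */
static int required_tmds_clock_khz(int pix_clk_khz, int bpc, bool is_420)
{
	if (is_420)
		pix_clk_khz /= 2;

	return pix_clk_khz * (3 * bpc) / 24;
}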
42ba01fc
NK
5407static void fill_stream_properties_from_drm_display_mode(
5408 struct dc_stream_state *stream,
5409 const struct drm_display_mode *mode_in,
5410 const struct drm_connector *connector,
5411 const struct drm_connector_state *connector_state,
cbd14ae7
SW
5412 const struct dc_stream_state *old_stream,
5413 int requested_bpc)
e7b07cee
HW
5414{
5415 struct dc_crtc_timing *timing_out = &stream->timing;
fe61a2f1 5416 const struct drm_display_info *info = &connector->display_info;
d4252eee 5417 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
1cb1d477
WL
5418 struct hdmi_vendor_infoframe hv_frame;
5419 struct hdmi_avi_infoframe avi_frame;
e7b07cee 5420
acf83f86
WL
5421 memset(&hv_frame, 0, sizeof(hv_frame));
5422 memset(&avi_frame, 0, sizeof(avi_frame));
5423
e7b07cee
HW
5424 timing_out->h_border_left = 0;
5425 timing_out->h_border_right = 0;
5426 timing_out->v_border_top = 0;
5427 timing_out->v_border_bottom = 0;
5428 /* TODO: un-hardcode */
fe61a2f1 5429 if (drm_mode_is_420_only(info, mode_in)
ceb3dbb4 5430 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
fe61a2f1 5431 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
d4252eee
SW
5432 else if (drm_mode_is_420_also(info, mode_in)
5433 && aconnector->force_yuv420_output)
5434 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
c03d0b52 5435 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
ceb3dbb4 5436 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
e7b07cee
HW
5437 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5438 else
5439 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5440
5441 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5442 timing_out->display_color_depth = convert_color_depth_from_display_info(
cbd14ae7
SW
5443 connector,
5444 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5445 requested_bpc);
e7b07cee
HW
5446 timing_out->scan_type = SCANNING_TYPE_NODATA;
5447 timing_out->hdmi_vic = 0;
b333730d 5448
5d945cbc 5449 if (old_stream) {
b333730d
BL
5450 timing_out->vic = old_stream->timing.vic;
5451 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5452 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5453 } else {
5454 timing_out->vic = drm_match_cea_mode(mode_in);
5455 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5456 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5457 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5458 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5459 }
e7b07cee 5460
1cb1d477
WL
5461 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5462 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5463 timing_out->vic = avi_frame.video_code;
5464 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5465 timing_out->hdmi_vic = hv_frame.vic;
5466 }
5467
fe8858bb
NC
5468 if (is_freesync_video_mode(mode_in, aconnector)) {
5469 timing_out->h_addressable = mode_in->hdisplay;
5470 timing_out->h_total = mode_in->htotal;
5471 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5472 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5473 timing_out->v_total = mode_in->vtotal;
5474 timing_out->v_addressable = mode_in->vdisplay;
5475 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5476 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5477 timing_out->pix_clk_100hz = mode_in->clock * 10;
5478 } else {
5479 timing_out->h_addressable = mode_in->crtc_hdisplay;
5480 timing_out->h_total = mode_in->crtc_htotal;
5481 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5482 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5483 timing_out->v_total = mode_in->crtc_vtotal;
5484 timing_out->v_addressable = mode_in->crtc_vdisplay;
5485 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5486 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5487 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5488 }
a85ba005 5489
e7b07cee 5490 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
e7b07cee 5491
e43a432c
AK
5492 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5493 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
ea117312
TA
5494 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5495 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5496 drm_mode_is_420_also(info, mode_in) &&
5497 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5498 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5499 adjust_colour_depth_from_display_info(timing_out, info);
5500 }
5501 }
766f1792
JA
5502
5503 stream->output_color_space = get_output_color_space(timing_out);
e7b07cee
HW
5504}
5505
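/*
 * The porch arithmetic above, standalone (a sketch, not driver code):
 * DRM modes store edge positions (display end, sync start/end, total)
 * while DC timings want widths. For CEA 1920x1080@60 (hdisplay 1920,
 * hsync_start 2008, hsync_end 2052, htotal 2200) this yields a front
 * porch of 88, a sync width of 44 and a back porch of 148.
 */
struct example_h_timing {
	int front_porch, sync_width, back_porch;
};

static struct example_h_timing h_timing_from_drm(int hdisplay,
						 int hsync_start,
						 int hsync_end,
						 int htotal)
{
	struct example_h_timing t = {
		.front_porch = hsync_start - hdisplay,
		.sync_width = hsync_end - hsync_start,
		.back_porch = htotal - hsync_end,
	};
	return t;
}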
3ee6b26b
AD
5506static void fill_audio_info(struct audio_info *audio_info,
5507 const struct drm_connector *drm_connector,
5508 const struct dc_sink *dc_sink)
e7b07cee
HW
5509{
5510 int i = 0;
5511 int cea_revision = 0;
5512 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5513
5514 audio_info->manufacture_id = edid_caps->manufacturer_id;
5515 audio_info->product_id = edid_caps->product_id;
5516
5517 cea_revision = drm_connector->display_info.cea_rev;
5518
090afc1e 5519 strscpy(audio_info->display_name,
d2b2562c 5520 edid_caps->display_name,
090afc1e 5521 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
e7b07cee 5522
b830ebc9 5523 if (cea_revision >= 3) {
e7b07cee
HW
5524 audio_info->mode_count = edid_caps->audio_mode_count;
5525
5526 for (i = 0; i < audio_info->mode_count; ++i) {
5527 audio_info->modes[i].format_code =
5528 (enum audio_format_code)
5529 (edid_caps->audio_modes[i].format_code);
5530 audio_info->modes[i].channel_count =
5531 edid_caps->audio_modes[i].channel_count;
5532 audio_info->modes[i].sample_rates.all =
5533 edid_caps->audio_modes[i].sample_rate;
5534 audio_info->modes[i].sample_size =
5535 edid_caps->audio_modes[i].sample_size;
5536 }
5537 }
5538
5539 audio_info->flags.all = edid_caps->speaker_flags;
5540
5541 /* TODO: We only check for progressive mode; check for interlace mode too */
b830ebc9 5542 if (drm_connector->latency_present[0]) {
e7b07cee
HW
5543 audio_info->video_latency = drm_connector->video_latency[0];
5544 audio_info->audio_latency = drm_connector->audio_latency[0];
5545 }
5546
5547 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5548
5549}
5550
3ee6b26b
AD
5551static void
5552copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5553 struct drm_display_mode *dst_mode)
e7b07cee
HW
5554{
5555 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5556 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5557 dst_mode->crtc_clock = src_mode->crtc_clock;
5558 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5559 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
b830ebc9 5560 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
e7b07cee
HW
5561 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5562 dst_mode->crtc_htotal = src_mode->crtc_htotal;
5563 dst_mode->crtc_hskew = src_mode->crtc_hskew;
5564 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5565 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5566 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5567 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5568 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5569}
5570
3ee6b26b
AD
5571static void
5572decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5573 const struct drm_display_mode *native_mode,
5574 bool scale_enabled)
e7b07cee
HW
5575{
5576 if (scale_enabled) {
5577 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5578 } else if (native_mode->clock == drm_mode->clock &&
5579 native_mode->htotal == drm_mode->htotal &&
5580 native_mode->vtotal == drm_mode->vtotal) {
5581 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5582 } else {
5583 /* no scaling and no amdgpu-inserted mode, nothing to patch */
5584 }
5585}
5586
aed15309
ML
5587static struct dc_sink *
5588create_fake_sink(struct amdgpu_dm_connector *aconnector)
2e0ac3d6 5589{
2e0ac3d6 5590 struct dc_sink_init_data sink_init_data = { 0 };
aed15309 5591 struct dc_sink *sink = NULL;
2e0ac3d6
HW
5592 sink_init_data.link = aconnector->dc_link;
5593 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5594
5595 sink = dc_sink_create(&sink_init_data);
423788c7 5596 if (!sink) {
2e0ac3d6 5597 DRM_ERROR("Failed to create sink!\n");
aed15309 5598 return NULL;
423788c7 5599 }
2e0ac3d6 5600 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
423788c7 5601
aed15309 5602 return sink;
2e0ac3d6
HW
5603}
5604
fa2123db
ML
5605static void set_multisync_trigger_params(
5606 struct dc_stream_state *stream)
5607{
ec372186
ML
5608 struct dc_stream_state *master = NULL;
5609
fa2123db 5610 if (stream->triggered_crtc_reset.enabled) {
ec372186
ML
5611 master = stream->triggered_crtc_reset.event_source;
5612 stream->triggered_crtc_reset.event =
5613 master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5614 CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5615 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
fa2123db
ML
5616 }
5617}
5618
5619static void set_master_stream(struct dc_stream_state *stream_set[],
5620 int stream_count)
5621{
5622 int j, highest_rfr = 0, master_stream = 0;
5623
5624 for (j = 0; j < stream_count; j++) {
5625 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5626 int refresh_rate = 0;
5627
380604e2 5628 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
fa2123db
ML
5629 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5630 if (refresh_rate > highest_rfr) {
5631 highest_rfr = refresh_rate;
5632 master_stream = j;
5633 }
5634 }
5635 }
5636 for (j = 0; j < stream_count; j++) {
03736f4c 5637 if (stream_set[j])
fa2123db
ML
5638 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5639 }
5640}
5641
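/*
 * A sketch of the refresh-rate expression in set_master_stream()
 * (illustrative only): pix_clk_100hz * 100 gives Hz, divided by the
 * pixels per frame. E.g. 148.5 MHz (pix_clk_100hz = 1485000) with
 * 2200x1125 totals yields 60 Hz.
 */
static int stream_refresh_rate_hz(int pix_clk_100hz, int h_total, int v_total)
{
	return (pix_clk_100hz * 100) / (h_total * v_total);
}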
5642static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5643{
5644 int i = 0;
ec372186 5645 struct dc_stream_state *stream;
fa2123db
ML
5646
5647 if (context->stream_count < 2)
5648 return;
5649 for (i = 0; i < context->stream_count ; i++) {
5650 if (!context->streams[i])
5651 continue;
1f6010a9
DF
5652 /*
5653 * TODO: add a function to read AMD VSDB bits and set
fa2123db 5654 * crtc_sync_master.multi_sync_enabled flag
1f6010a9 5655 * For now it's set to false
fa2123db 5656 */
fa2123db 5657 }
ec372186 5658
fa2123db 5659 set_master_stream(context->streams, context->stream_count);
ec372186
ML
5660
5661 for (i = 0; i < context->stream_count ; i++) {
5662 stream = context->streams[i];
5663
5664 if (!stream)
5665 continue;
5666
5667 set_multisync_trigger_params(stream);
5668 }
fa2123db
ML
5669}
5670
5d945cbc
RS
5671/**
5672 * DOC: FreeSync Video
5673 *
5674 * When a userspace application wants to play a video, the content follows a
5675 * standard format definition that usually specifies the FPS for that format.
5676 * The list below illustrates some common video formats and their
5677 * expected FPS:
5678 *
5679 * - TV/NTSC (23.976 FPS)
5680 * - Cinema (24 FPS)
5681 * - TV/PAL (25 FPS)
5682 * - TV/NTSC (29.97 FPS)
5683 * - TV/NTSC (30 FPS)
5684 * - Cinema HFR (48 FPS)
5685 * - TV/PAL (50 FPS)
5686 * - Commonly used (60 FPS)
5687 * - Multiples of 24 (48,72,96 FPS)
5688 *
5689 * The list of standard video formats is not huge and can be added to the
5690 * connector modeset list beforehand. With that, userspace can leverage
5691 * FreeSync to extend the front porch in order to attain the target refresh
5692 * rate. Such a switch will happen seamlessly, without screen blanking or
5693 * reprogramming of the output in any other way. If the userspace requests a
5694 * modesetting change compatible with FreeSync modes that only differ in the
5695 * refresh rate, DC will skip the full update and avoid blink during the
5696 * transition. For example, the video player can change the modesetting from
5697 * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
5698 * causing any display blink. This same concept can be applied to a mode
5699 * setting change.
5700 */
5701static struct drm_display_mode *
5702get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5703 bool use_probed_modes)
5704{
5705 struct drm_display_mode *m, *m_pref = NULL;
5706 u16 current_refresh, highest_refresh;
5707 struct list_head *list_head = use_probed_modes ?
5708 &aconnector->base.probed_modes :
5709 &aconnector->base.modes;
5710
5711 if (aconnector->freesync_vid_base.clock != 0)
5712 return &aconnector->freesync_vid_base;
5713
5714 /* Find the preferred mode */
5715 list_for_each_entry(m, list_head, head) {
5716 if (m->type & DRM_MODE_TYPE_PREFERRED) {
5717 m_pref = m;
5718 break;
5719 }
5720 }
5721
5722 if (!m_pref) {
5723 /* Probably an EDID with no preferred mode. Fall back to the first entry. */
5724 m_pref = list_first_entry_or_null(
5725 &aconnector->base.modes, struct drm_display_mode, head);
5726 if (!m_pref) {
5727 DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5728 return NULL;
5729 }
5730 }
5731
5732 highest_refresh = drm_mode_vrefresh(m_pref);
5733
5734 /*
5735 * Find the mode with the highest refresh rate at the same resolution.
5736 * For some monitors, the preferred mode is not the one with the
5737 * highest supported refresh rate.
5738 */
5739 list_for_each_entry(m, list_head, head) {
5740 current_refresh = drm_mode_vrefresh(m);
5741
5742 if (m->hdisplay == m_pref->hdisplay &&
5743 m->vdisplay == m_pref->vdisplay &&
5744 highest_refresh < current_refresh) {
5745 highest_refresh = current_refresh;
5746 m_pref = m;
5747 }
5748 }
5749
5750 drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
5751 return m_pref;
5752}
5753
5754static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5755 struct amdgpu_dm_connector *aconnector)
5756{
5757 struct drm_display_mode *high_mode;
5758 int timing_diff;
5759
5760 high_mode = get_highest_refresh_rate_mode(aconnector, false);
5761 if (!high_mode || !mode)
5762 return false;
5763
5764 timing_diff = high_mode->vtotal - mode->vtotal;
5765
5766 if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5767 high_mode->hdisplay != mode->hdisplay ||
5768 high_mode->vdisplay != mode->vdisplay ||
5769 high_mode->hsync_start != mode->hsync_start ||
5770 high_mode->hsync_end != mode->hsync_end ||
5771 high_mode->htotal != mode->htotal ||
5772 high_mode->hskew != mode->hskew ||
5773 high_mode->vscan != mode->vscan ||
5774 high_mode->vsync_start - mode->vsync_start != timing_diff ||
5775 high_mode->vsync_end - mode->vsync_end != timing_diff)
5776 return false;
5777 else
5778 return true;
5779}
5780
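/*
 * A sketch of the front-porch stretch described in the FreeSync Video
 * DOC above (illustrative; not driver API): with the pixel clock and
 * horizontal timing fixed, the vtotal needed for a lower refresh rate
 * follows from vrefresh = clock / (htotal * vtotal). With 148500 kHz
 * and htotal 2200, vtotal 1125 gives 60 Hz and vtotal 2250 gives 30 Hz.
 */
static int freesync_vtotal_for_rate(int clock_khz, int htotal, int target_hz)
{
	return clock_khz * 1000 / (htotal * target_hz);
}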
998b7ad2 5781static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
5d945cbc
RS
5782 struct dc_sink *sink, struct dc_stream_state *stream,
5783 struct dsc_dec_dpcd_caps *dsc_caps)
998b7ad2
FZ
5784{
5785 stream->timing.flags.DSC = 0;
63ad5371 5786 dsc_caps->is_dsc_supported = false;
998b7ad2 5787
2665f63a 5788 if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
5d945cbc 5789 sink->sink_signal == SIGNAL_TYPE_EDP)) {
50b1f44e
FZ
5790 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
5791 sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
5792 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5793 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5794 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5795 dsc_caps);
998b7ad2
FZ
5796 }
5797}
5798
5d945cbc 5799
2665f63a
ML
5800static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
5801 struct dc_sink *sink, struct dc_stream_state *stream,
5802 struct dsc_dec_dpcd_caps *dsc_caps,
5803 uint32_t max_dsc_target_bpp_limit_override)
5804{
5805 const struct dc_link_settings *verified_link_cap = NULL;
ae67558b
SS
5806 u32 link_bw_in_kbps;
5807 u32 edp_min_bpp_x16, edp_max_bpp_x16;
2665f63a
ML
5808 struct dc *dc = sink->ctx->dc;
5809 struct dc_dsc_bw_range bw_range = {0};
5810 struct dc_dsc_config dsc_cfg = {0};
de534c1c
MH
5811 struct dc_dsc_config_options dsc_options = {0};
5812
5813 dc_dsc_get_default_config_option(dc, &dsc_options);
5814 dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16;
2665f63a
ML
5815
5816 verified_link_cap = dc_link_get_link_cap(stream->link);
5817 link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
5818 edp_min_bpp_x16 = 8 * 16;
5819 edp_max_bpp_x16 = 8 * 16;
5820
5821 if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
5822 edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
5823
5824 if (edp_max_bpp_x16 < edp_min_bpp_x16)
5825 edp_min_bpp_x16 = edp_max_bpp_x16;
5826
5827 if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
5828 dc->debug.dsc_min_slice_height_override,
5829 edp_min_bpp_x16, edp_max_bpp_x16,
5830 dsc_caps,
5831 &stream->timing,
5832 &bw_range)) {
5833
5834 if (bw_range.max_kbps < link_bw_in_kbps) {
5835 if (dc_dsc_compute_config(dc->res_pool->dscs[0],
5836 dsc_caps,
de534c1c 5837 &dsc_options,
2665f63a
ML
5838 0,
5839 &stream->timing,
5840 &dsc_cfg)) {
5841 stream->timing.dsc_cfg = dsc_cfg;
5842 stream->timing.flags.DSC = 1;
5843 stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
5844 }
5845 return;
5846 }
5847 }
5848
5849 if (dc_dsc_compute_config(dc->res_pool->dscs[0],
5850 dsc_caps,
de534c1c 5851 &dsc_options,
2665f63a
ML
5852 link_bw_in_kbps,
5853 &stream->timing,
5854 &dsc_cfg)) {
5855 stream->timing.dsc_cfg = dsc_cfg;
5856 stream->timing.flags.DSC = 1;
5857 }
5858}
5859
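/*
 * Note on the "x16" values above (a sketch, not driver code): DSC
 * target bpp is carried in 1/16th-of-a-bit fixed point, so 8 * 16
 * encodes 8.0 bpp and max_target_bpp_limit_override_x16 is simply the
 * integer bpp limit scaled by 16.
 */
static unsigned int bpp_to_x16(unsigned int bpp)
{
	return bpp * 16;	/* 8 -> 128, i.e. 8.0 bpp */
}

static unsigned int x16_to_bpp_whole(unsigned int bpp_x16)
{
	return bpp_x16 / 16;	/* truncates the fractional part */
}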
5d945cbc 5860
998b7ad2 5861static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
5d945cbc
RS
5862 struct dc_sink *sink, struct dc_stream_state *stream,
5863 struct dsc_dec_dpcd_caps *dsc_caps)
998b7ad2
FZ
5864{
5865 struct drm_connector *drm_connector = &aconnector->base;
ae67558b 5866 u32 link_bandwidth_kbps;
2665f63a 5867 struct dc *dc = sink->ctx->dc;
ae67558b
SS
5868 u32 max_supported_bw_in_kbps, timing_bw_in_kbps;
5869 u32 dsc_max_supported_bw_in_kbps;
5870 u32 max_dsc_target_bpp_limit_override =
6e5abe94 5871 drm_connector->display_info.max_dsc_bpp;
de534c1c
MH
5872 struct dc_dsc_config_options dsc_options = {0};
5873
5874 dc_dsc_get_default_config_option(dc, &dsc_options);
5875 dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16;
998b7ad2
FZ
5876
5877 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5878 dc_link_get_link_cap(aconnector->dc_link));
de7cc1b4 5879
998b7ad2
FZ
5880 /* Set DSC policy according to dsc_clock_en */
5881 dc_dsc_policy_set_enable_dsc_when_not_needed(
5882 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5883
c17a34e0
IC
5884 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP &&
5885 !aconnector->dc_link->panel_config.dsc.disable_dsc_edp &&
2665f63a
ML
5886 dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
5887
5888 apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
5889
5890 } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
50b1f44e
FZ
5891 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
5892 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
998b7ad2 5893 dsc_caps,
de534c1c 5894 &dsc_options,
998b7ad2
FZ
5895 link_bandwidth_kbps,
5896 &stream->timing,
5897 &stream->timing.dsc_cfg)) {
50b1f44e 5898 stream->timing.flags.DSC = 1;
5d945cbc 5899 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
50b1f44e
FZ
5900 }
5901 } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
5902 timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
5903 max_supported_bw_in_kbps = link_bandwidth_kbps;
5904 dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
5905
5906 if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
5907 max_supported_bw_in_kbps > 0 &&
5908 dsc_max_supported_bw_in_kbps > 0)
5909 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5910 dsc_caps,
de534c1c 5911 &dsc_options,
50b1f44e
FZ
5912 dsc_max_supported_bw_in_kbps,
5913 &stream->timing,
5914 &stream->timing.dsc_cfg)) {
5915 stream->timing.flags.DSC = 1;
5916 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
5917 __func__, drm_connector->name);
5918 }
998b7ad2
FZ
5919 }
5920 }
5921
5922 /* Overwrite the stream flag if DSC is enabled through debugfs */
5923 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5924 stream->timing.flags.DSC = 1;
5925
5d945cbc
RS
5926 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5927 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
a85ba005 5928
5d945cbc
RS
5929 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5930 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
a85ba005 5931
5d945cbc
RS
5932 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5933 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
a85ba005
NC
5934}
5935
f11d9373 5936static struct dc_stream_state *
3ee6b26b
AD
5937create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5938 const struct drm_display_mode *drm_mode,
b333730d 5939 const struct dm_connector_state *dm_state,
cbd14ae7
SW
5940 const struct dc_stream_state *old_stream,
5941 int requested_bpc)
e7b07cee
HW
5942{
5943 struct drm_display_mode *preferred_mode = NULL;
391ef035 5944 struct drm_connector *drm_connector;
42ba01fc
NK
5945 const struct drm_connector_state *con_state =
5946 dm_state ? &dm_state->base : NULL;
0971c40e 5947 struct dc_stream_state *stream = NULL;
0a204ce0 5948 struct drm_display_mode mode;
a85ba005
NC
5949 struct drm_display_mode saved_mode;
5950 struct drm_display_mode *freesync_mode = NULL;
e7b07cee 5951 bool native_mode_found = false;
b0781603
NK
5952 bool recalculate_timing = false;
5953 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
b333730d 5954 int mode_refresh;
58124bf8 5955 int preferred_refresh = 0;
b1a98cf8 5956 enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN;
df2f1015 5957 struct dsc_dec_dpcd_caps dsc_caps;
5d945cbc 5958
aed15309 5959 struct dc_sink *sink = NULL;
a85ba005 5960
0a204ce0 5961 drm_mode_init(&mode, drm_mode);
a85ba005
NC
5962 memset(&saved_mode, 0, sizeof(saved_mode));
5963
b830ebc9 5964 if (aconnector == NULL) {
e7b07cee 5965 DRM_ERROR("aconnector is NULL!\n");
64245fa7 5966 return stream;
e7b07cee
HW
5967 }
5968
e7b07cee 5969 drm_connector = &aconnector->base;
2e0ac3d6 5970
f4ac176e 5971 if (!aconnector->dc_sink) {
e3fa5c4c
JFZ
5972 sink = create_fake_sink(aconnector);
5973 if (!sink)
5974 return stream;
aed15309
ML
5975 } else {
5976 sink = aconnector->dc_sink;
dcd5fb82 5977 dc_sink_retain(sink);
f4ac176e 5978 }
2e0ac3d6 5979
aed15309 5980 stream = dc_create_stream_for_sink(sink);
4562236b 5981
b830ebc9 5982 if (stream == NULL) {
e7b07cee 5983 DRM_ERROR("Failed to create stream for sink!\n");
aed15309 5984 goto finish;
e7b07cee
HW
5985 }
5986
ceb3dbb4
JL
5987 stream->dm_stream_context = aconnector;
5988
4a36fcba
WL
5989 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5990 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5991
e7b07cee
HW
5992 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5993 /* Search for preferred mode */
5994 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5995 native_mode_found = true;
5996 break;
5997 }
5998 }
5999 if (!native_mode_found)
6000 preferred_mode = list_first_entry_or_null(
6001 &aconnector->base.modes,
6002 struct drm_display_mode,
6003 head);
6004
b333730d
BL
6005 mode_refresh = drm_mode_vrefresh(&mode);
6006
b830ebc9 6007 if (preferred_mode == NULL) {
1f6010a9
DF
6008 /*
6009 * This may not be an error; the use case is when we have no
e7b07cee
HW
6010 * usermode calls to reset and set mode upon hotplug. In this
6011 * case, we call set mode ourselves to restore the previous mode
6012 * and the mode list may not have been filled in yet.
6013 */
f1ad2f5e 6014 DRM_DEBUG_DRIVER("No preferred mode found\n");
e7b07cee 6015 } else {
4243c84a
MD
6016 recalculate_timing = amdgpu_freesync_vid_mode &&
6017 is_freesync_video_mode(&mode, aconnector);
a85ba005
NC
6018 if (recalculate_timing) {
6019 freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
426c89aa
VS
6020 drm_mode_copy(&saved_mode, &mode);
6021 drm_mode_copy(&mode, freesync_mode);
a85ba005
NC
6022 } else {
6023 decide_crtc_timing_for_drm_display_mode(
5d945cbc 6024 &mode, preferred_mode, scale);
a85ba005 6025
b0781603
NK
6026 preferred_refresh = drm_mode_vrefresh(preferred_mode);
6027 }
e7b07cee
HW
6028 }
6029
a85ba005
NC
6030 if (recalculate_timing)
6031 drm_mode_set_crtcinfo(&saved_mode, 0);
fe8858bb 6032 else if (!dm_state)
f783577c
JFZ
6033 drm_mode_set_crtcinfo(&mode, 0);
6034
5d945cbc 6035 /*
b333730d
BL
6036 * If scaling is enabled and the refresh rate didn't change,
6037 * copy the VIC and polarities from the old timing.
6038 */
b0781603 6039 if (!scale || mode_refresh != preferred_refresh)
a85ba005
NC
6040 fill_stream_properties_from_drm_display_mode(
6041 stream, &mode, &aconnector->base, con_state, NULL,
6042 requested_bpc);
b333730d 6043 else
a85ba005
NC
6044 fill_stream_properties_from_drm_display_mode(
6045 stream, &mode, &aconnector->base, con_state, old_stream,
6046 requested_bpc);
b333730d 6047
028c4ccf
QZ
6048 if (aconnector->timing_changed) {
6049 DC_LOG_DEBUG("%s: overriding timing for automated test, bpc %d, changing to %d\n",
6050 __func__,
6051 stream->timing.display_color_depth,
6052 aconnector->timing_requested->display_color_depth);
6053 stream->timing = *aconnector->timing_requested;
6054 }
6055
998b7ad2
FZ
6056 /* SST DSC determination policy */
6057 update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6058 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6059 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
39a4eb85 6060
e7b07cee
HW
6061 update_stream_scaling_settings(&mode, dm_state, stream);
6062
6063 fill_audio_info(
6064 &stream->audio_info,
6065 drm_connector,
aed15309 6066 sink);
e7b07cee 6067
ceb3dbb4 6068 update_stream_signal(stream, sink);
9182b4cb 6069
d832fc3b 6070 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
75f77aaf
WL
6071 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6072
8a488f5d
RL
6073 if (stream->link->psr_settings.psr_feature_enabled) {
6074 //
6075 // should decide stream support vsc sdp colorimetry capability
6076 // before building vsc info packet
6077 //
6078 stream->use_vsc_sdp_for_colorimetry = false;
6079 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6080 stream->use_vsc_sdp_for_colorimetry =
6081 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6082 } else {
6083 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6084 stream->use_vsc_sdp_for_colorimetry = true;
8c322309 6085 }
b1a98cf8
MH
6086 if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22)
6087 tf = TRANSFER_FUNC_GAMMA_22;
6088 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
1a365683
RL
6089 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6090
8c322309 6091 }
aed15309 6092finish:
dcd5fb82 6093 dc_sink_release(sink);
9e3efe3e 6094
e7b07cee
HW
6095 return stream;
6096}
6097
e7b07cee
HW
6098static enum drm_connector_status
6099amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6100{
6101 bool connected;
c84dec2f 6102 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 6103
1f6010a9
DF
6104 /*
6105 * Notes:
e7b07cee
HW
6106 * 1. This interface is NOT called in context of HPD irq.
6107 * 2. This interface *is called* in context of user-mode ioctl, which
1f6010a9
DF
6108 * makes it a bad place for *any* MST-related activity.
6109 */
e7b07cee 6110
8580d60b
HW
6111 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6112 !aconnector->fake_enable)
e7b07cee
HW
6113 connected = (aconnector->dc_sink != NULL);
6114 else
5d945cbc
RS
6115 connected = (aconnector->base.force == DRM_FORCE_ON ||
6116 aconnector->base.force == DRM_FORCE_ON_DIGITAL);
e7b07cee 6117
0f877894
OV
6118 update_subconnector_property(aconnector);
6119
e7b07cee
HW
6120 return (connected ? connector_status_connected :
6121 connector_status_disconnected);
6122}
6123
3ee6b26b
AD
6124int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6125 struct drm_connector_state *connector_state,
6126 struct drm_property *property,
6127 uint64_t val)
e7b07cee
HW
6128{
6129 struct drm_device *dev = connector->dev;
1348969a 6130 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
6131 struct dm_connector_state *dm_old_state =
6132 to_dm_connector_state(connector->state);
6133 struct dm_connector_state *dm_new_state =
6134 to_dm_connector_state(connector_state);
6135
6136 int ret = -EINVAL;
6137
6138 if (property == dev->mode_config.scaling_mode_property) {
6139 enum amdgpu_rmx_type rmx_type;
6140
6141 switch (val) {
6142 case DRM_MODE_SCALE_CENTER:
6143 rmx_type = RMX_CENTER;
6144 break;
6145 case DRM_MODE_SCALE_ASPECT:
6146 rmx_type = RMX_ASPECT;
6147 break;
6148 case DRM_MODE_SCALE_FULLSCREEN:
6149 rmx_type = RMX_FULL;
6150 break;
6151 case DRM_MODE_SCALE_NONE:
6152 default:
6153 rmx_type = RMX_OFF;
6154 break;
6155 }
6156
6157 if (dm_old_state->scaling == rmx_type)
6158 return 0;
6159
6160 dm_new_state->scaling = rmx_type;
6161 ret = 0;
6162 } else if (property == adev->mode_info.underscan_hborder_property) {
6163 dm_new_state->underscan_hborder = val;
6164 ret = 0;
6165 } else if (property == adev->mode_info.underscan_vborder_property) {
6166 dm_new_state->underscan_vborder = val;
6167 ret = 0;
6168 } else if (property == adev->mode_info.underscan_property) {
6169 dm_new_state->underscan_enable = val;
6170 ret = 0;
c1ee92f9
DF
6171 } else if (property == adev->mode_info.abm_level_property) {
6172 dm_new_state->abm_level = val;
6173 ret = 0;
e7b07cee
HW
6174 }
6175
6176 return ret;
6177}
6178
3ee6b26b
AD
6179int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6180 const struct drm_connector_state *state,
6181 struct drm_property *property,
6182 uint64_t *val)
e7b07cee
HW
6183{
6184 struct drm_device *dev = connector->dev;
1348969a 6185 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
6186 struct dm_connector_state *dm_state =
6187 to_dm_connector_state(state);
6188 int ret = -EINVAL;
6189
6190 if (property == dev->mode_config.scaling_mode_property) {
6191 switch (dm_state->scaling) {
6192 case RMX_CENTER:
6193 *val = DRM_MODE_SCALE_CENTER;
6194 break;
6195 case RMX_ASPECT:
6196 *val = DRM_MODE_SCALE_ASPECT;
6197 break;
6198 case RMX_FULL:
6199 *val = DRM_MODE_SCALE_FULLSCREEN;
6200 break;
6201 case RMX_OFF:
6202 default:
6203 *val = DRM_MODE_SCALE_NONE;
6204 break;
6205 }
6206 ret = 0;
6207 } else if (property == adev->mode_info.underscan_hborder_property) {
6208 *val = dm_state->underscan_hborder;
6209 ret = 0;
6210 } else if (property == adev->mode_info.underscan_vborder_property) {
6211 *val = dm_state->underscan_vborder;
6212 ret = 0;
6213 } else if (property == adev->mode_info.underscan_property) {
6214 *val = dm_state->underscan_enable;
6215 ret = 0;
c1ee92f9
DF
6216 } else if (property == adev->mode_info.abm_level_property) {
6217 *val = dm_state->abm_level;
6218 ret = 0;
e7b07cee 6219 }
c1ee92f9 6220
e7b07cee
HW
6221 return ret;
6222}
6223
526c654a
ED
6224static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6225{
6226 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6227
6228 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6229}
6230
7578ecda 6231static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
e7b07cee 6232{
c84dec2f 6233 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
1348969a 6234 struct amdgpu_device *adev = drm_to_adev(connector->dev);
e7b07cee 6235 struct amdgpu_display_manager *dm = &adev->dm;
ada8ce15 6236
5dff80bd 6237 /*
5d945cbc 6238 * Call only if mst_mgr was initialized before since it's not done
5dff80bd
AG
6239 * for all connector types.
6240 */
6241 if (aconnector->mst_mgr.dev)
6242 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6243
f196198c
HG
6244 if (aconnector->bl_idx != -1) {
6245 backlight_device_unregister(dm->backlight_dev[aconnector->bl_idx]);
6246 dm->backlight_dev[aconnector->bl_idx] = NULL;
e7b07cee 6247 }
dcd5fb82
MF
6248
6249 if (aconnector->dc_em_sink)
6250 dc_sink_release(aconnector->dc_em_sink);
6251 aconnector->dc_em_sink = NULL;
6252 if (aconnector->dc_sink)
6253 dc_sink_release(aconnector->dc_sink);
6254 aconnector->dc_sink = NULL;
6255
e86e8947 6256 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
e7b07cee
HW
6257 drm_connector_unregister(connector);
6258 drm_connector_cleanup(connector);
526c654a
ED
6259 if (aconnector->i2c) {
6260 i2c_del_adapter(&aconnector->i2c->base);
6261 kfree(aconnector->i2c);
6262 }
7daec99f 6263 kfree(aconnector->dm_dp_aux.aux.name);
526c654a 6264
e7b07cee
HW
6265 kfree(connector);
6266}
6267
6268void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6269{
6270 struct dm_connector_state *state =
6271 to_dm_connector_state(connector->state);
6272
df099b9b
LSL
6273 if (connector->state)
6274 __drm_atomic_helper_connector_destroy_state(connector->state);
6275
e7b07cee
HW
6276 kfree(state);
6277
6278 state = kzalloc(sizeof(*state), GFP_KERNEL);
6279
6280 if (state) {
6281 state->scaling = RMX_OFF;
6282 state->underscan_enable = false;
6283 state->underscan_hborder = 0;
6284 state->underscan_vborder = 0;
01933ba4 6285 state->base.max_requested_bpc = 8;
3261e013
ML
6286 state->vcpi_slots = 0;
6287 state->pbn = 0;
5d945cbc 6288
c3e50f89
NK
6289 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6290 state->abm_level = amdgpu_dm_abm_level;
6291
df099b9b 6292 __drm_atomic_helper_connector_reset(connector, &state->base);
e7b07cee
HW
6293 }
6294}
6295
3ee6b26b
AD
6296struct drm_connector_state *
6297amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
e7b07cee
HW
6298{
6299 struct dm_connector_state *state =
6300 to_dm_connector_state(connector->state);
6301
6302 struct dm_connector_state *new_state =
6303 kmemdup(state, sizeof(*state), GFP_KERNEL);
6304
98e6436d
AK
6305 if (!new_state)
6306 return NULL;
e7b07cee 6307
98e6436d
AK
6308 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6309
6310 new_state->freesync_capable = state->freesync_capable;
c1ee92f9 6311 new_state->abm_level = state->abm_level;
922454c2
NK
6312 new_state->scaling = state->scaling;
6313 new_state->underscan_enable = state->underscan_enable;
6314 new_state->underscan_hborder = state->underscan_hborder;
6315 new_state->underscan_vborder = state->underscan_vborder;
3261e013
ML
6316 new_state->vcpi_slots = state->vcpi_slots;
6317 new_state->pbn = state->pbn;
98e6436d 6318 return &new_state->base;
e7b07cee
HW
6319}
6320
14f04fa4
AD
6321static int
6322amdgpu_dm_connector_late_register(struct drm_connector *connector)
6323{
6324 struct amdgpu_dm_connector *amdgpu_dm_connector =
6325 to_amdgpu_dm_connector(connector);
00a8037e 6326 int r;
14f04fa4 6327
62f03dad
HG
6328 amdgpu_dm_register_backlight_device(amdgpu_dm_connector);
6329
00a8037e
AD
6330 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6331 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6332 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6333 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6334 if (r)
6335 return r;
6336 }
6337
6338#if defined(CONFIG_DEBUG_FS)
14f04fa4
AD
6339 connector_debugfs_init(amdgpu_dm_connector);
6340#endif
6341
6342 return 0;
6343}
6344
dae343b3 6345static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
0ba4a784
AH
6346{
6347 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6348 struct dc_link *dc_link = aconnector->dc_link;
6349 struct dc_sink *dc_em_sink = aconnector->dc_em_sink;
6350 struct edid *edid;
6351
6352 if (!connector->edid_override)
6353 return;
6354
6355 drm_edid_override_connector_update(&aconnector->base);
6356 edid = aconnector->base.edid_blob_ptr->data;
6357 aconnector->edid = edid;
6358
6359 /* Update emulated (virtual) sink's EDID */
6360 if (dc_em_sink && dc_link) {
6361 memset(&dc_em_sink->edid_caps, 0, sizeof(struct dc_edid_caps));
6362 memmove(dc_em_sink->dc_edid.raw_edid, edid, (edid->extensions + 1) * EDID_LENGTH);
6363 dm_helpers_parse_edid_caps(
6364 dc_link,
6365 &dc_em_sink->dc_edid,
6366 &dc_em_sink->edid_caps);
6367 }
6368}
6369
e7b07cee
HW
6370static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6371 .reset = amdgpu_dm_connector_funcs_reset,
6372 .detect = amdgpu_dm_connector_detect,
6373 .fill_modes = drm_helper_probe_single_connector_modes,
6374 .destroy = amdgpu_dm_connector_destroy,
6375 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6376 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6377 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
526c654a 6378 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
14f04fa4 6379 .late_register = amdgpu_dm_connector_late_register,
0ba4a784
AH
6380 .early_unregister = amdgpu_dm_connector_unregister,
6381 .force = amdgpu_dm_connector_funcs_force
e7b07cee
HW
6382};
6383
e7b07cee
HW
6384static int get_modes(struct drm_connector *connector)
6385{
6386 return amdgpu_dm_connector_get_modes(connector);
6387}
6388
c84dec2f 6389static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
6390{
6391 struct dc_sink_init_data init_params = {
6392 .link = aconnector->dc_link,
6393 .sink_signal = SIGNAL_TYPE_VIRTUAL
6394 };
70e8ffc5 6395 struct edid *edid;
e7b07cee 6396
a89ff457 6397 if (!aconnector->base.edid_blob_ptr) {
550e5d23
HW
6398 /* If connector->edid_override is valid, apply the
6399 * override so that edid_blob_ptr gets populated.
6400 */
e7b07cee 6401
8789989b 6402 drm_edid_override_connector_update(&aconnector->base);
550e5d23
HW
6403
6404 if (!aconnector->base.edid_blob_ptr) {
6406			DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6406 aconnector->base.name);
6407
6408 aconnector->base.force = DRM_FORCE_OFF;
6409 return;
6410 }
e7b07cee
HW
6411 }
6412
70e8ffc5
HW
6413 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6414
e7b07cee
HW
6415 aconnector->edid = edid;
6416
6417 aconnector->dc_em_sink = dc_link_add_remote_sink(
6418 aconnector->dc_link,
6419 (uint8_t *)edid,
6420 (edid->extensions + 1) * EDID_LENGTH,
6421 &init_params);
6422
dcd5fb82 6423 if (aconnector->base.force == DRM_FORCE_ON) {
e7b07cee
HW
6424 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6425 aconnector->dc_link->local_sink :
6426 aconnector->dc_em_sink;
dcd5fb82
MF
6427 dc_sink_retain(aconnector->dc_sink);
6428 }
e7b07cee
HW
6429}
6430
c84dec2f 6431static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
6432{
6433 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6434
1f6010a9
DF
6435 /*
6436	 * In case of a headless boot with force on for a DP managed connector,
e7b07cee
HW
6437	 * those settings have to be != 0 to get the initial modeset.
6438 */
6439 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6440 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6441 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6442 }
6443
e7b07cee
HW
6444 create_eml_sink(aconnector);
6445}
6446
5468c36d
FZ
6447static enum dc_status dm_validate_stream_and_context(struct dc *dc,
6448 struct dc_stream_state *stream)
6449{
6450 enum dc_status dc_result = DC_ERROR_UNEXPECTED;
6451 struct dc_plane_state *dc_plane_state = NULL;
6452 struct dc_state *dc_state = NULL;
6453
6454 if (!stream)
6455 goto cleanup;
6456
6457 dc_plane_state = dc_create_plane_state(dc);
6458 if (!dc_plane_state)
6459 goto cleanup;
6460
6461 dc_state = dc_create_state(dc);
6462 if (!dc_state)
6463 goto cleanup;
6464
6465	/* populate the plane from the stream */
6466 dc_plane_state->src_rect.height = stream->src.height;
6467 dc_plane_state->src_rect.width = stream->src.width;
6468 dc_plane_state->dst_rect.height = stream->src.height;
6469 dc_plane_state->dst_rect.width = stream->src.width;
6470 dc_plane_state->clip_rect.height = stream->src.height;
6471 dc_plane_state->clip_rect.width = stream->src.width;
6472 dc_plane_state->plane_size.surface_pitch = ((stream->src.width + 255) / 256) * 256;
6473 dc_plane_state->plane_size.surface_size.height = stream->src.height;
6474 dc_plane_state->plane_size.surface_size.width = stream->src.width;
6475 dc_plane_state->plane_size.chroma_size.height = stream->src.height;
6476 dc_plane_state->plane_size.chroma_size.width = stream->src.width;
5468c36d
FZ
6477 dc_plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
6478 dc_plane_state->tiling_info.gfx9.swizzle = DC_SW_UNKNOWN;
6479 dc_plane_state->rotation = ROTATION_ANGLE_0;
6480 dc_plane_state->is_tiling_rotated = false;
6481 dc_plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_LINEAR_GENERAL;
6482
6483 dc_result = dc_validate_stream(dc, stream);
6484 if (dc_result == DC_OK)
6485 dc_result = dc_validate_plane(dc, dc_plane_state);
6486
6487 if (dc_result == DC_OK)
6488 dc_result = dc_add_stream_to_ctx(dc, dc_state, stream);
6489
6490 if (dc_result == DC_OK && !dc_add_plane_to_context(
6491 dc,
6492 stream,
6493 dc_plane_state,
6494 dc_state))
6495 dc_result = DC_FAIL_ATTACH_SURFACES;
6496
6497 if (dc_result == DC_OK)
6498 dc_result = dc_validate_global_state(dc, dc_state, true);
6499
6500cleanup:
6501 if (dc_state)
6502 dc_release_state(dc_state);
6503
6504 if (dc_plane_state)
6505 dc_plane_state_release(dc_plane_state);
6506
6507 return dc_result;
6508}
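
/*
 * Hedged sketch (not part of the driver): the surface_pitch computed in
 * dm_validate_stream_and_context() rounds the stream width up to the next
 * multiple of 256. The kernel's ALIGN() macro expresses the same rounding,
 * e.g. ALIGN(1920, 256) == 2048:
 */
static inline unsigned int example_surface_pitch(unsigned int width)
{
	return ALIGN(width, 256); /* same as ((width + 255) / 256) * 256 */
}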
6509
17ce8a69 6510struct dc_stream_state *
cbd14ae7
SW
6511create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6512 const struct drm_display_mode *drm_mode,
6513 const struct dm_connector_state *dm_state,
6514 const struct dc_stream_state *old_stream)
6515{
6516 struct drm_connector *connector = &aconnector->base;
1348969a 6517 struct amdgpu_device *adev = drm_to_adev(connector->dev);
cbd14ae7 6518 struct dc_stream_state *stream;
4b7da34b
SW
6519 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6520 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
cbd14ae7
SW
6521 enum dc_status dc_result = DC_OK;
6522
6523 do {
6524 stream = create_stream_for_sink(aconnector, drm_mode,
6525 dm_state, old_stream,
6526 requested_bpc);
6527 if (stream == NULL) {
6528 DRM_ERROR("Failed to create stream for sink!\n");
6529 break;
6530 }
6531
e9a7d236
RS
6532 dc_result = dc_validate_stream(adev->dm.dc, stream);
6533 if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
f04d275d 6534 dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);
6535
5468c36d
FZ
6536 if (dc_result == DC_OK)
6537 dc_result = dm_validate_stream_and_context(adev->dm.dc, stream);
6538
cbd14ae7 6539 if (dc_result != DC_OK) {
74a16675 6540 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
cbd14ae7
SW
6541 drm_mode->hdisplay,
6542 drm_mode->vdisplay,
6543 drm_mode->clock,
74a16675
RS
6544 dc_result,
6545 dc_status_to_str(dc_result));
cbd14ae7
SW
6546
6547 dc_stream_release(stream);
6548 stream = NULL;
6549 requested_bpc -= 2; /* lower bpc to retry validation */
6550 }
6551
6552 } while (stream == NULL && requested_bpc >= 6);
6553
68eb3ae3
WS
6554 if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6555 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6556
6557 aconnector->force_yuv420_output = true;
6558 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6559 dm_state, old_stream);
6560 aconnector->force_yuv420_output = false;
6561 }
6562
cbd14ae7
SW
6563 return stream;
6564}
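
/*
 * Hedged illustration of the fallback policy above: with
 * max_requested_bpc == 10, the loop validates the stream at 10 bpc, then 8,
 * then 6, and gives up below 6. A final recursive retry that forces
 * YCbCr 4:2:0 output is attempted only for DC_FAIL_ENC_VALIDATE.
 */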
6565
ba9ca088 6566enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
3ee6b26b 6567 struct drm_display_mode *mode)
e7b07cee
HW
6568{
6569 int result = MODE_ERROR;
6570 struct dc_sink *dc_sink;
e7b07cee 6571 /* TODO: Unhardcode stream count */
0971c40e 6572 struct dc_stream_state *stream;
c84dec2f 6573 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
6574
6575 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6576 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6577 return result;
6578
1f6010a9
DF
6579 /*
6580	 * Only run this the first time mode_valid is called to initialize
e7b07cee
HW
6581 * EDID mgmt
6582 */
6583 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6584 !aconnector->dc_em_sink)
6585 handle_edid_mgmt(aconnector);
6586
c84dec2f 6587 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
e7b07cee 6588
ad975f44
VL
6589 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6590 aconnector->base.force != DRM_FORCE_ON) {
e7b07cee
HW
6591 DRM_ERROR("dc_sink is NULL!\n");
6592 goto fail;
6593 }
6594
cbd14ae7
SW
6595 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6596 if (stream) {
6597 dc_stream_release(stream);
e7b07cee 6598 result = MODE_OK;
cbd14ae7 6599 }
e7b07cee
HW
6600
6601fail:
6602	/* TODO: error handling */
6603 return result;
6604}
6605
88694af9
NK
6606static int fill_hdr_info_packet(const struct drm_connector_state *state,
6607 struct dc_info_packet *out)
6608{
6609 struct hdmi_drm_infoframe frame;
6610 unsigned char buf[30]; /* 26 + 4 */
6611 ssize_t len;
6612 int ret, i;
6613
6614 memset(out, 0, sizeof(*out));
6615
6616 if (!state->hdr_output_metadata)
6617 return 0;
6618
6619 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6620 if (ret)
6621 return ret;
6622
6623 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6624 if (len < 0)
6625 return (int)len;
6626
6627 /* Static metadata is a fixed 26 bytes + 4 byte header. */
6628 if (len != 30)
6629 return -EINVAL;
6630
6631 /* Prepare the infopacket for DC. */
6632 switch (state->connector->connector_type) {
6633 case DRM_MODE_CONNECTOR_HDMIA:
6634 out->hb0 = 0x87; /* type */
6635 out->hb1 = 0x01; /* version */
6636 out->hb2 = 0x1A; /* length */
6637 out->sb[0] = buf[3]; /* checksum */
6638 i = 1;
6639 break;
6640
6641 case DRM_MODE_CONNECTOR_DisplayPort:
6642 case DRM_MODE_CONNECTOR_eDP:
6643 out->hb0 = 0x00; /* sdp id, zero */
6644 out->hb1 = 0x87; /* type */
6645 out->hb2 = 0x1D; /* payload len - 1 */
6646 out->hb3 = (0x13 << 2); /* sdp version */
6647 out->sb[0] = 0x01; /* version */
6648 out->sb[1] = 0x1A; /* length */
6649 i = 2;
6650 break;
6651
6652 default:
6653 return -EINVAL;
6654 }
6655
6656 memcpy(&out->sb[i], &buf[4], 26);
6657 out->valid = true;
6658
6659 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6660 sizeof(out->sb), false);
6661
6662 return 0;
6663}
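
/*
 * Hedged illustration of the packed buffer consumed above: for the HDMI
 * case the code maps the hdmi_drm_infoframe_pack_only() output as
 *
 *   buf[0] = 0x87   infoframe type (Dynamic Range and Mastering)
 *   buf[1] = 0x01   version
 *   buf[2] = 0x1A   payload length (26 bytes of static metadata)
 *   buf[3]          checksum, copied to out->sb[0]
 *   buf[4..29]      26-byte payload, copied into out->sb[] above
 */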
6664
88694af9
NK
6665static int
6666amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
51e857af 6667 struct drm_atomic_state *state)
88694af9 6668{
51e857af
SP
6669 struct drm_connector_state *new_con_state =
6670 drm_atomic_get_new_connector_state(state, conn);
88694af9
NK
6671 struct drm_connector_state *old_con_state =
6672 drm_atomic_get_old_connector_state(state, conn);
6673 struct drm_crtc *crtc = new_con_state->crtc;
6674 struct drm_crtc_state *new_crtc_state;
a76eb429 6675 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(conn);
88694af9
NK
6676 int ret;
6677
e8a98235
RS
6678 trace_amdgpu_dm_connector_atomic_check(new_con_state);
6679
a76eb429
LP
6680 if (conn->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
6681 ret = drm_dp_mst_root_conn_atomic_check(new_con_state, &aconn->mst_mgr);
6682 if (ret < 0)
6683 return ret;
6684 }
6685
88694af9
NK
6686 if (!crtc)
6687 return 0;
6688
72921cdf 6689 if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
88694af9
NK
6690 struct dc_info_packet hdr_infopacket;
6691
6692 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6693 if (ret)
6694 return ret;
6695
6696 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6697 if (IS_ERR(new_crtc_state))
6698 return PTR_ERR(new_crtc_state);
6699
6700 /*
6701 * DC considers the stream backends changed if the
6702 * static metadata changes. Forcing the modeset also
6703 * gives a simple way for userspace to switch from
b232d4ed
NK
6704 * 8bpc to 10bpc when setting the metadata to enter
6705 * or exit HDR.
6706 *
6707 * Changing the static metadata after it's been
6708 * set is permissible, however. So only force a
6709 * modeset if we're entering or exiting HDR.
88694af9 6710 */
b232d4ed
NK
6711 new_crtc_state->mode_changed =
6712 !old_con_state->hdr_output_metadata ||
6713 !new_con_state->hdr_output_metadata;
88694af9
NK
6714 }
6715
6716 return 0;
6717}
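
/*
 * Hedged restatement of the modeset rule above: given the metadata blobs
 * differ, a modeset is forced only when HDR is being entered or exited,
 * i.e. when exactly one of the blobs is absent:
 *
 *   mode_changed = (old_meta == NULL) || (new_meta == NULL);
 *
 * Two different non-NULL blobs update the metadata without a modeset.
 */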
6718
e7b07cee
HW
6719static const struct drm_connector_helper_funcs
6720amdgpu_dm_connector_helper_funcs = {
6721 /*
1f6010a9 6722	 * If hotplugging a second, bigger display in FB console mode, the bigger
b830ebc9 6723	 * resolution modes will be filtered out by drm_mode_validate_size(), and
1f6010a9 6724	 * those modes are missing after the user starts lightdm. So we need to
b830ebc9
HW
6725	 * renew the modes list in the get_modes callback, not just return the modes count.
6726 */
e7b07cee
HW
6727 .get_modes = get_modes,
6728 .mode_valid = amdgpu_dm_connector_mode_valid,
88694af9 6729 .atomic_check = amdgpu_dm_connector_atomic_check,
e7b07cee
HW
6730};
6731
e7b07cee
HW
6732static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6733{
6734
6735}
6736
f04d275d 6737int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
3261e013
ML
6738{
6739 switch (display_color_depth) {
5d945cbc
RS
6740 case COLOR_DEPTH_666:
6741 return 6;
6742 case COLOR_DEPTH_888:
6743 return 8;
6744 case COLOR_DEPTH_101010:
6745 return 10;
6746 case COLOR_DEPTH_121212:
6747 return 12;
6748 case COLOR_DEPTH_141414:
6749 return 14;
6750 case COLOR_DEPTH_161616:
6751 return 16;
6752 default:
6753 break;
6754 }
3261e013
ML
6755 return 0;
6756}
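
/*
 * Hedged usage note: DC color depth is bits per component, and a packed RGB
 * pixel carries three components, so e.g. COLOR_DEPTH_101010 yields
 * convert_dc_color_depth_into_bpc() == 10 and 10 * 3 == 30 bpp, the value
 * fed into the PBN calculation in the encoder atomic check below.
 */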
6757
3ee6b26b
AD
6758static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6759 struct drm_crtc_state *crtc_state,
6760 struct drm_connector_state *conn_state)
e7b07cee 6761{
3261e013
ML
6762 struct drm_atomic_state *state = crtc_state->state;
6763 struct drm_connector *connector = conn_state->connector;
6764 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6765 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6766 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6767 struct drm_dp_mst_topology_mgr *mst_mgr;
6768 struct drm_dp_mst_port *mst_port;
4d07b0bc 6769 struct drm_dp_mst_topology_state *mst_state;
3261e013
ML
6770 enum dc_color_depth color_depth;
6771 int clock, bpp = 0;
1bc22f20 6772 bool is_y420 = false;
3261e013 6773
f0127cb1 6774 if (!aconnector->mst_output_port || !aconnector->dc_sink)
3261e013
ML
6775 return 0;
6776
f0127cb1
WL
6777 mst_port = aconnector->mst_output_port;
6778 mst_mgr = &aconnector->mst_root->mst_mgr;
3261e013
ML
6779
6780 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6781 return 0;
6782
4d07b0bc
LP
6783 mst_state = drm_atomic_get_mst_topology_state(state, mst_mgr);
6784 if (IS_ERR(mst_state))
6785 return PTR_ERR(mst_state);
6786
6787 if (!mst_state->pbn_div)
f0127cb1 6788 mst_state->pbn_div = dm_mst_get_pbn_divider(aconnector->mst_root->dc_link);
4d07b0bc 6789
3261e013 6790 if (!state->duplicated) {
cbd14ae7 6791 int max_bpc = conn_state->max_requested_bpc;
1bc22f20 6792 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5d945cbc 6793 aconnector->force_yuv420_output;
cbd14ae7
SW
6794 color_depth = convert_color_depth_from_display_info(connector,
6795 is_y420,
6796 max_bpc);
3261e013
ML
6797 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6798 clock = adjusted_mode->clock;
dc48529f 6799 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
3261e013 6800 }
4d07b0bc
LP
6801
6802 dm_new_connector_state->vcpi_slots =
6803 drm_dp_atomic_find_time_slots(state, mst_mgr, mst_port,
6804 dm_new_connector_state->pbn);
3261e013
ML
6805 if (dm_new_connector_state->vcpi_slots < 0) {
6806 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6807 return dm_new_connector_state->vcpi_slots;
6808 }
e7b07cee
HW
6809 return 0;
6810}
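
/*
 * Hedged sketch of the PBN math used above (see drm_dp_calc_pbn_mode()):
 * PBN counts 54/64 MByte/s units and DRM adds a ~0.6% margin, roughly
 *
 *   pbn = DIV_ROUND_UP(clock_khz * bpp * 64 * 1006,
 *                      8 * 54 * 1000 * 1000);
 *
 * e.g. a 4K60 stream (clock 594000 kHz) at 24 bpp needs about 2125 PBN.
 */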
6811
6812const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6813 .disable = dm_encoder_helper_disable,
6814 .atomic_check = dm_encoder_helper_atomic_check
6815};
6816
29b9ba74 6817static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6513104b
HW
6818 struct dc_state *dc_state,
6819 struct dsc_mst_fairness_vars *vars)
29b9ba74
ML
6820{
6821 struct dc_stream_state *stream = NULL;
6822 struct drm_connector *connector;
5760dcb9 6823 struct drm_connector_state *new_con_state;
29b9ba74
ML
6824 struct amdgpu_dm_connector *aconnector;
6825 struct dm_connector_state *dm_conn_state;
7cce4cd6 6826 int i, j, ret;
a550bb16 6827 int vcpi, pbn_div, pbn, slot_num = 0;
29b9ba74 6828
5760dcb9 6829 for_each_new_connector_in_state(state, connector, new_con_state, i) {
29b9ba74
ML
6830
6831 aconnector = to_amdgpu_dm_connector(connector);
6832
f0127cb1 6833 if (!aconnector->mst_output_port)
29b9ba74
ML
6834 continue;
6835
6836 if (!new_con_state || !new_con_state->crtc)
6837 continue;
6838
6839 dm_conn_state = to_dm_connector_state(new_con_state);
6840
6841 for (j = 0; j < dc_state->stream_count; j++) {
6842 stream = dc_state->streams[j];
6843 if (!stream)
6844 continue;
6845
5d945cbc 6846 if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
29b9ba74
ML
6847 break;
6848
6849 stream = NULL;
6850 }
6851
6852 if (!stream)
6853 continue;
6854
29b9ba74 6855 pbn_div = dm_mst_get_pbn_divider(stream->link);
6513104b
HW
6856		/* pbn is calculated by compute_mst_dsc_configs_for_state() */
6857 for (j = 0; j < dc_state->stream_count; j++) {
6858 if (vars[j].aconnector == aconnector) {
6859 pbn = vars[j].pbn;
6860 break;
6861 }
6862 }
6863
a550bb16
HW
6864 if (j == dc_state->stream_count)
6865 continue;
6866
6867 slot_num = DIV_ROUND_UP(pbn, pbn_div);
6868
6869 if (stream->timing.flags.DSC != 1) {
6870 dm_conn_state->pbn = pbn;
6871 dm_conn_state->vcpi_slots = slot_num;
6872
f0127cb1 6873 ret = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port,
7cce4cd6
LP
6874 dm_conn_state->pbn, false);
6875 if (ret < 0)
6876 return ret;
6877
a550bb16
HW
6878 continue;
6879 }
6880
f0127cb1 6881 vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port, pbn, true);
29b9ba74
ML
6882 if (vcpi < 0)
6883 return vcpi;
6884
6885 dm_conn_state->pbn = pbn;
6886 dm_conn_state->vcpi_slots = vcpi;
6887 }
6888 return 0;
6889}
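
/*
 * Hedged worked example for the slot math above: an HBR2 x4 MST link
 * carries 2560 PBN across 64 time slots, so pbn_div is 40 PBN per slot,
 * and a 2125 PBN stream needs DIV_ROUND_UP(2125, 40) == 54 slots.
 */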
6890
e7b07cee
HW
6891static int to_drm_connector_type(enum signal_type st)
6892{
6893 switch (st) {
6894 case SIGNAL_TYPE_HDMI_TYPE_A:
6895 return DRM_MODE_CONNECTOR_HDMIA;
6896 case SIGNAL_TYPE_EDP:
6897 return DRM_MODE_CONNECTOR_eDP;
11c3ee48
AD
6898 case SIGNAL_TYPE_LVDS:
6899 return DRM_MODE_CONNECTOR_LVDS;
e7b07cee
HW
6900 case SIGNAL_TYPE_RGB:
6901 return DRM_MODE_CONNECTOR_VGA;
6902 case SIGNAL_TYPE_DISPLAY_PORT:
6903 case SIGNAL_TYPE_DISPLAY_PORT_MST:
6904 return DRM_MODE_CONNECTOR_DisplayPort;
6905 case SIGNAL_TYPE_DVI_DUAL_LINK:
6906 case SIGNAL_TYPE_DVI_SINGLE_LINK:
6907 return DRM_MODE_CONNECTOR_DVID;
6908 case SIGNAL_TYPE_VIRTUAL:
6909 return DRM_MODE_CONNECTOR_VIRTUAL;
6910
6911 default:
6912 return DRM_MODE_CONNECTOR_Unknown;
6913 }
6914}
6915
2b4c1c05
DV
6916static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6917{
62afb4ad
JRS
6918 struct drm_encoder *encoder;
6919
6920 /* There is only one encoder per connector */
6921 drm_connector_for_each_possible_encoder(connector, encoder)
6922 return encoder;
6923
6924 return NULL;
2b4c1c05
DV
6925}
6926
e7b07cee
HW
6927static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6928{
e7b07cee
HW
6929 struct drm_encoder *encoder;
6930 struct amdgpu_encoder *amdgpu_encoder;
6931
2b4c1c05 6932 encoder = amdgpu_dm_connector_to_encoder(connector);
e7b07cee
HW
6933
6934 if (encoder == NULL)
6935 return;
6936
6937 amdgpu_encoder = to_amdgpu_encoder(encoder);
6938
6939 amdgpu_encoder->native_mode.clock = 0;
6940
6941 if (!list_empty(&connector->probed_modes)) {
6942 struct drm_display_mode *preferred_mode = NULL;
b830ebc9 6943
e7b07cee 6944 list_for_each_entry(preferred_mode,
b830ebc9
HW
6945 &connector->probed_modes,
6946 head) {
6947 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6948 amdgpu_encoder->native_mode = *preferred_mode;
6949
e7b07cee
HW
6950 break;
6951 }
6952
6953 }
6954}
6955
3ee6b26b
AD
6956static struct drm_display_mode *
6957amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6958 char *name,
6959 int hdisplay, int vdisplay)
e7b07cee
HW
6960{
6961 struct drm_device *dev = encoder->dev;
6962 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6963 struct drm_display_mode *mode = NULL;
6964 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6965
6966 mode = drm_mode_duplicate(dev, native_mode);
6967
b830ebc9 6968 if (mode == NULL)
e7b07cee
HW
6969 return NULL;
6970
6971 mode->hdisplay = hdisplay;
6972 mode->vdisplay = vdisplay;
6973 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
090afc1e 6974 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
e7b07cee
HW
6975
6976 return mode;
6977
6978}
6979
6980static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
3ee6b26b 6981 struct drm_connector *connector)
e7b07cee
HW
6982{
6983 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6984 struct drm_display_mode *mode = NULL;
6985 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
c84dec2f
HW
6986 struct amdgpu_dm_connector *amdgpu_dm_connector =
6987 to_amdgpu_dm_connector(connector);
e7b07cee
HW
6988 int i;
6989 int n;
6990 struct mode_size {
6991 char name[DRM_DISPLAY_MODE_LEN];
6992 int w;
6993 int h;
b830ebc9 6994 } common_modes[] = {
e7b07cee
HW
6995 { "640x480", 640, 480},
6996 { "800x600", 800, 600},
6997 { "1024x768", 1024, 768},
6998 { "1280x720", 1280, 720},
6999 { "1280x800", 1280, 800},
7000 {"1280x1024", 1280, 1024},
7001 { "1440x900", 1440, 900},
7002 {"1680x1050", 1680, 1050},
7003 {"1600x1200", 1600, 1200},
7004 {"1920x1080", 1920, 1080},
7005 {"1920x1200", 1920, 1200}
7006 };
7007
b830ebc9 7008 n = ARRAY_SIZE(common_modes);
e7b07cee
HW
7009
7010 for (i = 0; i < n; i++) {
7011 struct drm_display_mode *curmode = NULL;
7012 bool mode_existed = false;
7013
7014 if (common_modes[i].w > native_mode->hdisplay ||
b830ebc9
HW
7015 common_modes[i].h > native_mode->vdisplay ||
7016 (common_modes[i].w == native_mode->hdisplay &&
7017 common_modes[i].h == native_mode->vdisplay))
7018 continue;
e7b07cee
HW
7019
7020 list_for_each_entry(curmode, &connector->probed_modes, head) {
7021 if (common_modes[i].w == curmode->hdisplay &&
b830ebc9 7022 common_modes[i].h == curmode->vdisplay) {
e7b07cee
HW
7023 mode_existed = true;
7024 break;
7025 }
7026 }
7027
7028 if (mode_existed)
7029 continue;
7030
7031 mode = amdgpu_dm_create_common_mode(encoder,
7032 common_modes[i].name, common_modes[i].w,
7033 common_modes[i].h);
588a7017
ZQ
7034 if (!mode)
7035 continue;
7036
e7b07cee 7037 drm_mode_probed_add(connector, mode);
c84dec2f 7038 amdgpu_dm_connector->num_modes++;
e7b07cee
HW
7039 }
7040}
7041
d77de788
SS
7042static void amdgpu_set_panel_orientation(struct drm_connector *connector)
7043{
7044 struct drm_encoder *encoder;
7045 struct amdgpu_encoder *amdgpu_encoder;
7046 const struct drm_display_mode *native_mode;
7047
7048 if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
7049 connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
7050 return;
7051
acc96ae0
MW
7052 mutex_lock(&connector->dev->mode_config.mutex);
7053 amdgpu_dm_connector_get_modes(connector);
7054 mutex_unlock(&connector->dev->mode_config.mutex);
7055
d77de788
SS
7056 encoder = amdgpu_dm_connector_to_encoder(connector);
7057 if (!encoder)
7058 return;
7059
7060 amdgpu_encoder = to_amdgpu_encoder(encoder);
7061
7062 native_mode = &amdgpu_encoder->native_mode;
7063 if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
7064 return;
7065
7066 drm_connector_set_panel_orientation_with_quirk(connector,
7067 DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
7068 native_mode->hdisplay,
7069 native_mode->vdisplay);
7070}
7071
3ee6b26b
AD
7072static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7073 struct edid *edid)
e7b07cee 7074{
c84dec2f
HW
7075 struct amdgpu_dm_connector *amdgpu_dm_connector =
7076 to_amdgpu_dm_connector(connector);
e7b07cee
HW
7077
7078 if (edid) {
7079 /* empty probed_modes */
7080 INIT_LIST_HEAD(&connector->probed_modes);
c84dec2f 7081 amdgpu_dm_connector->num_modes =
e7b07cee
HW
7082 drm_add_edid_modes(connector, edid);
7083
f1e5e913
YMM
7084		/* Sort the probed modes before calling
7085		 * amdgpu_dm_get_native_mode(), since the EDID can have
7086		 * more than one preferred mode. Modes that appear
7087		 * later in the probed mode list could be of a higher,
7088		 * preferred resolution; for example, a 3840x2160
7089		 * preferred timing in the base EDID and a 4096x2160
7090		 * preferred resolution in a later DID extension block.
7091		 */
7092 drm_mode_sort(&connector->probed_modes);
e7b07cee 7093 amdgpu_dm_get_native_mode(connector);
f9b4f20c
SW
7094
7095 /* Freesync capabilities are reset by calling
7096 * drm_add_edid_modes() and need to be
7097 * restored here.
7098 */
7099 amdgpu_dm_update_freesync_caps(connector, edid);
a8d8d3dc 7100 } else {
c84dec2f 7101 amdgpu_dm_connector->num_modes = 0;
a8d8d3dc 7102 }
e7b07cee
HW
7103}
7104
a85ba005
NC
7105static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7106 struct drm_display_mode *mode)
7107{
7108 struct drm_display_mode *m;
7109
7110	list_for_each_entry(m, &aconnector->base.probed_modes, head) {
7111 if (drm_mode_equal(m, mode))
7112 return true;
7113 }
7114
7115 return false;
7116}
7117
7118static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7119{
7120 const struct drm_display_mode *m;
7121 struct drm_display_mode *new_mode;
7122 uint i;
ae67558b 7123 u32 new_modes_count = 0;
a85ba005
NC
7124
7125 /* Standard FPS values
7126 *
12cdff6b 7127 * 23.976 - TV/NTSC
3335a135
UKK
7128 * 24 - Cinema
7129 * 25 - TV/PAL
12cdff6b 7130 * 29.97 - TV/NTSC
3335a135
UKK
7131 * 30 - TV/NTSC
7132 * 48 - Cinema HFR
7133 * 50 - TV/PAL
7134 * 60 - Commonly used
12cdff6b 7135	 * 48, 72, 96, 120 - Multiples of 24
a85ba005 7136 */
ae67558b 7137 static const u32 common_rates[] = {
9ce5ed6e 7138 23976, 24000, 25000, 29970, 30000,
12cdff6b 7139 48000, 50000, 60000, 72000, 96000, 120000
9ce5ed6e 7140 };
a85ba005
NC
7141
7142 /*
7143 * Find mode with highest refresh rate with the same resolution
7144 * as the preferred mode. Some monitors report a preferred mode
7145 * with lower resolution than the highest refresh rate supported.
7146 */
7147
7148 m = get_highest_refresh_rate_mode(aconnector, true);
7149 if (!m)
7150 return 0;
7151
7152 for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
ae67558b
SS
7153 u64 target_vtotal, target_vtotal_diff;
7154 u64 num, den;
a85ba005
NC
7155
7156 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7157 continue;
7158
7159 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7160 common_rates[i] > aconnector->max_vfreq * 1000)
7161 continue;
7162
7163 num = (unsigned long long)m->clock * 1000 * 1000;
7164 den = common_rates[i] * (unsigned long long)m->htotal;
7165 target_vtotal = div_u64(num, den);
7166 target_vtotal_diff = target_vtotal - m->vtotal;
7167
7168 /* Check for illegal modes */
7169 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7170 m->vsync_end + target_vtotal_diff < m->vsync_start ||
7171 m->vtotal + target_vtotal_diff < m->vsync_end)
7172 continue;
7173
7174 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7175 if (!new_mode)
7176 goto out;
7177
7178 new_mode->vtotal += (u16)target_vtotal_diff;
7179 new_mode->vsync_start += (u16)target_vtotal_diff;
7180 new_mode->vsync_end += (u16)target_vtotal_diff;
7181 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7182 new_mode->type |= DRM_MODE_TYPE_DRIVER;
7183
7184 if (!is_duplicate_mode(aconnector, new_mode)) {
7185 drm_mode_probed_add(&aconnector->base, new_mode);
7186 new_modes_count += 1;
7187 } else
7188 drm_mode_destroy(aconnector->base.dev, new_mode);
7189 }
7190 out:
7191 return new_modes_count;
7192}
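
/*
 * Hedged worked example for add_fs_modes(): a 1920x1080 mode with
 * clock 148500 kHz, htotal 2200 and vtotal 1125 (60 Hz) retargeted to
 * 48 Hz gives target_vtotal = 148500000000 / (48000 * 2200) ~= 1406,
 * so vtotal, vsync_start and vsync_end are each stretched by 281 lines.
 */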
7193
7194static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7195 struct edid *edid)
7196{
7197 struct amdgpu_dm_connector *amdgpu_dm_connector =
7198 to_amdgpu_dm_connector(connector);
7199
4243c84a 7200 if (!(amdgpu_freesync_vid_mode && edid))
a85ba005 7201 return;
fe8858bb 7202
a85ba005
NC
7203 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7204 amdgpu_dm_connector->num_modes +=
7205 add_fs_modes(amdgpu_dm_connector);
7206}
7207
7578ecda 7208static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
e7b07cee 7209{
c84dec2f
HW
7210 struct amdgpu_dm_connector *amdgpu_dm_connector =
7211 to_amdgpu_dm_connector(connector);
e7b07cee 7212 struct drm_encoder *encoder;
c84dec2f 7213 struct edid *edid = amdgpu_dm_connector->edid;
c32699ca
JD
7214 struct dc_link_settings *verified_link_cap =
7215 &amdgpu_dm_connector->dc_link->verified_link_cap;
98ce7d32 7216 const struct dc *dc = amdgpu_dm_connector->dc_link->dc;
e7b07cee 7217
2b4c1c05 7218 encoder = amdgpu_dm_connector_to_encoder(connector);
3e332d3a 7219
5c0e6840 7220 if (!drm_edid_is_valid(edid)) {
1b369d3c
ML
7221 amdgpu_dm_connector->num_modes =
7222 drm_add_modes_noedid(connector, 640, 480);
98ce7d32 7223 if (dc->link_srv->dp_get_encoding_format(verified_link_cap) == DP_128b_132b_ENCODING)
c32699ca
JD
7224 amdgpu_dm_connector->num_modes +=
7225 drm_add_modes_noedid(connector, 1920, 1080);
85ee15d6
ML
7226 } else {
7227 amdgpu_dm_connector_ddc_get_modes(connector, edid);
7228 amdgpu_dm_connector_add_common_modes(encoder, connector);
a85ba005 7229 amdgpu_dm_connector_add_freesync_modes(connector, edid);
85ee15d6 7230 }
3e332d3a 7231 amdgpu_dm_fbc_init(connector);
5099114b 7232
c84dec2f 7233 return amdgpu_dm_connector->num_modes;
e7b07cee
HW
7234}
7235
3ee6b26b
AD
7236void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7237 struct amdgpu_dm_connector *aconnector,
7238 int connector_type,
7239 struct dc_link *link,
7240 int link_index)
e7b07cee 7241{
1348969a 7242 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
e7b07cee 7243
f04bee34
NK
7244 /*
7245 * Some of the properties below require access to state, like bpc.
7246 * Allocate some default initial connector state with our reset helper.
7247 */
7248 if (aconnector->base.funcs->reset)
7249 aconnector->base.funcs->reset(&aconnector->base);
7250
e7b07cee 7251 aconnector->connector_id = link_index;
f196198c 7252 aconnector->bl_idx = -1;
e7b07cee
HW
7253 aconnector->dc_link = link;
7254 aconnector->base.interlace_allowed = false;
7255 aconnector->base.doublescan_allowed = false;
7256 aconnector->base.stereo_allowed = false;
7257 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7258 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6ce8f316 7259 aconnector->audio_inst = -1;
5b49da02
SJK
7260 aconnector->pack_sdp_v1_3 = false;
7261 aconnector->as_type = ADAPTIVE_SYNC_TYPE_NONE;
7262 memset(&aconnector->vsdb_info, 0, sizeof(aconnector->vsdb_info));
e7b07cee
HW
7263 mutex_init(&aconnector->hpd_lock);
7264
1f6010a9
DF
7265 /*
7266	 * Configure HPD hot plug support; connector->polled defaults to 0,
b830ebc9
HW
7267	 * which means HPD hot plug is not supported.
7268 */
e7b07cee
HW
7269 switch (connector_type) {
7270 case DRM_MODE_CONNECTOR_HDMIA:
7271 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
e7baae1c 7272 aconnector->base.ycbcr_420_allowed =
9ea59d5a 7273 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
e7b07cee
HW
7274 break;
7275 case DRM_MODE_CONNECTOR_DisplayPort:
7276 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
d715c9a2 7277 link->link_enc = link_enc_cfg_get_link_enc(link);
7b201d53 7278 ASSERT(link->link_enc);
f6e03f80
JS
7279 if (link->link_enc)
7280 aconnector->base.ycbcr_420_allowed =
9ea59d5a 7281 link->link_enc->features.dp_ycbcr420_supported ? true : false;
e7b07cee
HW
7282 break;
7283 case DRM_MODE_CONNECTOR_DVID:
7284 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7285 break;
7286 default:
7287 break;
7288 }
7289
7290 drm_object_attach_property(&aconnector->base.base,
7291 dm->ddev->mode_config.scaling_mode_property,
7292 DRM_MODE_SCALE_NONE);
7293
7294 drm_object_attach_property(&aconnector->base.base,
7295 adev->mode_info.underscan_property,
7296 UNDERSCAN_OFF);
7297 drm_object_attach_property(&aconnector->base.base,
7298 adev->mode_info.underscan_hborder_property,
7299 0);
7300 drm_object_attach_property(&aconnector->base.base,
7301 adev->mode_info.underscan_vborder_property,
7302 0);
1825fd34 7303
f0127cb1 7304 if (!aconnector->mst_root)
8c61b31e 7305 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
1825fd34 7306
e47f1691 7307 aconnector->base.state->max_bpc = 16;
4a8ca46b 7308 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
e7b07cee 7309
c1ee92f9 7310 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
5cb32419 7311 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
c1ee92f9
DF
7312 drm_object_attach_property(&aconnector->base.base,
7313 adev->mode_info.abm_level_property, 0);
7314 }
bb47de73
NK
7315
7316 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7fad8da1
NK
7317 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7318 connector_type == DRM_MODE_CONNECTOR_eDP) {
e057b52c 7319 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
88694af9 7320
f0127cb1 7321 if (!aconnector->mst_root)
8c61b31e
JFZ
7322 drm_connector_attach_vrr_capable_property(&aconnector->base);
7323
e22bb562 7324 if (adev->dm.hdcp_workqueue)
53e108aa 7325 drm_connector_attach_content_protection_property(&aconnector->base, true);
bb47de73 7326 }
e7b07cee
HW
7327}
7328
7578ecda
AD
7329static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7330 struct i2c_msg *msgs, int num)
e7b07cee
HW
7331{
7332 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7333 struct ddc_service *ddc_service = i2c->ddc_service;
7334 struct i2c_command cmd;
7335 int i;
7336 int result = -EIO;
7337
b830ebc9 7338 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
e7b07cee
HW
7339
7340 if (!cmd.payloads)
7341 return result;
7342
7343 cmd.number_of_payloads = num;
7344 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7345 cmd.speed = 100;
7346
7347 for (i = 0; i < num; i++) {
7348 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7349 cmd.payloads[i].address = msgs[i].addr;
7350 cmd.payloads[i].length = msgs[i].len;
7351 cmd.payloads[i].data = msgs[i].buf;
7352 }
7353
c85e6e54
DF
7354 if (dc_submit_i2c(
7355 ddc_service->ctx->dc,
22676bc5 7356 ddc_service->link->link_index,
e7b07cee
HW
7357 &cmd))
7358 result = num;
7359
7360 kfree(cmd.payloads);
7361 return result;
7362}
7363
7578ecda 7364static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
e7b07cee
HW
7365{
7366 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7367}
7368
7369static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7370 .master_xfer = amdgpu_dm_i2c_xfer,
7371 .functionality = amdgpu_dm_i2c_func,
7372};
7373
3ee6b26b
AD
7374static struct amdgpu_i2c_adapter *
7375create_i2c(struct ddc_service *ddc_service,
7376 int link_index,
7377 int *res)
e7b07cee
HW
7378{
7379 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7380 struct amdgpu_i2c_adapter *i2c;
7381
b830ebc9 7382 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
2a55f096
ES
7383 if (!i2c)
7384 return NULL;
e7b07cee
HW
7385 i2c->base.owner = THIS_MODULE;
7386 i2c->base.class = I2C_CLASS_DDC;
7387 i2c->base.dev.parent = &adev->pdev->dev;
7388 i2c->base.algo = &amdgpu_dm_i2c_algo;
b830ebc9 7389 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
e7b07cee
HW
7390 i2c_set_adapdata(&i2c->base, i2c);
7391 i2c->ddc_service = ddc_service;
7392
7393 return i2c;
7394}
7395
89fc8d4e 7396
1f6010a9
DF
7397/*
7398 * Note: this function assumes that dc_link_detect() was called for the
b830ebc9
HW
7399 * dc_link which will be represented by this aconnector.
7400 */
7578ecda
AD
7401static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7402 struct amdgpu_dm_connector *aconnector,
ae67558b 7403 u32 link_index,
7578ecda 7404 struct amdgpu_encoder *aencoder)
e7b07cee
HW
7405{
7406 int res = 0;
7407 int connector_type;
7408 struct dc *dc = dm->dc;
7409 struct dc_link *link = dc_get_link_at_index(dc, link_index);
7410 struct amdgpu_i2c_adapter *i2c;
9a227d26
TSD
7411
7412 link->priv = aconnector;
e7b07cee 7413
f1ad2f5e 7414 DRM_DEBUG_DRIVER("%s()\n", __func__);
e7b07cee
HW
7415
7416 i2c = create_i2c(link->ddc, link->link_index, &res);
2a55f096
ES
7417 if (!i2c) {
7418 DRM_ERROR("Failed to create i2c adapter data\n");
7419 return -ENOMEM;
7420 }
7421
e7b07cee
HW
7422 aconnector->i2c = i2c;
7423 res = i2c_add_adapter(&i2c->base);
7424
7425 if (res) {
7426 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7427 goto out_free;
7428 }
7429
7430 connector_type = to_drm_connector_type(link->connector_signal);
7431
17165de2 7432 res = drm_connector_init_with_ddc(
e7b07cee
HW
7433 dm->ddev,
7434 &aconnector->base,
7435 &amdgpu_dm_connector_funcs,
17165de2
AP
7436 connector_type,
7437 &i2c->base);
e7b07cee
HW
7438
7439 if (res) {
7440 DRM_ERROR("connector_init failed\n");
7441 aconnector->connector_id = -1;
7442 goto out_free;
7443 }
7444
7445 drm_connector_helper_add(
7446 &aconnector->base,
7447 &amdgpu_dm_connector_helper_funcs);
7448
7449 amdgpu_dm_connector_init_helper(
7450 dm,
7451 aconnector,
7452 connector_type,
7453 link,
7454 link_index);
7455
cde4c44d 7456 drm_connector_attach_encoder(
e7b07cee
HW
7457 &aconnector->base, &aencoder->base);
7458
e7b07cee
HW
7459 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7460 || connector_type == DRM_MODE_CONNECTOR_eDP)
7daec99f 7461 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
e7b07cee 7462
e7b07cee
HW
7463out_free:
7464 if (res) {
7465 kfree(i2c);
7466 aconnector->i2c = NULL;
7467 }
7468 return res;
7469}
7470
7471int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7472{
7473 switch (adev->mode_info.num_crtc) {
7474 case 1:
7475 return 0x1;
7476 case 2:
7477 return 0x3;
7478 case 3:
7479 return 0x7;
7480 case 4:
7481 return 0xf;
7482 case 5:
7483 return 0x1f;
7484 case 6:
7485 default:
7486 return 0x3f;
7487 }
7488}
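
/*
 * Hedged note: the switch above is equivalent to a contiguous bitmask of
 * num_crtc bits, clamped at six CRTCs:
 *
 *   return (1u << min(adev->mode_info.num_crtc, 6)) - 1;
 */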
7489
7578ecda
AD
7490static int amdgpu_dm_encoder_init(struct drm_device *dev,
7491 struct amdgpu_encoder *aencoder,
7492 uint32_t link_index)
e7b07cee 7493{
1348969a 7494 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
7495
7496 int res = drm_encoder_init(dev,
7497 &aencoder->base,
7498 &amdgpu_dm_encoder_funcs,
7499 DRM_MODE_ENCODER_TMDS,
7500 NULL);
7501
7502 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7503
7504 if (!res)
7505 aencoder->encoder_id = link_index;
7506 else
7507 aencoder->encoder_id = -1;
7508
7509 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7510
7511 return res;
7512}
7513
3ee6b26b
AD
7514static void manage_dm_interrupts(struct amdgpu_device *adev,
7515 struct amdgpu_crtc *acrtc,
7516 bool enable)
e7b07cee
HW
7517{
7518 /*
8fe684e9
NK
7519 * We have no guarantee that the frontend index maps to the same
7520 * backend index - some even map to more than one.
7521 *
7522 * TODO: Use a different interrupt or check DC itself for the mapping.
e7b07cee
HW
7523 */
7524 int irq_type =
734dd01d 7525 amdgpu_display_crtc_idx_to_irq_type(
e7b07cee
HW
7526 adev,
7527 acrtc->crtc_id);
7528
7529 if (enable) {
7530 drm_crtc_vblank_on(&acrtc->base);
7531 amdgpu_irq_get(
7532 adev,
7533 &adev->pageflip_irq,
7534 irq_type);
86bc2219
WL
7535#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7536 amdgpu_irq_get(
7537 adev,
7538 &adev->vline0_irq,
7539 irq_type);
7540#endif
e7b07cee 7541 } else {
86bc2219
WL
7542#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7543 amdgpu_irq_put(
7544 adev,
7545 &adev->vline0_irq,
7546 irq_type);
7547#endif
e7b07cee
HW
7548 amdgpu_irq_put(
7549 adev,
7550 &adev->pageflip_irq,
7551 irq_type);
7552 drm_crtc_vblank_off(&acrtc->base);
7553 }
7554}
7555
8fe684e9
NK
7556static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7557 struct amdgpu_crtc *acrtc)
7558{
7559 int irq_type =
7560 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7561
7562 /**
7563	 * This reads the current state for the IRQ and forcibly reapplies
7564	 * the setting to hardware.
7565 */
7566 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7567}
7568
3ee6b26b
AD
7569static bool
7570is_scaling_state_different(const struct dm_connector_state *dm_state,
7571 const struct dm_connector_state *old_dm_state)
e7b07cee
HW
7572{
7573 if (dm_state->scaling != old_dm_state->scaling)
7574 return true;
7575 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7576 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7577 return true;
7578 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7579 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7580 return true;
b830ebc9
HW
7581 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7582 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7583 return true;
e7b07cee
HW
7584 return false;
7585}
7586
e8fd3eeb 7587static bool is_content_protection_different(struct drm_crtc_state *new_crtc_state,
7588 struct drm_crtc_state *old_crtc_state,
7589 struct drm_connector_state *new_conn_state,
7590 struct drm_connector_state *old_conn_state,
7591 const struct drm_connector *connector,
7592 struct hdcp_workqueue *hdcp_w)
0c8620d6
BL
7593{
7594 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
97f6c917 7595 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
0c8620d6 7596
e8fd3eeb 7597 pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
7598 connector->index, connector->status, connector->dpms);
7599 pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
7600 old_conn_state->content_protection, new_conn_state->content_protection);
7601
7602 if (old_crtc_state)
7603 pr_debug("[HDCP_DM] old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
7604 old_crtc_state->enable,
7605 old_crtc_state->active,
7606 old_crtc_state->mode_changed,
7607 old_crtc_state->active_changed,
7608 old_crtc_state->connectors_changed);
7609
7610 if (new_crtc_state)
7611 pr_debug("[HDCP_DM] NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
7612 new_crtc_state->enable,
7613 new_crtc_state->active,
7614 new_crtc_state->mode_changed,
7615 new_crtc_state->active_changed,
7616 new_crtc_state->connectors_changed);
7617
7618 /* hdcp content type change */
7619 if (old_conn_state->hdcp_content_type != new_conn_state->hdcp_content_type &&
7620 new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7621 new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7622 pr_debug("[HDCP_DM] Type0/1 change %s :true\n", __func__);
53e108aa
BL
7623 return true;
7624 }
7625
e8fd3eeb 7626	/* CP is being re-enabled, ignore this */
7627 if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7628 new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7629 if (new_crtc_state && new_crtc_state->mode_changed) {
7630 new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7631 pr_debug("[HDCP_DM] ENABLED->DESIRED & mode_changed %s :true\n", __func__);
7632 return true;
0b8f42ab 7633 }
e8fd3eeb 7634 new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7635 pr_debug("[HDCP_DM] ENABLED -> DESIRED %s :false\n", __func__);
0c8620d6
BL
7636 return false;
7637 }
7638
31c0ed90
BL
7639 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7640 *
7641 * Handles: UNDESIRED -> ENABLED
7642 */
e8fd3eeb 7643 if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7644 new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7645 new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
0c8620d6 7646
0d9a947b
QZ
7647 /* Stream removed and re-enabled
7648 *
7649 * Can sometimes overlap with the HPD case,
7650 * thus set update_hdcp to false to avoid
7651 * setting HDCP multiple times.
7652 *
7653 * Handles: DESIRED -> DESIRED (Special case)
7654 */
e8fd3eeb 7655 if (!(old_conn_state->crtc && old_conn_state->crtc->enabled) &&
7656 new_conn_state->crtc && new_conn_state->crtc->enabled &&
0d9a947b
QZ
7657 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7658 dm_con_state->update_hdcp = false;
e8fd3eeb 7659 pr_debug("[HDCP_DM] DESIRED->DESIRED (Stream removed and re-enabled) %s :true\n",
7660 __func__);
0d9a947b
QZ
7661 return true;
7662 }
7663
7664 /* Hot-plug, headless s3, dpms
7665 *
7666 * Only start HDCP if the display is connected/enabled.
7667 * update_hdcp flag will be set to false until the next
7668 * HPD comes in.
31c0ed90
BL
7669 *
7670 * Handles: DESIRED -> DESIRED (Special case)
0c8620d6 7671 */
e8fd3eeb 7672 if (dm_con_state->update_hdcp &&
7673 new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7674 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
97f6c917 7675 dm_con_state->update_hdcp = false;
e8fd3eeb 7676 pr_debug("[HDCP_DM] DESIRED->DESIRED (Hot-plug, headless s3, dpms) %s :true\n",
7677 __func__);
0c8620d6 7678 return true;
97f6c917 7679 }
0c8620d6 7680
e8fd3eeb 7681 if (old_conn_state->content_protection == new_conn_state->content_protection) {
7682 if (new_conn_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7683 if (new_crtc_state && new_crtc_state->mode_changed) {
7684 pr_debug("[HDCP_DM] DESIRED->DESIRED or ENABLE->ENABLE mode_change %s :true\n",
7685 __func__);
7686 return true;
0b8f42ab 7687 }
e8fd3eeb 7688 pr_debug("[HDCP_DM] DESIRED->DESIRED & ENABLE->ENABLE %s :false\n",
7689 __func__);
7690 return false;
0b8f42ab 7691 }
e8fd3eeb 7692
7693 pr_debug("[HDCP_DM] UNDESIRED->UNDESIRED %s :false\n", __func__);
0c8620d6 7694 return false;
e8fd3eeb 7695 }
0c8620d6 7696
e8fd3eeb 7697 if (new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7698 pr_debug("[HDCP_DM] UNDESIRED->DESIRED or DESIRED->UNDESIRED or ENABLED->UNDESIRED %s :true\n",
7699 __func__);
0c8620d6 7700 return true;
e8fd3eeb 7701 }
0c8620d6 7702
e8fd3eeb 7703 pr_debug("[HDCP_DM] DESIRED->ENABLED %s :false\n", __func__);
0c8620d6
BL
7704 return false;
7705}
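
/*
 * Hedged summary of the transitions handled above (true = run HDCP again):
 *
 *   content type changed (new state != UNDESIRED)   -> true
 *   ENABLED   -> DESIRED, mode changed              -> true
 *   ENABLED   -> DESIRED, no mode change            -> false (CP re-enable)
 *   DESIRED   -> DESIRED, stream re-enabled or HPD  -> true
 *   same state >= DESIRED, mode changed             -> true
 *   UNDESIRED -> UNDESIRED                          -> false
 *   any state -> UNDESIRED, or UNDESIRED -> DESIRED -> true
 *   DESIRED   -> ENABLED                            -> false
 */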
e8fd3eeb 7706
3ee6b26b
AD
7707static void remove_stream(struct amdgpu_device *adev,
7708 struct amdgpu_crtc *acrtc,
7709 struct dc_stream_state *stream)
e7b07cee
HW
7710{
7711 /* this is the update mode case */
e7b07cee
HW
7712
7713 acrtc->otg_inst = -1;
7714 acrtc->enabled = false;
7715}
7716
e7b07cee
HW
7717static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7718{
7719
7720 assert_spin_locked(&acrtc->base.dev->event_lock);
7721 WARN_ON(acrtc->event);
7722
7723 acrtc->event = acrtc->base.state->event;
7724
7725 /* Set the flip status */
7726 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7727
7728 /* Mark this event as consumed */
7729 acrtc->base.state->event = NULL;
7730
cb2318b7
VL
7731 DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7732 acrtc->crtc_id);
e7b07cee
HW
7733}
7734
bb47de73
NK
7735static void update_freesync_state_on_stream(
7736 struct amdgpu_display_manager *dm,
7737 struct dm_crtc_state *new_crtc_state,
180db303
NK
7738 struct dc_stream_state *new_stream,
7739 struct dc_plane_state *surface,
7740 u32 flip_timestamp_in_us)
bb47de73 7741{
09aef2c4 7742 struct mod_vrr_params vrr_params;
bb47de73 7743 struct dc_info_packet vrr_infopacket = {0};
09aef2c4 7744 struct amdgpu_device *adev = dm->adev;
585d450c 7745 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
09aef2c4 7746 unsigned long flags;
4cda3243 7747 bool pack_sdp_v1_3 = false;
5b49da02
SJK
7748 struct amdgpu_dm_connector *aconn;
7749 enum vrr_packet_type packet_type = PACKET_TYPE_VRR;
bb47de73
NK
7750
7751 if (!new_stream)
7752 return;
7753
7754 /*
7755 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7756 * For now it's sufficient to just guard against these conditions.
7757 */
7758
7759 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7760 return;
7761
4a580877 7762 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
3335a135 7763 vrr_params = acrtc->dm_irq_params.vrr_params;
09aef2c4 7764
180db303
NK
7765 if (surface) {
7766 mod_freesync_handle_preflip(
7767 dm->freesync_module,
7768 surface,
7769 new_stream,
7770 flip_timestamp_in_us,
7771 &vrr_params);
09aef2c4
MK
7772
7773 if (adev->family < AMDGPU_FAMILY_AI &&
6c5e25a0 7774 amdgpu_dm_crtc_vrr_active(new_crtc_state)) {
09aef2c4
MK
7775 mod_freesync_handle_v_update(dm->freesync_module,
7776 new_stream, &vrr_params);
e63e2491
EB
7777
7778 /* Need to call this before the frame ends. */
7779 dc_stream_adjust_vmin_vmax(dm->dc,
7780 new_crtc_state->stream,
7781 &vrr_params.adjust);
09aef2c4 7782 }
180db303 7783 }
bb47de73 7784
5b49da02
SJK
7785 aconn = (struct amdgpu_dm_connector *)new_stream->dm_stream_context;
7786
7787 if (aconn && aconn->as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) {
7788 pack_sdp_v1_3 = aconn->pack_sdp_v1_3;
7789
7790 if (aconn->vsdb_info.amd_vsdb_version == 1)
7791 packet_type = PACKET_TYPE_FS_V1;
7792 else if (aconn->vsdb_info.amd_vsdb_version == 2)
7793 packet_type = PACKET_TYPE_FS_V2;
7794 else if (aconn->vsdb_info.amd_vsdb_version == 3)
7795 packet_type = PACKET_TYPE_FS_V3;
7796
7797 mod_build_adaptive_sync_infopacket(new_stream, aconn->as_type, NULL,
7798 &new_stream->adaptive_sync_infopacket);
7799 }
7800
bb47de73
NK
7801 mod_freesync_build_vrr_infopacket(
7802 dm->freesync_module,
7803 new_stream,
180db303 7804 &vrr_params,
5b49da02 7805 packet_type,
ecd0136b 7806 TRANSFER_FUNC_UNKNOWN,
4cda3243
MT
7807 &vrr_infopacket,
7808 pack_sdp_v1_3);
bb47de73 7809
8a48b44c 7810 new_crtc_state->freesync_vrr_info_changed |=
bb47de73
NK
7811 (memcmp(&new_crtc_state->vrr_infopacket,
7812 &vrr_infopacket,
7813 sizeof(vrr_infopacket)) != 0);
7814
585d450c 7815 acrtc->dm_irq_params.vrr_params = vrr_params;
bb47de73
NK
7816 new_crtc_state->vrr_infopacket = vrr_infopacket;
7817
bb47de73 7818 new_stream->vrr_infopacket = vrr_infopacket;
7eaef116 7819 new_stream->allow_freesync = mod_freesync_get_freesync_enabled(&vrr_params);
bb47de73
NK
7820
7821 if (new_crtc_state->freesync_vrr_info_changed)
7822 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
7823 new_crtc_state->base.crtc->base.id,
7824 (int)new_crtc_state->base.vrr_enabled,
180db303 7825 (int)vrr_params.state);
09aef2c4 7826
4a580877 7827 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
bb47de73
NK
7828}
7829
585d450c 7830static void update_stream_irq_parameters(
e854194c
MK
7831 struct amdgpu_display_manager *dm,
7832 struct dm_crtc_state *new_crtc_state)
7833{
7834 struct dc_stream_state *new_stream = new_crtc_state->stream;
09aef2c4 7835 struct mod_vrr_params vrr_params;
e854194c 7836 struct mod_freesync_config config = new_crtc_state->freesync_config;
09aef2c4 7837 struct amdgpu_device *adev = dm->adev;
585d450c 7838 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
09aef2c4 7839 unsigned long flags;
e854194c
MK
7840
7841 if (!new_stream)
7842 return;
7843
7844 /*
7845 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7846 * For now it's sufficient to just guard against these conditions.
7847 */
7848 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7849 return;
7850
4a580877 7851 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
585d450c 7852 vrr_params = acrtc->dm_irq_params.vrr_params;
09aef2c4 7853
e854194c
MK
7854 if (new_crtc_state->vrr_supported &&
7855 config.min_refresh_in_uhz &&
7856 config.max_refresh_in_uhz) {
a85ba005
NC
7857 /*
7858 * if freesync compatible mode was set, config.state will be set
7859 * in atomic check
7860 */
7861 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
7862 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
7863 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
7864 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
7865 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
7866 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
7867 vrr_params.state = VRR_STATE_ACTIVE_FIXED;
7868 } else {
7869 config.state = new_crtc_state->base.vrr_enabled ?
7870 VRR_STATE_ACTIVE_VARIABLE :
7871 VRR_STATE_INACTIVE;
7872 }
e854194c
MK
7873 } else {
7874 config.state = VRR_STATE_UNSUPPORTED;
7875 }
7876
7877 mod_freesync_build_vrr_params(dm->freesync_module,
7878 new_stream,
7879 &config, &vrr_params);
7880
585d450c
AP
7881 new_crtc_state->freesync_config = config;
7882 /* Copy state for access from DM IRQ handler */
7883 acrtc->dm_irq_params.freesync_config = config;
7884 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7885 acrtc->dm_irq_params.vrr_params = vrr_params;
4a580877 7886 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
e854194c
MK
7887}
7888
66b0c973
MK
7889static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7890 struct dm_crtc_state *new_state)
7891{
6c5e25a0
DT
7892 bool old_vrr_active = amdgpu_dm_crtc_vrr_active(old_state);
7893 bool new_vrr_active = amdgpu_dm_crtc_vrr_active(new_state);
66b0c973
MK
7894
7895 if (!old_vrr_active && new_vrr_active) {
7896 /* Transition VRR inactive -> active:
7897 * While VRR is active, we must not disable vblank irq, as a
7898 * reenable after disable would compute bogus vblank/pflip
7899		 * timestamps if the re-enable happens inside the display front-porch.
d2574c33
MK
7900 *
7901 * We also need vupdate irq for the actual core vblank handling
7902 * at end of vblank.
66b0c973 7903 */
6c5e25a0 7904 WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, true) != 0);
8799c0be 7905 WARN_ON(drm_crtc_vblank_get(new_state->base.crtc) != 0);
66b0c973
MK
7906 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7907 __func__, new_state->base.crtc->base.id);
7908 } else if (old_vrr_active && !new_vrr_active) {
7909 /* Transition VRR active -> inactive:
7910 * Allow vblank irq disable again for fixed refresh rate.
7911 */
6c5e25a0 7912 WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, false) != 0);
66b0c973
MK
7913 drm_crtc_vblank_put(new_state->base.crtc);
7914 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7915 __func__, new_state->base.crtc->base.id);
7916 }
7917}
7918
8ad27806
NK
7919static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7920{
7921 struct drm_plane *plane;
5760dcb9 7922 struct drm_plane_state *old_plane_state;
8ad27806
NK
7923 int i;
7924
7925 /*
7926 * TODO: Make this per-stream so we don't issue redundant updates for
7927 * commits with multiple streams.
7928 */
5760dcb9 7929 for_each_old_plane_in_state(state, plane, old_plane_state, i)
8ad27806 7930 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8bf0d9cd 7931 amdgpu_dm_plane_handle_cursor_update(plane, old_plane_state);
8ad27806
NK
7932}
7933
8f7f1b02
HM
7934static inline uint32_t get_mem_type(struct drm_framebuffer *fb)
7935{
7936 struct amdgpu_bo *abo = gem_to_amdgpu_bo(fb->obj[0]);
7937
7938 return abo->tbo.resource ? abo->tbo.resource->mem_type : 0;
7939}
7940
3be5262e 7941static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
eb3dc897 7942 struct dc_state *dc_state,
3ee6b26b
AD
7943 struct drm_device *dev,
7944 struct amdgpu_display_manager *dm,
7945 struct drm_crtc *pcrtc,
420cd472 7946 bool wait_for_vblank)
e7b07cee 7947{
ae67558b 7948 u32 i;
d6ed6d0d 7949 u64 timestamp_ns = ktime_get_ns();
e7b07cee 7950 struct drm_plane *plane;
0bc9706d 7951 struct drm_plane_state *old_plane_state, *new_plane_state;
e7b07cee 7952 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
0bc9706d
LSL
7953 struct drm_crtc_state *new_pcrtc_state =
7954 drm_atomic_get_new_crtc_state(state, pcrtc);
7955 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
44d09c6a
HW
7956 struct dm_crtc_state *dm_old_crtc_state =
7957 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
74aa7bd4 7958 int planes_count = 0, vpos, hpos;
e7b07cee 7959 unsigned long flags;
ae67558b 7960 u32 target_vblank, last_flip_vblank;
6c5e25a0 7961 bool vrr_active = amdgpu_dm_crtc_vrr_active(acrtc_state);
cc79950b 7962 bool cursor_update = false;
74aa7bd4 7963 bool pflip_present = false;
d6ed6d0d 7964 bool dirty_rects_changed = false;
bc7f670e
DF
7965 struct {
7966 struct dc_surface_update surface_updates[MAX_SURFACES];
7967 struct dc_plane_info plane_infos[MAX_SURFACES];
7968 struct dc_scaling_info scaling_infos[MAX_SURFACES];
74aa7bd4 7969 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
bc7f670e 7970 struct dc_stream_update stream_update;
74aa7bd4 7971 } *bundle;
bc7f670e 7972
74aa7bd4 7973 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8a48b44c 7974
74aa7bd4
DF
7975 if (!bundle) {
7976 dm_error("Failed to allocate update bundle\n");
4b510503
NK
7977 goto cleanup;
7978 }
e7b07cee 7979
8ad27806
NK
7980 /*
7981 * Disable the cursor first if we're disabling all the planes.
7982 * It'll remain on the screen after the planes are re-enabled
7983 * if we don't.
7984 */
7985 if (acrtc_state->active_planes == 0)
7986 amdgpu_dm_commit_cursors(state);
7987
e7b07cee 7988 /* update planes when needed */
efc8278e 7989 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
0bc9706d 7990 struct drm_crtc *crtc = new_plane_state->crtc;
f5ba60fe 7991 struct drm_crtc_state *new_crtc_state;
0bc9706d 7992 struct drm_framebuffer *fb = new_plane_state->fb;
6eed95b0 7993 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
34bafd27 7994 bool plane_needs_flip;
c7af5f77 7995 struct dc_plane_state *dc_plane;
54d76575 7996 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
e7b07cee 7997
80c218d5 7998 /* Cursor plane is handled after stream updates */
cc79950b
MD
7999 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8000 if ((fb && crtc == pcrtc) ||
8001 (old_plane_state->fb && old_plane_state->crtc == pcrtc))
8002 cursor_update = true;
8003
e7b07cee 8004 continue;
cc79950b 8005 }
e7b07cee 8006
f5ba60fe
DD
8007 if (!fb || !crtc || pcrtc != crtc)
8008 continue;
8009
8010 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8011 if (!new_crtc_state->active)
e7b07cee
HW
8012 continue;
8013
bc7f670e 8014 dc_plane = dm_new_plane_state->dc_state;
72529b68
AP
8015 if (!dc_plane)
8016 continue;
e7b07cee 8017
74aa7bd4 8018 bundle->surface_updates[planes_count].surface = dc_plane;
bc7f670e 8019 if (new_pcrtc_state->color_mgmt_changed) {
74aa7bd4
DF
8020 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8021 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
44efb784 8022 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
bc7f670e 8023 }
8a48b44c 8024
8bf0d9cd 8025 amdgpu_dm_plane_fill_dc_scaling_info(dm->adev, new_plane_state,
695af5f9 8026 &bundle->scaling_infos[planes_count]);
8a48b44c 8027
695af5f9
NK
8028 bundle->surface_updates[planes_count].scaling_info =
8029 &bundle->scaling_infos[planes_count];
8a48b44c 8030
f5031000 8031 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8a48b44c 8032
f5031000 8033 pflip_present = pflip_present || plane_needs_flip;
8a48b44c 8034
f5031000
DF
8035 if (!plane_needs_flip) {
8036 planes_count += 1;
8037 continue;
8038 }
8a48b44c 8039
695af5f9 8040 fill_dc_plane_info_and_addr(
8ce5d842 8041 dm->adev, new_plane_state,
6eed95b0 8042 afb->tiling_flags,
695af5f9 8043 &bundle->plane_infos[planes_count],
87b7ebc2 8044 &bundle->flip_addrs[planes_count].address,
6eed95b0 8045 afb->tmz_surface, false);
87b7ebc2 8046
9f07550b 8047 drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
87b7ebc2
RS
8048 new_plane_state->plane->index,
8049 bundle->plane_infos[planes_count].dcc.enable);
695af5f9
NK
8050
8051 bundle->surface_updates[planes_count].plane_info =
8052 &bundle->plane_infos[planes_count];
8a48b44c 8053
d6ed6d0d 8054 if (acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
d852871c
HM
8055 fill_dc_dirty_rects(plane, old_plane_state,
8056 new_plane_state, new_crtc_state,
d6ed6d0d
TC
8057 &bundle->flip_addrs[planes_count],
8058 &dirty_rects_changed);
8059
8060 /*
8061 * If the dirty regions changed, PSR-SU needs to be disabled temporarily
8062 * and then enabled again once the dirty regions are stable, to avoid video glitches.
8063 * PSR-SU will be re-enabled in vblank_control_worker() if the user pauses the video
8064 * while PSR-SU is disabled.
8065 */
8066 if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
8067 acrtc_attach->dm_irq_params.allow_psr_entry &&
8068#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
8069 !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
8070#endif
8071 dirty_rects_changed) {
8072 mutex_lock(&dm->dc_lock);
8073 acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns =
8074 timestamp_ns;
8075 if (acrtc_state->stream->link->psr_settings.psr_allow_active)
8076 amdgpu_dm_psr_disable(acrtc_state->stream);
8077 mutex_unlock(&dm->dc_lock);
8078 }
8079 }
7cc191ee 8080
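 /*
 * Illustrative sketch (not part of the driver): the two PSR-SU hunks in
 * this function form a debounce. The hunk above records the timestamp of
 * the last dirty-rect change and disables PSR; the re-enable path further
 * below only allows PSR entry again once the regions have been stable
 * for 500 ms:
 *
 *	stable = (now_ns - last_change_ns) > 500000000ULL;
 */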
caff0e66
NK
8081 /*
8082 * Only allow immediate flips for fast updates that don't
8f7f1b02
HM
8083 * change memory domain, FB pitch, DCC state, rotation or
8084 * mirroring.
caff0e66 8085 */
f5031000 8086 bundle->flip_addrs[planes_count].flip_immediate =
4d85f45c 8087 crtc->state->async_flip &&
8f7f1b02
HM
8088 acrtc_state->update_type == UPDATE_TYPE_FAST &&
8089 get_mem_type(old_plane_state->fb) == get_mem_type(fb);
8a48b44c 8090
f5031000
DF
8091 timestamp_ns = ktime_get_ns();
8092 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8093 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8094 bundle->surface_updates[planes_count].surface = dc_plane;
8a48b44c 8095
f5031000
DF
8096 if (!bundle->surface_updates[planes_count].surface) {
8097 DRM_ERROR("No surface for CRTC: id=%d\n",
8098 acrtc_attach->crtc_id);
8099 continue;
bc7f670e
DF
8100 }
8101
f5031000
DF
8102 if (plane == pcrtc->primary)
8103 update_freesync_state_on_stream(
8104 dm,
8105 acrtc_state,
8106 acrtc_state->stream,
8107 dc_plane,
8108 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
bc7f670e 8109
9f07550b 8110 drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
f5031000
DF
8111 __func__,
8112 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8113 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
bc7f670e
DF
8114
8115 planes_count += 1;
8116
8a48b44c
DF
8117 }
8118
74aa7bd4 8119 if (pflip_present) {
634092b1
MK
8120 if (!vrr_active) {
8121 /* Use old throttling in non-vrr fixed refresh rate mode
8122 * to keep flip scheduling based on target vblank counts
8123 * working in a backwards compatible way, e.g., for
8124 * clients using the GLX_OML_sync_control extension or
8125 * DRI3/Present extension with defined target_msc.
8126 */
e3eff4b5 8127 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
634092b1
MK
8128 }
8129 else {
8130 /* For variable refresh rate mode only:
8131 * Get vblank of last completed flip to avoid > 1 vrr
8132 * flips per video frame by use of throttling, but allow
8133 * flip programming anywhere in the possibly large
8134 * variable vrr vblank interval for fine-grained flip
8135 * timing control and more opportunity to avoid stutter
8136 * on late submission of flips.
8137 */
8138 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
5d1c59c4 8139 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
634092b1
MK
8140 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8141 }
8142
fdd1fe57 8143 target_vblank = last_flip_vblank + wait_for_vblank;
8a48b44c
DF
8144
8145 /*
8146 * Wait until we're out of the vertical blank period before the one
8147 * targeted by the flip
8148 */
8149 while ((acrtc_attach->enabled &&
8150 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8151 0, &vpos, &hpos, NULL,
8152 NULL, &pcrtc->hwmode)
8153 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8154 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8155 (int)(target_vblank -
e3eff4b5 8156 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8a48b44c
DF
8157 usleep_range(1000, 1100);
8158 }
8159
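 /*
 * Illustrative note (not part of the driver): the "(int)(target_vblank -
 * counter) > 0" test above is the usual wrap-safe comparison for a 32-bit
 * vblank counter. The unsigned subtraction is taken modulo 2^32, and the
 * signed cast makes "target still ahead" come out positive, e.g.:
 *
 *	(int)(0x00000002u - 0xfffffffeu) == 4   (target 4 vblanks ahead)
 */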
8fe684e9
NK
8160 /**
8161 * Prepare the flip event for the pageflip interrupt to handle.
8162 *
8163 * This only works in the case where we've already turned on the
8164 * appropriate hardware blocks (e.g. HUBP) so in the transition case
8165 * from 0 -> n planes we have to skip a hardware generated event
8166 * and rely on sending it from software.
8167 */
8168 if (acrtc_attach->base.state->event &&
10a36226 8169 acrtc_state->active_planes > 0) {
8a48b44c
DF
8170 drm_crtc_vblank_get(pcrtc);
8171
8172 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8173
8174 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8175 prepare_flip_isr(acrtc_attach);
8176
8177 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8178 }
8179
8180 if (acrtc_state->stream) {
8a48b44c 8181 if (acrtc_state->freesync_vrr_info_changed)
74aa7bd4 8182 bundle->stream_update.vrr_infopacket =
8a48b44c 8183 &acrtc_state->stream->vrr_infopacket;
e7b07cee 8184 }
cc79950b
MD
8185 } else if (cursor_update && acrtc_state->active_planes > 0 &&
8186 acrtc_attach->base.state->event) {
8187 drm_crtc_vblank_get(pcrtc);
8188
8189 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8190
8191 acrtc_attach->event = acrtc_attach->base.state->event;
8192 acrtc_attach->base.state->event = NULL;
8193
8194 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
e7b07cee
HW
8195 }
8196
bc92c065 8197 /* Update the planes if they changed, or disable them if we don't have any. */
ed9656fb
ES
8198 if ((planes_count || acrtc_state->active_planes == 0) &&
8199 acrtc_state->stream) {
58aa1c50
NK
8200 /*
8201 * If PSR or idle optimizations are enabled then flush out
8202 * any pending work before hardware programming.
8203 */
06dd1888
NK
8204 if (dm->vblank_control_workqueue)
8205 flush_workqueue(dm->vblank_control_workqueue);
58aa1c50 8206
b6e881c9 8207 bundle->stream_update.stream = acrtc_state->stream;
bc7f670e 8208 if (new_pcrtc_state->mode_changed) {
74aa7bd4
DF
8209 bundle->stream_update.src = acrtc_state->stream->src;
8210 bundle->stream_update.dst = acrtc_state->stream->dst;
e7b07cee
HW
8211 }
8212
cf020d49
NK
8213 if (new_pcrtc_state->color_mgmt_changed) {
8214 /*
8215 * TODO: This isn't fully correct since we've actually
8216 * already modified the stream in place.
8217 */
8218 bundle->stream_update.gamut_remap =
8219 &acrtc_state->stream->gamut_remap_matrix;
8220 bundle->stream_update.output_csc_transform =
8221 &acrtc_state->stream->csc_color_matrix;
8222 bundle->stream_update.out_transfer_func =
8223 acrtc_state->stream->out_transfer_func;
8224 }
bc7f670e 8225
8a48b44c 8226 acrtc_state->stream->abm_level = acrtc_state->abm_level;
bc7f670e 8227 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
74aa7bd4 8228 bundle->stream_update.abm_level = &acrtc_state->abm_level;
44d09c6a 8229
e63e2491
EB
8230 /*
8231 * If FreeSync state on the stream has changed then we need to
8232 * re-adjust the min/max bounds now that DC doesn't handle this
8233 * as part of commit.
8234 */
a85ba005 8235 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
e63e2491
EB
8236 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8237 dc_stream_adjust_vmin_vmax(
8238 dm->dc, acrtc_state->stream,
585d450c 8239 &acrtc_attach->dm_irq_params.vrr_params.adjust);
e63e2491
EB
8240 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8241 }
bc7f670e 8242 mutex_lock(&dm->dc_lock);
8c322309 8243 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
d1ebfdd8 8244 acrtc_state->stream->link->psr_settings.psr_allow_active)
8c322309
RL
8245 amdgpu_dm_psr_disable(acrtc_state->stream);
8246
81f743a0
RS
8247 update_planes_and_stream_adapter(dm->dc,
8248 acrtc_state->update_type,
8249 planes_count,
8250 acrtc_state->stream,
8251 &bundle->stream_update,
8252 bundle->surface_updates);
8c322309 8253
8fe684e9
NK
8254 /**
8255 * Enable or disable the interrupts on the backend.
8256 *
8257 * Most pipes are put into power gating when unused.
8258 *
8259 * When power gating is enabled on a pipe, we lose its interrupt
8260 * enablement state by the time power gating is disabled again.
8261 *
8262 * So we need to update the IRQ control state in hardware
8263 * whenever the pipe turns on (since it could be previously
8264 * power gated) or off (since some pipes can't be power gated
8265 * on some ASICs).
8266 */
8267 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
1348969a
LT
8268 dm_update_pflip_irq_state(drm_to_adev(dev),
8269 acrtc_attach);
8fe684e9 8270
8c322309 8271 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
1cfbbdde 8272 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
d1ebfdd8 8273 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8c322309 8274 amdgpu_dm_link_setup_psr(acrtc_state->stream);
58aa1c50
NK
8275
8276 /* Decrement skip count when PSR is enabled and we're doing fast updates. */
8277 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
8278 acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
8279 struct amdgpu_dm_connector *aconn =
8280 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
1a365683
RL
8281
8282 if (aconn->psr_skip_count > 0)
8283 aconn->psr_skip_count--;
58aa1c50
NK
8284
8285 /* Allow PSR when skip count is 0. */
8286 acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
7cc191ee
LL
8287
8288 /*
8289 * If the sink supports PSR SU, there is no need to rely on
8290 * a vblank event disable request to enable PSR. PSR SU
8291 * can be enabled immediately once the OS demonstrates an
8292 * adequate number of fast atomic commits to notify KMD
8293 * of update events. See `vblank_control_worker()`.
8294 */
8295 if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
8296 acrtc_attach->dm_irq_params.allow_psr_entry &&
c0459bdd
AL
8297#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
8298 !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
8299#endif
d6ed6d0d
TC
8300 !acrtc_state->stream->link->psr_settings.psr_allow_active &&
8301 (timestamp_ns -
8302 acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns) >
8303 500000000)
7cc191ee 8304 amdgpu_dm_psr_enable(acrtc_state->stream);
58aa1c50
NK
8305 } else {
8306 acrtc_attach->dm_irq_params.allow_psr_entry = false;
8c322309
RL
8307 }
8308
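 /*
 * Illustrative sketch (not part of the driver) of the PSR entry gating
 * above: each fast update decrements a per-connector skip counter, and
 * entry is only allowed once the counter reaches zero, i.e. after enough
 * consecutive fast commits:
 *
 *	if (skip_count > 0)
 *		skip_count--;
 *	allow_psr_entry = (skip_count == 0);
 */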
bc7f670e 8309 mutex_unlock(&dm->dc_lock);
e7b07cee 8310 }
4b510503 8311
8ad27806
NK
8312 /*
8313 * Update cursor state *after* programming all the planes.
8314 * This avoids redundant programming in the case where we're going
8315 * to be disabling a single plane, since those pipes are being disabled anyway.
8316 */
8317 if (acrtc_state->active_planes)
8318 amdgpu_dm_commit_cursors(state);
80c218d5 8319
4b510503 8320cleanup:
74aa7bd4 8321 kfree(bundle);
e7b07cee
HW
8322}
8323
6ce8f316
NK
8324static void amdgpu_dm_commit_audio(struct drm_device *dev,
8325 struct drm_atomic_state *state)
8326{
1348969a 8327 struct amdgpu_device *adev = drm_to_adev(dev);
6ce8f316
NK
8328 struct amdgpu_dm_connector *aconnector;
8329 struct drm_connector *connector;
8330 struct drm_connector_state *old_con_state, *new_con_state;
8331 struct drm_crtc_state *new_crtc_state;
8332 struct dm_crtc_state *new_dm_crtc_state;
8333 const struct dc_stream_status *status;
8334 int i, inst;
8335
8336 /* Notify device removals. */
8337 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8338 if (old_con_state->crtc != new_con_state->crtc) {
8339 /* CRTC changes require notification. */
8340 goto notify;
8341 }
8342
8343 if (!new_con_state->crtc)
8344 continue;
8345
8346 new_crtc_state = drm_atomic_get_new_crtc_state(
8347 state, new_con_state->crtc);
8348
8349 if (!new_crtc_state)
8350 continue;
8351
8352 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8353 continue;
8354
3335a135 8355notify:
6ce8f316
NK
8356 aconnector = to_amdgpu_dm_connector(connector);
8357
8358 mutex_lock(&adev->dm.audio_lock);
8359 inst = aconnector->audio_inst;
8360 aconnector->audio_inst = -1;
8361 mutex_unlock(&adev->dm.audio_lock);
8362
8363 amdgpu_dm_audio_eld_notify(adev, inst);
8364 }
8365
8366 /* Notify audio device additions. */
8367 for_each_new_connector_in_state(state, connector, new_con_state, i) {
8368 if (!new_con_state->crtc)
8369 continue;
8370
8371 new_crtc_state = drm_atomic_get_new_crtc_state(
8372 state, new_con_state->crtc);
8373
8374 if (!new_crtc_state)
8375 continue;
8376
8377 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8378 continue;
8379
8380 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8381 if (!new_dm_crtc_state->stream)
8382 continue;
8383
8384 status = dc_stream_get_status(new_dm_crtc_state->stream);
8385 if (!status)
8386 continue;
8387
8388 aconnector = to_amdgpu_dm_connector(connector);
8389
8390 mutex_lock(&adev->dm.audio_lock);
8391 inst = status->audio_inst;
8392 aconnector->audio_inst = inst;
8393 mutex_unlock(&adev->dm.audio_lock);
8394
8395 amdgpu_dm_audio_eld_notify(adev, inst);
8396 }
8397}
8398
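/*
 * Illustrative sketch (not part of the driver): the removal pattern used by
 * amdgpu_dm_commit_audio() above - snapshot and invalidate the audio instance
 * under the lock, then notify outside of it, so the ELD notification never
 * runs with audio_lock held. The function name is hypothetical.
 */
static void example_audio_notify_removal(struct amdgpu_device *adev,
					 struct amdgpu_dm_connector *aconnector)
{
	int inst;

	mutex_lock(&adev->dm.audio_lock);
	inst = aconnector->audio_inst;
	aconnector->audio_inst = -1;	/* mark the instance as unassigned */
	mutex_unlock(&adev->dm.audio_lock);

	amdgpu_dm_audio_eld_notify(adev, inst);
}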
1f6010a9 8399/*
27b3f4fc
LSL
8400 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8401 * @crtc_state: the DRM CRTC state
8402 * @stream_state: the DC stream state.
8403 *
8404 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
8405 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8406 */
8407static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8408 struct dc_stream_state *stream_state)
8409{
b9952f93 8410 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
27b3f4fc 8411}
e7b07cee 8412
b8592b48
LL
8413/**
8414 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8415 * @state: The atomic state to commit
8416 *
8417 * This will tell DC to commit the constructed DC state from atomic_check,
8418 * programming the hardware. Any failure here implies a hardware failure, since
8419 * atomic check should have filtered anything non-kosher.
8420 */
7578ecda 8421static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
e7b07cee
HW
8422{
8423 struct drm_device *dev = state->dev;
1348969a 8424 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
8425 struct amdgpu_display_manager *dm = &adev->dm;
8426 struct dm_atomic_state *dm_state;
eb3dc897 8427 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
ae67558b 8428 u32 i, j;
5cc6dcbd 8429 struct drm_crtc *crtc;
0bc9706d 8430 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
e7b07cee
HW
8431 unsigned long flags;
8432 bool wait_for_vblank = true;
8433 struct drm_connector *connector;
c2cea706 8434 struct drm_connector_state *old_con_state, *new_con_state;
54d76575 8435 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
fe2a1965 8436 int crtc_disable_count = 0;
6ee90e88 8437 bool mode_set_reset_required = false;
047de3f1 8438 int r;
e7b07cee 8439
e8a98235
RS
8440 trace_amdgpu_dm_atomic_commit_tail_begin(state);
8441
047de3f1
CK
8442 r = drm_atomic_helper_wait_for_fences(dev, state, false);
8443 if (unlikely(r))
8444 DRM_ERROR("Waiting for fences timed out!");
8445
e7b07cee 8446 drm_atomic_helper_update_legacy_modeset_state(dev, state);
a5c2c0d1 8447 drm_dp_mst_atomic_wait_for_dependencies(state);
e7b07cee 8448
eb3dc897
NK
8449 dm_state = dm_atomic_get_new_state(state);
8450 if (dm_state && dm_state->context) {
8451 dc_state = dm_state->context;
8452 } else {
8453 /* No state changes, retain current state. */
813d20dc 8454 dc_state_temp = dc_create_state(dm->dc);
eb3dc897
NK
8455 ASSERT(dc_state_temp);
8456 dc_state = dc_state_temp;
8457 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8458 }
e7b07cee 8459
6d90a208
AP
8460 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8461 new_crtc_state, i) {
8462 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8463
8464 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8465
8466 if (old_crtc_state->active &&
8467 (!new_crtc_state->active ||
8468 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8469 manage_dm_interrupts(adev, acrtc, false);
8470 dc_stream_release(dm_old_crtc_state->stream);
8471 }
8472 }
8473
8976f73b
RS
8474 drm_atomic_helper_calc_timestamping_constants(state);
8475
e7b07cee 8476 /* update changed items */
0bc9706d 8477 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
e7b07cee 8478 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 8479
54d76575
LSL
8480 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8481 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
e7b07cee 8482
9f07550b 8483 drm_dbg_state(state->dev,
e7b07cee
HW
8484 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8485 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8486 "connectors_changed:%d\n",
8487 acrtc->crtc_id,
0bc9706d
LSL
8488 new_crtc_state->enable,
8489 new_crtc_state->active,
8490 new_crtc_state->planes_changed,
8491 new_crtc_state->mode_changed,
8492 new_crtc_state->active_changed,
8493 new_crtc_state->connectors_changed);
e7b07cee 8494
5c68c652
VL
8495 /* Disable cursor if disabling crtc */
8496 if (old_crtc_state->active && !new_crtc_state->active) {
8497 struct dc_cursor_position position;
8498
8499 memset(&position, 0, sizeof(position));
8500 mutex_lock(&dm->dc_lock);
8501 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8502 mutex_unlock(&dm->dc_lock);
8503 }
8504
27b3f4fc
LSL
8505 /* Copy all transient state flags into dc state */
8506 if (dm_new_crtc_state->stream) {
8507 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8508 dm_new_crtc_state->stream);
8509 }
8510
e7b07cee
HW
8511 /* handles headless hotplug case, updating new_state and
8512 * aconnector as needed
8513 */
8514
6c5e25a0 8515 if (amdgpu_dm_crtc_modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
e7b07cee 8516
4711c033 8517 DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 8518
54d76575 8519 if (!dm_new_crtc_state->stream) {
e7b07cee 8520 /*
b830ebc9
HW
8521 * This can happen because of issues with
8522 * userspace notification delivery.
8523 * In this case userspace tries to set a mode on a
1f6010a9
DF
8524 * display which is in fact disconnected.
8525 * dc_sink is NULL on the aconnector in this case.
b830ebc9
HW
8526 * We expect a mode reset to come soon.
8527 *
8528 * This can also happen when an unplug is done
8529 * during the resume sequence.
8530 *
8531 * In this case, we want to pretend we still
8532 * have a sink to keep the pipe running so that
8533 * hw state is consistent with the sw state
8534 */
f1ad2f5e 8535 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
e7b07cee
HW
8536 __func__, acrtc->base.base.id);
8537 continue;
8538 }
8539
54d76575
LSL
8540 if (dm_old_crtc_state->stream)
8541 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
e7b07cee 8542
97028037
LP
8543 pm_runtime_get_noresume(dev->dev);
8544
e7b07cee 8545 acrtc->enabled = true;
0bc9706d
LSL
8546 acrtc->hw_mode = new_crtc_state->mode;
8547 crtc->hwmode = new_crtc_state->mode;
6ee90e88 8548 mode_set_reset_required = true;
0bc9706d 8549 } else if (modereset_required(new_crtc_state)) {
4711c033 8550 DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 8551 /* i.e. reset mode */
6ee90e88 8552 if (dm_old_crtc_state->stream)
54d76575 8553 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
a85ba005 8554
6ee90e88 8555 mode_set_reset_required = true;
e7b07cee
HW
8556 }
8557 } /* for_each_crtc_in_state() */
8558
eb3dc897 8559 if (dc_state) {
6ee90e88 8560 /* if there was a mode set or reset, disable eDP PSR */
58aa1c50 8561 if (mode_set_reset_required) {
06dd1888
NK
8562 if (dm->vblank_control_workqueue)
8563 flush_workqueue(dm->vblank_control_workqueue);
cae5c1ab 8564
6ee90e88 8565 amdgpu_dm_psr_disable_all(dm);
58aa1c50 8566 }
6ee90e88 8567
eb3dc897 8568 dm_enable_per_frame_crtc_master_sync(dc_state);
674e78ac 8569 mutex_lock(&dm->dc_lock);
b8272241 8570 WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count));
f3106c94
JC
8571
8572 /* Allow idle optimization when vblank count is 0 for display off */
8573 if (dm->active_vblank_irq_count == 0)
8574 dc_allow_idle_optimizations(dm->dc, true);
674e78ac 8575 mutex_unlock(&dm->dc_lock);
fa2123db 8576 }
fe8858bb 8577
0bc9706d 8578 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 8579 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 8580
54d76575 8581 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 8582
54d76575 8583 if (dm_new_crtc_state->stream != NULL) {
e7b07cee 8584 const struct dc_stream_status *status =
54d76575 8585 dc_stream_get_status(dm_new_crtc_state->stream);
e7b07cee 8586
eb3dc897 8587 if (!status)
09f609c3
LL
8588 status = dc_stream_get_status_from_state(dc_state,
8589 dm_new_crtc_state->stream);
e7b07cee 8590 if (!status)
54d76575 8591 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
e7b07cee
HW
8592 else
8593 acrtc->otg_inst = status->primary_otg_inst;
8594 }
8595 }
0c8620d6
BL
8596 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8597 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8598 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8599 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8600
cdff36a0
HW
8601 if (!adev->dm.hdcp_workqueue)
8602 continue;
8603
e8fd3eeb 8604 pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i);
8605
8606 if (!connector)
8607 continue;
8608
8609 pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
8610 connector->index, connector->status, connector->dpms);
8611 pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
8612 old_con_state->content_protection, new_con_state->content_protection);
8613
8614 if (aconnector->dc_sink) {
8615 if (aconnector->dc_sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
8616 aconnector->dc_sink->sink_signal != SIGNAL_TYPE_NONE) {
8617 pr_debug("[HDCP_DM] pipe_ctx dispname=%s\n",
8618 aconnector->dc_sink->edid_caps.display_name);
8619 }
8620 }
8621
0c8620d6 8622 new_crtc_state = NULL;
e8fd3eeb 8623 old_crtc_state = NULL;
0c8620d6 8624
e8fd3eeb 8625 if (acrtc) {
0c8620d6 8626 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
e8fd3eeb 8627 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8628 }
8629
8630 if (old_crtc_state)
8631 pr_debug("old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
8632 old_crtc_state->enable,
8633 old_crtc_state->active,
8634 old_crtc_state->mode_changed,
8635 old_crtc_state->active_changed,
8636 old_crtc_state->connectors_changed);
8637
8638 if (new_crtc_state)
8639 pr_debug("NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
8640 new_crtc_state->enable,
8641 new_crtc_state->active,
8642 new_crtc_state->mode_changed,
8643 new_crtc_state->active_changed,
8644 new_crtc_state->connectors_changed);
8645 }
8646
8647 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8648 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8649 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8650 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8651
cdff36a0
HW
8652 if (!adev->dm.hdcp_workqueue)
8653 continue;
8654
e8fd3eeb 8655 new_crtc_state = NULL;
8656 old_crtc_state = NULL;
8657
8658 if (acrtc) {
8659 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8660 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8661 }
0c8620d6
BL
8662
8663 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8664
8665 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8666 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8667 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8668 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
97f6c917 8669 dm_new_con_state->update_hdcp = true;
0c8620d6
BL
8670 continue;
8671 }
8672
e8fd3eeb 8673 if (is_content_protection_different(new_crtc_state, old_crtc_state, new_con_state,
8674 old_con_state, connector, adev->dm.hdcp_workqueue)) {
82986fd6 8675 /* When a display is unplugged from an MST hub, the connector will
8676 * be destroyed within dm_dp_mst_connector_destroy. The connector's
8677 * HDCP properties, like type, undesired, desired and enabled,
8678 * will be lost. So, save the HDCP properties into hdcp_work within
8679 * amdgpu_dm_atomic_commit_tail. If the same display is
8680 * plugged back with the same display index, its HDCP properties
8681 * will be retrieved from hdcp_work within dm_dp_mst_get_modes.
8682 */
8683
e8fd3eeb 8684 bool enable_encryption = false;
8685
8686 if (new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED)
8687 enable_encryption = true;
8688
82986fd6 8689 if (aconnector->dc_link && aconnector->dc_sink &&
8690 aconnector->dc_link->type == dc_connection_mst_branch) {
8691 struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue;
8692 struct hdcp_workqueue *hdcp_w =
8693 &hdcp_work[aconnector->dc_link->link_index];
8694
8695 hdcp_w->hdcp_content_type[connector->index] =
8696 new_con_state->hdcp_content_type;
8697 hdcp_w->content_protection[connector->index] =
8698 new_con_state->content_protection;
8699 }
8700
e8fd3eeb 8701 if (new_crtc_state && new_crtc_state->mode_changed &&
8702 new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED)
8703 enable_encryption = true;
8704
8705 DRM_INFO("[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);
8706
b1abe558
BL
8707 hdcp_update_display(
8708 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
e8fd3eeb 8709 new_con_state->hdcp_content_type, enable_encryption);
8710 }
0c8620d6 8711 }
e7b07cee 8712
02d6a6fc 8713 /* Handle connector state changes */
c2cea706 8714 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
54d76575
LSL
8715 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8716 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8717 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
efc8278e 8718 struct dc_surface_update dummy_updates[MAX_SURFACES];
19afd799 8719 struct dc_stream_update stream_update;
b232d4ed 8720 struct dc_info_packet hdr_packet;
e7b07cee 8721 struct dc_stream_status *status = NULL;
b232d4ed 8722 bool abm_changed, hdr_changed, scaling_changed;
e7b07cee 8723
efc8278e 8724 memset(&dummy_updates, 0, sizeof(dummy_updates));
19afd799
NC
8725 memset(&stream_update, 0, sizeof(stream_update));
8726
44d09c6a 8727 if (acrtc) {
0bc9706d 8728 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
44d09c6a
HW
8729 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8730 }
0bc9706d 8731
e7b07cee 8732 /* Skip any modesets/resets */
0bc9706d 8733 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
e7b07cee
HW
8734 continue;
8735
54d76575 8736 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
c1ee92f9
DF
8737 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8738
b232d4ed
NK
8739 scaling_changed = is_scaling_state_different(dm_new_con_state,
8740 dm_old_con_state);
8741
8742 abm_changed = dm_new_crtc_state->abm_level !=
8743 dm_old_crtc_state->abm_level;
8744
8745 hdr_changed =
72921cdf 8746 !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
b232d4ed
NK
8747
8748 if (!scaling_changed && !abm_changed && !hdr_changed)
c1ee92f9 8749 continue;
e7b07cee 8750
b6e881c9 8751 stream_update.stream = dm_new_crtc_state->stream;
b232d4ed 8752 if (scaling_changed) {
02d6a6fc 8753 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
b6e881c9 8754 dm_new_con_state, dm_new_crtc_state->stream);
e7b07cee 8755
02d6a6fc
DF
8756 stream_update.src = dm_new_crtc_state->stream->src;
8757 stream_update.dst = dm_new_crtc_state->stream->dst;
8758 }
8759
b232d4ed 8760 if (abm_changed) {
02d6a6fc
DF
8761 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8762
8763 stream_update.abm_level = &dm_new_crtc_state->abm_level;
8764 }
70e8ffc5 8765
b232d4ed
NK
8766 if (hdr_changed) {
8767 fill_hdr_info_packet(new_con_state, &hdr_packet);
8768 stream_update.hdr_static_metadata = &hdr_packet;
8769 }
8770
54d76575 8771 status = dc_stream_get_status(dm_new_crtc_state->stream);
57738ae4
ND
8772
8773 if (WARN_ON(!status))
8774 continue;
8775
3be5262e 8776 WARN_ON(!status->plane_count);
e7b07cee 8777
02d6a6fc
DF
8778 /*
8779 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8780 * Here we create an empty update on each plane.
8781 * To fix this, DC should permit updating only stream properties.
8782 */
8783 for (j = 0; j < status->plane_count; j++)
efc8278e 8784 dummy_updates[j].surface = status->plane_states[0];
02d6a6fc
DF
8785
8786
8787 mutex_lock(&dm->dc_lock);
f7511289
RS
8788 dc_update_planes_and_stream(dm->dc,
8789 dummy_updates,
8790 status->plane_count,
8791 dm_new_crtc_state->stream,
8792 &stream_update);
02d6a6fc 8793 mutex_unlock(&dm->dc_lock);
e7b07cee
HW
8794 }
8795
8fe684e9
NK
8796 /**
8797 * Enable interrupts for CRTCs that are newly enabled or went through
8798 * a modeset. It was intentionally deferred until after the front end
8799 * state was modified to wait until the OTG was on and so the IRQ
8800 * handlers didn't access stale or invalid state.
8801 */
8802 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8803 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8e7b6fee
WL
8804#ifdef CONFIG_DEBUG_FS
8805 enum amdgpu_dm_pipe_crc_source cur_crc_src;
8799c0be
YL
8806#endif
8807 /* Count number of newly disabled CRTCs for dropping PM refs later. */
8808 if (old_crtc_state->active && !new_crtc_state->active)
8809 crtc_disable_count++;
8810
8811 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8812 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8813
8814 /* For freesync config update on crtc state and params for irq */
8815 update_stream_irq_parameters(dm, dm_new_crtc_state);
8816
8817#ifdef CONFIG_DEBUG_FS
d98af272
WL
8818 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8819 cur_crc_src = acrtc->dm_irq_params.crc_src;
8820 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8e7b6fee 8821#endif
585d450c 8822
8fe684e9
NK
8823 if (new_crtc_state->active &&
8824 (!old_crtc_state->active ||
8825 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
585d450c
AP
8826 dc_stream_retain(dm_new_crtc_state->stream);
8827 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8fe684e9 8828 manage_dm_interrupts(adev, acrtc, true);
8799c0be
YL
8829 }
8830 /* Handle vrr on->off / off->on transitions */
8831 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, dm_new_crtc_state);
e2881d6d 8832
24eb9374 8833#ifdef CONFIG_DEBUG_FS
8799c0be
YL
8834 if (new_crtc_state->active &&
8835 (!old_crtc_state->active ||
8836 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8fe684e9
NK
8837 /**
8838 * Frontend may have changed so reapply the CRC capture
8839 * settings for the stream.
8840 */
8e7b6fee 8841 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
86bc2219 8842#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
d98af272
WL
8843 if (amdgpu_dm_crc_window_is_activated(crtc)) {
8844 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
c0459bdd 8845 acrtc->dm_irq_params.window_param.update_win = true;
1b11ff76
AL
8846
8847 /**
8848 * It takes 2 frames for HW to stably generate CRC when
8849 * resuming from suspend, so we set skip_frame_cnt to 2.
8850 */
c0459bdd 8851 acrtc->dm_irq_params.window_param.skip_frame_cnt = 2;
d98af272
WL
8852 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8853 }
86bc2219 8854#endif
bbc49fc0
WL
8855 if (amdgpu_dm_crtc_configure_crc_source(
8856 crtc, dm_new_crtc_state, cur_crc_src))
8857 DRM_DEBUG_DRIVER("Failed to configure crc source\n");
8799c0be 8858 }
8fe684e9 8859 }
2130b87b 8860#endif
8fe684e9 8861 }
e7b07cee 8862
420cd472 8863 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
4d85f45c 8864 if (new_crtc_state->async_flip)
420cd472
DF
8865 wait_for_vblank = false;
8866
e7b07cee 8867 /* update planes when needed per crtc*/
5cc6dcbd 8868 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
54d76575 8869 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 8870
54d76575 8871 if (dm_new_crtc_state->stream)
eb3dc897 8872 amdgpu_dm_commit_planes(state, dc_state, dev,
420cd472 8873 dm, crtc, wait_for_vblank);
e7b07cee
HW
8874 }
8875
6ce8f316
NK
8876 /* Update audio instances for each connector. */
8877 amdgpu_dm_commit_audio(dev, state);
8878
7230362c 8879 /* restore the backlight level */
7fd13bae
AD
8880 for (i = 0; i < dm->num_of_edps; i++) {
8881 if (dm->backlight_dev[i] &&
4052287a 8882 (dm->actual_brightness[i] != dm->brightness[i]))
7fd13bae
AD
8883 amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
8884 }
83a3439d 8885
e7b07cee
HW
8886 /*
8887 * send vblank event on all events not handled in flip and
8888 * mark consumed event for drm_atomic_helper_commit_hw_done
8889 */
4a580877 8890 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
0bc9706d 8891 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 8892
0bc9706d
LSL
8893 if (new_crtc_state->event)
8894 drm_send_event_locked(dev, &new_crtc_state->event->base);
e7b07cee 8895
0bc9706d 8896 new_crtc_state->event = NULL;
e7b07cee 8897 }
4a580877 8898 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
e7b07cee 8899
29c8f234
LL
8900 /* Signal HW programming completion */
8901 drm_atomic_helper_commit_hw_done(state);
e7b07cee
HW
8902
8903 if (wait_for_vblank)
320a1274 8904 drm_atomic_helper_wait_for_flip_done(dev, state);
e7b07cee
HW
8905
8906 drm_atomic_helper_cleanup_planes(dev, state);
97028037 8907
5f6fab24
AD
8908 /* return the stolen vga memory back to VRAM */
8909 if (!adev->mman.keep_stolen_vga_memory)
8910 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8911 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8912
1f6010a9
DF
8913 /*
8914 * Finally, drop a runtime PM reference for each newly disabled CRTC,
97028037
LP
8915 * so we can put the GPU into runtime suspend if we're not driving any
8916 * displays anymore
8917 */
fe2a1965
LP
8918 for (i = 0; i < crtc_disable_count; i++)
8919 pm_runtime_put_autosuspend(dev->dev);
97028037 8920 pm_runtime_mark_last_busy(dev->dev);
eb3dc897
NK
8921
8922 if (dc_state_temp)
8923 dc_release_state(dc_state_temp);
e7b07cee
HW
8924}
8925
e7b07cee
HW
8926static int dm_force_atomic_commit(struct drm_connector *connector)
8927{
8928 int ret = 0;
8929 struct drm_device *ddev = connector->dev;
8930 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8931 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8932 struct drm_plane *plane = disconnected_acrtc->base.primary;
8933 struct drm_connector_state *conn_state;
8934 struct drm_crtc_state *crtc_state;
8935 struct drm_plane_state *plane_state;
8936
8937 if (!state)
8938 return -ENOMEM;
8939
8940 state->acquire_ctx = ddev->mode_config.acquire_ctx;
8941
8942 /* Construct an atomic state to restore previous display setting */
8943
8944 /*
8945 * Attach connectors to drm_atomic_state
8946 */
8947 conn_state = drm_atomic_get_connector_state(state, connector);
8948
8949 ret = PTR_ERR_OR_ZERO(conn_state);
8950 if (ret)
2dc39051 8951 goto out;
e7b07cee
HW
8952
8953 /* Attach crtc to drm_atomic_state*/
8954 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8955
8956 ret = PTR_ERR_OR_ZERO(crtc_state);
8957 if (ret)
2dc39051 8958 goto out;
e7b07cee
HW
8959
8960 /* force a restore */
8961 crtc_state->mode_changed = true;
8962
8963 /* Attach plane to drm_atomic_state */
8964 plane_state = drm_atomic_get_plane_state(state, plane);
8965
8966 ret = PTR_ERR_OR_ZERO(plane_state);
8967 if (ret)
2dc39051 8968 goto out;
e7b07cee
HW
8969
8970 /* Call commit internally with the state we just constructed */
8971 ret = drm_atomic_commit(state);
e7b07cee 8972
2dc39051 8973out:
e7b07cee 8974 drm_atomic_state_put(state);
2dc39051
VL
8975 if (ret)
8976 DRM_ERROR("Restoring old state failed with %i\n", ret);
e7b07cee
HW
8977
8978 return ret;
8979}
8980
8981/*
1f6010a9
DF
8982 * This function handles all cases where a mode set does not come upon hotplug.
8983 * This includes when a display is unplugged then plugged back into the
8984 * same port and when running without usermode desktop manager support.
e7b07cee 8985 */
3ee6b26b
AD
8986void dm_restore_drm_connector_state(struct drm_device *dev,
8987 struct drm_connector *connector)
e7b07cee 8988{
c84dec2f 8989 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
8990 struct amdgpu_crtc *disconnected_acrtc;
8991 struct dm_crtc_state *acrtc_state;
8992
8993 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8994 return;
8995
8996 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
70e8ffc5
HW
8997 if (!disconnected_acrtc)
8998 return;
e7b07cee 8999
70e8ffc5
HW
9000 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9001 if (!acrtc_state->stream)
e7b07cee
HW
9002 return;
9003
9004 /*
9005 * If the previous sink is not released and different from the current,
9006 * we deduce we are in a state where we cannot rely on a usermode call
9007 * to turn on the display, so we do it here.
9008 */
9009 if (acrtc_state->stream->sink != aconnector->dc_sink)
9010 dm_force_atomic_commit(&aconnector->base);
9011}
9012
1f6010a9 9013/*
e7b07cee
HW
9014 * Grabs all modesetting locks to serialize against any blocking commits,
9015 * and waits for completion of all non-blocking commits.
9016 */
3ee6b26b
AD
9017static int do_aquire_global_lock(struct drm_device *dev,
9018 struct drm_atomic_state *state)
e7b07cee
HW
9019{
9020 struct drm_crtc *crtc;
9021 struct drm_crtc_commit *commit;
9022 long ret;
9023
1f6010a9
DF
9024 /*
9025 * Adding all modeset locks to the acquire_ctx will
e7b07cee
HW
9026 * ensure that when the framework releases it, the
9027 * extra locks we are taking here will get released too.
9028 */
9029 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9030 if (ret)
9031 return ret;
9032
9033 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9034 spin_lock(&crtc->commit_lock);
9035 commit = list_first_entry_or_null(&crtc->commit_list,
9036 struct drm_crtc_commit, commit_entry);
9037 if (commit)
9038 drm_crtc_commit_get(commit);
9039 spin_unlock(&crtc->commit_lock);
9040
9041 if (!commit)
9042 continue;
9043
1f6010a9
DF
9044 /*
9045 * Make sure all pending HW programming has completed and
e7b07cee
HW
9046 * page flips are done
9047 */
9048 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9049
9050 if (ret > 0)
9051 ret = wait_for_completion_interruptible_timeout(
9052 &commit->flip_done, 10*HZ);
9053
9054 if (ret == 0)
9055 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
b830ebc9 9056 "timed out\n", crtc->base.id, crtc->name);
e7b07cee
HW
9057
9058 drm_crtc_commit_put(commit);
9059 }
9060
9061 return ret < 0 ? ret : 0;
9062}
9063
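/*
 * Note (illustrative, not from the driver): the return convention above
 * relies on wait_for_completion_interruptible_timeout() returning a negative
 * error if interrupted, 0 on timeout, and the remaining jiffies (> 0) on
 * success, so "ret < 0 ? ret : 0" forwards only the interrupted case and
 * treats a logged timeout as non-fatal.
 */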
bb47de73
NK
9064static void get_freesync_config_for_crtc(
9065 struct dm_crtc_state *new_crtc_state,
9066 struct dm_connector_state *new_con_state)
98e6436d
AK
9067{
9068 struct mod_freesync_config config = {0};
98e6436d
AK
9069 struct amdgpu_dm_connector *aconnector =
9070 to_amdgpu_dm_connector(new_con_state->base.connector);
a057ec46 9071 struct drm_display_mode *mode = &new_crtc_state->base.mode;
0ab925d3 9072 int vrefresh = drm_mode_vrefresh(mode);
a85ba005 9073 bool fs_vid_mode = false;
98e6436d 9074
a057ec46 9075 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
0ab925d3
NK
9076 vrefresh >= aconnector->min_vfreq &&
9077 vrefresh <= aconnector->max_vfreq;
bb47de73 9078
6ffa6799 9079 if (new_crtc_state->vrr_supported) {
7e5098ab 9080 new_crtc_state->stream->ignore_msa_timing_param = true;
6ffa6799 9081 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
7e5098ab 9082
a85ba005
NC
9083 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9084 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
69ff8845 9085 config.vsif_supported = true;
180db303 9086 config.btr = true;
98e6436d 9087
a85ba005
NC
9088 if (fs_vid_mode) {
9089 config.state = VRR_STATE_ACTIVE_FIXED;
9090 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9091 goto out;
9092 } else if (new_crtc_state->base.vrr_enabled) {
9093 config.state = VRR_STATE_ACTIVE_VARIABLE;
9094 } else {
9095 config.state = VRR_STATE_INACTIVE;
9096 }
9097 }
9098out:
bb47de73
NK
9099 new_crtc_state->freesync_config = config;
9100}
98e6436d 9101
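/*
 * Illustrative sketch (not part of the driver): get_freesync_config_for_crtc()
 * above reports VRR as supported only when the connector is freesync capable
 * and the mode's nominal refresh rate falls inside the connector's
 * [min_vfreq, max_vfreq] range; the bounds are then carried in the config in
 * uHz (Hz * 1000000). Names below are local to the example.
 */
static inline bool example_vrr_supported(int vrefresh_hz,
					 int min_vfreq_hz, int max_vfreq_hz)
{
	return vrefresh_hz >= min_vfreq_hz && vrefresh_hz <= max_vfreq_hz;
}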
bb47de73
NK
9102static void reset_freesync_config_for_crtc(
9103 struct dm_crtc_state *new_crtc_state)
9104{
9105 new_crtc_state->vrr_supported = false;
98e6436d 9106
bb47de73
NK
9107 memset(&new_crtc_state->vrr_infopacket, 0,
9108 sizeof(new_crtc_state->vrr_infopacket));
98e6436d
AK
9109}
9110
a85ba005
NC
9111static bool
9112is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9113 struct drm_crtc_state *new_crtc_state)
9114{
1cbd7887 9115 const struct drm_display_mode *old_mode, *new_mode;
a85ba005
NC
9116
9117 if (!old_crtc_state || !new_crtc_state)
9118 return false;
9119
1cbd7887
VS
9120 old_mode = &old_crtc_state->mode;
9121 new_mode = &new_crtc_state->mode;
9122
9123 if (old_mode->clock == new_mode->clock &&
9124 old_mode->hdisplay == new_mode->hdisplay &&
9125 old_mode->vdisplay == new_mode->vdisplay &&
9126 old_mode->htotal == new_mode->htotal &&
9127 old_mode->vtotal != new_mode->vtotal &&
9128 old_mode->hsync_start == new_mode->hsync_start &&
9129 old_mode->vsync_start != new_mode->vsync_start &&
9130 old_mode->hsync_end == new_mode->hsync_end &&
9131 old_mode->vsync_end != new_mode->vsync_end &&
9132 old_mode->hskew == new_mode->hskew &&
9133 old_mode->vscan == new_mode->vscan &&
9134 (old_mode->vsync_end - old_mode->vsync_start) ==
9135 (new_mode->vsync_end - new_mode->vsync_start))
a85ba005
NC
9136 return true;
9137
9138 return false;
9139}
9140
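/*
 * Worked example (illustrative): with a 148.5 MHz pixel clock and htotal
 * 2200, the line rate is 67500 lines/s, so vtotal 1125 gives 60 Hz and
 * vtotal 1350 gives 50 Hz. If only the vertical front porch grows by those
 * 225 lines (vtotal, vsync_start and vsync_end all shift by the same
 * amount, keeping the vsync width equal), the check above reports the
 * timing as unchanged for freesync purposes and a full modeset can be
 * skipped.
 */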
9141static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
ae67558b 9142 u64 num, den, res;
a85ba005
NC
9143 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9144
9145 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9146
9147 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9148 den = (unsigned long long)new_crtc_state->mode.htotal *
9149 (unsigned long long)new_crtc_state->mode.vtotal;
9150
9151 res = div_u64(num, den);
9152 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9153}
9154
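/*
 * Worked example (illustrative, not part of the driver) of the fixed
 * refresh math above: refresh_uhz = clock_kHz * 1000 * 1000000 /
 * (htotal * vtotal). For a CEA 1920x1080@60 timing (clock 148500 kHz,
 * htotal 2200, vtotal 1125) this yields 60000000 uHz, i.e. 60 Hz.
 */
static inline u64 example_refresh_in_uhz(u64 clock_khz, u32 htotal, u32 vtotal)
{
	return div_u64(clock_khz * 1000ULL * 1000000ULL, htotal * vtotal);
}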
f11d9373 9155static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
17ce8a69
RL
9156 struct drm_atomic_state *state,
9157 struct drm_crtc *crtc,
9158 struct drm_crtc_state *old_crtc_state,
9159 struct drm_crtc_state *new_crtc_state,
9160 bool enable,
9161 bool *lock_and_validation_needed)
e7b07cee 9162{
eb3dc897 9163 struct dm_atomic_state *dm_state = NULL;
54d76575 9164 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9635b754 9165 struct dc_stream_state *new_stream;
62f55537 9166 int ret = 0;
d4d4a645 9167
1f6010a9
DF
9168 /*
9169 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9170 * update changed items
9171 */
4b9674e5
LL
9172 struct amdgpu_crtc *acrtc = NULL;
9173 struct amdgpu_dm_connector *aconnector = NULL;
9174 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9175 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
e7b07cee 9176
4b9674e5 9177 new_stream = NULL;
9635b754 9178
4b9674e5
LL
9179 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9180 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9181 acrtc = to_amdgpu_crtc(crtc);
4b9674e5 9182 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
19f89e23 9183
4b9674e5
LL
9184 /* TODO This hack should go away */
9185 if (aconnector && enable) {
9186 /* Make sure fake sink is created in plug-in scenario */
9187 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9188 &aconnector->base);
9189 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9190 &aconnector->base);
19f89e23 9191
4b9674e5
LL
9192 if (IS_ERR(drm_new_conn_state)) {
9193 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9194 goto fail;
9195 }
19f89e23 9196
4b9674e5
LL
9197 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9198 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
19f89e23 9199
02d35a67
JFZ
9200 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9201 goto skip_modeset;
9202
cbd14ae7
SW
9203 new_stream = create_validate_stream_for_sink(aconnector,
9204 &new_crtc_state->mode,
9205 dm_new_conn_state,
9206 dm_old_crtc_state->stream);
19f89e23 9207
4b9674e5
LL
9208 /*
9209 * We can have no stream on ACTION_SET if a display
9210 * was disconnected during S3; in this case it is not an
9211 * error, the OS will be updated after detection, and
9212 * will do the right thing on the next atomic commit.
9213 */
19f89e23 9214
4b9674e5
LL
9215 if (!new_stream) {
9216 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9217 __func__, acrtc->base.base.id);
9218 ret = -ENOMEM;
9219 goto fail;
9220 }
e7b07cee 9221
3d4e52d0
VL
9222 /*
9223 * TODO: Check VSDB bits to decide whether this should
9224 * be enabled or not.
9225 */
9226 new_stream->triggered_crtc_reset.enabled =
9227 dm->force_timing_sync;
9228
4b9674e5 9229 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
98e6436d 9230
88694af9
NK
9231 ret = fill_hdr_info_packet(drm_new_conn_state,
9232 &new_stream->hdr_static_metadata);
9233 if (ret)
9234 goto fail;
9235
7e930949
NK
9236 /*
9237 * If we already removed the old stream from the context
9238 * (and set the new stream to NULL) then we can't reuse
9239 * the old stream even if the stream and scaling are unchanged.
9240 * We'll hit the BUG_ON and get a black screen.
9241 *
9242 * TODO: Refactor this function to allow this check to work
9243 * in all conditions.
9244 */
4243c84a
MD
9245 if (amdgpu_freesync_vid_mode &&
9246 dm_new_crtc_state->stream &&
a85ba005
NC
9247 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9248 goto skip_modeset;
9249
7e930949
NK
9250 if (dm_new_crtc_state->stream &&
9251 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
4b9674e5
LL
9252 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9253 new_crtc_state->mode_changed = false;
9254 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
9255 new_crtc_state->mode_changed);
62f55537 9256 }
4b9674e5 9257 }
b830ebc9 9258
02d35a67 9259 /* mode_changed flag may get updated above, need to check again */
4b9674e5
LL
9260 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9261 goto skip_modeset;
e7b07cee 9262
9f07550b 9263 drm_dbg_state(state->dev,
4b9674e5
LL
9264 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9265 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
9266 "connectors_changed:%d\n",
9267 acrtc->crtc_id,
9268 new_crtc_state->enable,
9269 new_crtc_state->active,
9270 new_crtc_state->planes_changed,
9271 new_crtc_state->mode_changed,
9272 new_crtc_state->active_changed,
9273 new_crtc_state->connectors_changed);
62f55537 9274
4b9674e5
LL
9275 /* Remove stream for any changed/disabled CRTC */
9276 if (!enable) {
62f55537 9277
4b9674e5
LL
9278 if (!dm_old_crtc_state->stream)
9279 goto skip_modeset;
eb3dc897 9280
0f5f1ee4
AP
9281 /* Unset freesync video if it was active before */
9282 if (dm_old_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED) {
9283 dm_new_crtc_state->freesync_config.state = VRR_STATE_INACTIVE;
9284 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = 0;
9285 }
9286
9287 /* Now check if we should set freesync video mode */
4243c84a 9288 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
b18f05a0
AP
9289 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9290 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream) &&
a85ba005
NC
9291 is_timing_unchanged_for_freesync(new_crtc_state,
9292 old_crtc_state)) {
9293 new_crtc_state->mode_changed = false;
9294 DRM_DEBUG_DRIVER(
9295 "Mode change not required for front porch change, "
9296 "setting mode_changed to %d",
9297 new_crtc_state->mode_changed);
9298
9299 set_freesync_fixed_config(dm_new_crtc_state);
9300
9301 goto skip_modeset;
4243c84a 9302 } else if (amdgpu_freesync_vid_mode && aconnector &&
a85ba005
NC
9303 is_freesync_video_mode(&new_crtc_state->mode,
9304 aconnector)) {
e88ebd83
SC
9305 struct drm_display_mode *high_mode;
9306
9307 high_mode = get_highest_refresh_rate_mode(aconnector, false);
9308 if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
9309 set_freesync_fixed_config(dm_new_crtc_state);
9310 }
a85ba005
NC
9311 }
9312
4b9674e5
LL
9313 ret = dm_atomic_get_state(state, &dm_state);
9314 if (ret)
9315 goto fail;
e7b07cee 9316
4b9674e5
LL
9317 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9318 crtc->base.id);
62f55537 9319
4b9674e5
LL
9320 /* i.e. reset mode */
9321 if (dc_remove_stream_from_ctx(
9322 dm->dc,
9323 dm_state->context,
9324 dm_old_crtc_state->stream) != DC_OK) {
9325 ret = -EINVAL;
9326 goto fail;
9327 }
62f55537 9328
4b9674e5
LL
9329 dc_stream_release(dm_old_crtc_state->stream);
9330 dm_new_crtc_state->stream = NULL;
bb47de73 9331
4b9674e5 9332 reset_freesync_config_for_crtc(dm_new_crtc_state);
62f55537 9333
4b9674e5 9334 *lock_and_validation_needed = true;
62f55537 9335
4b9674e5
LL
9336 } else {/* Add stream for any updated/enabled CRTC */
9337 /*
9338 * Quick fix to prevent a NULL pointer dereference on new_stream when
9339 * newly added MST connectors are not found in the existing crtc_state in chained mode.
9340 * TODO: dig out the root cause of this.
9341 */
84a8b390 9342 if (!aconnector)
4b9674e5 9343 goto skip_modeset;
62f55537 9344
4b9674e5
LL
9345 if (modereset_required(new_crtc_state))
9346 goto skip_modeset;
62f55537 9347
6c5e25a0 9348 if (amdgpu_dm_crtc_modeset_required(new_crtc_state, new_stream,
4b9674e5 9349 dm_old_crtc_state->stream)) {
62f55537 9350
4b9674e5 9351 WARN_ON(dm_new_crtc_state->stream);
eb3dc897 9352
4b9674e5
LL
9353 ret = dm_atomic_get_state(state, &dm_state);
9354 if (ret)
9355 goto fail;
27b3f4fc 9356
4b9674e5 9357 dm_new_crtc_state->stream = new_stream;
62f55537 9358
4b9674e5 9359 dc_stream_retain(new_stream);
1dc90497 9360
4711c033
LT
9361 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9362 crtc->base.id);
1dc90497 9363
4b9674e5
LL
9364 if (dc_add_stream_to_ctx(
9365 dm->dc,
9366 dm_state->context,
9367 dm_new_crtc_state->stream) != DC_OK) {
9368 ret = -EINVAL;
9369 goto fail;
9b690ef3
BL
9370 }
9371
4b9674e5
LL
9372 *lock_and_validation_needed = true;
9373 }
9374 }
e277adc5 9375
4b9674e5
LL
9376skip_modeset:
9377 /* Release extra reference */
9378 if (new_stream)
3335a135 9379 dc_stream_release(new_stream);
e277adc5 9380
4b9674e5
LL
9381 /*
9382 * We want to do dc stream updates that do not require a
9383 * full modeset below.
9384 */
2afda735 9385 if (!(enable && aconnector && new_crtc_state->active))
4b9674e5
LL
9386 return 0;
9387 /*
9388 * Given above conditions, the dc state cannot be NULL because:
9389 * 1. We're in the process of enabling CRTCs (just been added
9390 * to the dc context, or already is on the context)
9391 * 2. Has a valid connector attached, and
9392 * 3. Is currently active and enabled.
9393 * => The dc stream state currently exists.
9394 */
9395 BUG_ON(dm_new_crtc_state->stream == NULL);
a9e8d275 9396
4b9674e5 9397 /* Scaling or underscan settings */
c521fc31
RL
9398 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
9399 drm_atomic_crtc_needs_modeset(new_crtc_state))
4b9674e5
LL
9400 update_stream_scaling_settings(
9401 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
98e6436d 9402
b05e2c5e
DF
9403 /* ABM settings */
9404 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9405
4b9674e5
LL
9406 /*
9407 * Color management settings. We also update color properties
9408 * when a modeset is needed, to ensure it gets reprogrammed.
9409 */
9410 if (dm_new_crtc_state->base.color_mgmt_changed ||
9411 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
cf020d49 9412 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
4b9674e5
LL
9413 if (ret)
9414 goto fail;
62f55537 9415 }
e7b07cee 9416
4b9674e5
LL
9417 /* Update Freesync settings. */
9418 get_freesync_config_for_crtc(dm_new_crtc_state,
9419 dm_new_conn_state);
9420
62f55537 9421 return ret;
9635b754
DS
9422
9423fail:
9424 if (new_stream)
9425 dc_stream_release(new_stream);
9426 return ret;
62f55537 9427}
9b690ef3 9428
f6ff2a08
NK
9429static bool should_reset_plane(struct drm_atomic_state *state,
9430 struct drm_plane *plane,
9431 struct drm_plane_state *old_plane_state,
9432 struct drm_plane_state *new_plane_state)
9433{
9434 struct drm_plane *other;
9435 struct drm_plane_state *old_other_state, *new_other_state;
9436 struct drm_crtc_state *new_crtc_state;
9437 int i;
9438
70a1efac
NK
9439 /*
9440 * TODO: Remove this hack once the checks below are sufficient
9441 * to determine when we need to reset all the planes on
9442 * the stream.
9443 */
9444 if (state->allow_modeset)
9445 return true;
9446
f6ff2a08
NK
9447 /* Exit early if we know that we're adding or removing the plane. */
9448 if (old_plane_state->crtc != new_plane_state->crtc)
9449 return true;
9450
9451 /* old crtc == new_crtc == NULL, plane not in context. */
9452 if (!new_plane_state->crtc)
9453 return false;
9454
9455 new_crtc_state =
9456 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9457
9458 if (!new_crtc_state)
9459 return true;
9460
7316c4ad
NK
9461 /* CRTC Degamma changes currently require us to recreate planes. */
9462 if (new_crtc_state->color_mgmt_changed)
9463 return true;
9464
f6ff2a08
NK
9465 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9466 return true;
9467
9468 /*
9469 * If there are any new primary or overlay planes being added or
9470 * removed then the z-order can potentially change. To ensure
9471 * correct z-order and pipe acquisition the current DC architecture
9472 * requires us to remove and recreate all existing planes.
9473 *
9474 * TODO: Come up with a more elegant solution for this.
9475 */
9476 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
6eed95b0 9477 struct amdgpu_framebuffer *old_afb, *new_afb;
f6ff2a08
NK
9478 if (other->type == DRM_PLANE_TYPE_CURSOR)
9479 continue;
9480
9481 if (old_other_state->crtc != new_plane_state->crtc &&
9482 new_other_state->crtc != new_plane_state->crtc)
9483 continue;
9484
9485 if (old_other_state->crtc != new_other_state->crtc)
9486 return true;
9487
dc4cb30d
NK
9488 /* Src/dst size and scaling updates. */
9489 if (old_other_state->src_w != new_other_state->src_w ||
9490 old_other_state->src_h != new_other_state->src_h ||
9491 old_other_state->crtc_w != new_other_state->crtc_w ||
9492 old_other_state->crtc_h != new_other_state->crtc_h)
9493 return true;
9494
9495 /* Rotation / mirroring updates. */
9496 if (old_other_state->rotation != new_other_state->rotation)
9497 return true;
9498
9499 /* Blending updates. */
9500 if (old_other_state->pixel_blend_mode !=
9501 new_other_state->pixel_blend_mode)
9502 return true;
9503
9504 /* Alpha updates. */
9505 if (old_other_state->alpha != new_other_state->alpha)
9506 return true;
9507
9508 /* Colorspace changes. */
9509 if (old_other_state->color_range != new_other_state->color_range ||
9510 old_other_state->color_encoding != new_other_state->color_encoding)
9511 return true;
9512
9a81cc60
NK
9513 /* Framebuffer checks fall at the end. */
9514 if (!old_other_state->fb || !new_other_state->fb)
9515 continue;
9516
9517 /* Pixel format changes can require bandwidth updates. */
9518 if (old_other_state->fb->format != new_other_state->fb->format)
9519 return true;
9520
6eed95b0
BN
9521 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9522 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9a81cc60
NK
9523
9524 /* Tiling and DCC changes also require bandwidth updates. */
37384b3f
BN
9525 if (old_afb->tiling_flags != new_afb->tiling_flags ||
9526 old_afb->base.modifier != new_afb->base.modifier)
f6ff2a08
NK
9527 return true;
9528 }
9529
9530 return false;
9531}
9532
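/*
 * Validate a framebuffer that is about to be bound to the hardware
 * cursor plane: the cursor cannot be cropped or scaled, only a small
 * set of pitches is accepted, and the surface must be linear when no
 * modifier is supplied.
 */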
static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
			      struct drm_plane_state *new_plane_state,
			      struct drm_framebuffer *fb)
{
	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
	unsigned int pitch;
	bool linear;

	if (fb->width > new_acrtc->max_cursor_width ||
	    fb->height > new_acrtc->max_cursor_height) {
		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
				 new_plane_state->fb->width,
				 new_plane_state->fb->height);
		return -EINVAL;
	}
	if (new_plane_state->src_w != fb->width << 16 ||
	    new_plane_state->src_h != fb->height << 16) {
		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
		return -EINVAL;
	}

	/* Pitch in pixels */
	pitch = fb->pitches[0] / fb->format->cpp[0];

	if (fb->width != pitch) {
		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
				 fb->width, pitch);
		return -EINVAL;
	}

	switch (pitch) {
	case 64:
	case 128:
	case 256:
		/* FB pitch is supported by cursor plane */
		break;
	default:
		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
		return -EINVAL;
	}

	/*
	 * Core DRM takes care of checking FB modifiers, so we only need to
	 * check tiling flags when the FB doesn't have a modifier.
	 */
	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
		if (adev->family < AMDGPU_FAMILY_AI) {
			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
				 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
		} else {
			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
		}
		if (!linear) {
			DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
			return -EINVAL;
		}
	}

	return 0;
}

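/*
 * Add or remove a plane in the DC context for this atomic commit.
 * Atomic check calls this twice per plane: once with enable == false to
 * strip changed planes from the context, and once with enable == true
 * to re-add them, mirroring the remove/add sequence DC expects.
 */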
static int dm_update_plane_state(struct dc *dc,
				 struct drm_atomic_state *state,
				 struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state,
				 struct drm_plane_state *new_plane_state,
				 bool enable,
				 bool *lock_and_validation_needed,
				 bool *is_top_most_overlay)
{
	struct dm_atomic_state *dm_state = NULL;
	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
	struct amdgpu_crtc *new_acrtc;
	bool needs_reset;
	int ret = 0;

	new_plane_crtc = new_plane_state->crtc;
	old_plane_crtc = old_plane_state->crtc;
	dm_new_plane_state = to_dm_plane_state(new_plane_state);
	dm_old_plane_state = to_dm_plane_state(old_plane_state);

	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
		if (!enable || !new_plane_crtc ||
		    drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		new_acrtc = to_amdgpu_crtc(new_plane_crtc);

		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
			return -EINVAL;
		}

		if (new_plane_state->fb) {
			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
						 new_plane_state->fb);
			if (ret)
				return ret;
		}

		return 0;
	}

	needs_reset = should_reset_plane(state, plane, old_plane_state,
					 new_plane_state);

	/* Remove any changed/removed planes */
	if (!enable) {
		if (!needs_reset)
			return 0;

		if (!old_plane_crtc)
			return 0;

		old_crtc_state = drm_atomic_get_old_crtc_state(
				state, old_plane_crtc);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (!dm_old_crtc_state->stream)
			return 0;

		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, old_plane_crtc->base.id);

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			return ret;

		if (!dc_remove_plane_from_context(
				dc,
				dm_old_crtc_state->stream,
				dm_old_plane_state->dc_state,
				dm_state->context)) {
			return -EINVAL;
		}

		if (dm_old_plane_state->dc_state)
			dc_plane_state_release(dm_old_plane_state->dc_state);

		dm_new_plane_state->dc_state = NULL;

		*lock_and_validation_needed = true;

	} else { /* Add new planes */
		struct dc_plane_state *dc_new_plane_state;

		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		if (!new_plane_crtc)
			return 0;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (!dm_new_crtc_state->stream)
			return 0;

		if (!needs_reset)
			return 0;

		ret = amdgpu_dm_plane_helper_check_state(new_plane_state, new_crtc_state);
		if (ret)
			return ret;

		WARN_ON(dm_new_plane_state->dc_state);

		dc_new_plane_state = dc_create_plane_state(dc);
		if (!dc_new_plane_state)
			return -ENOMEM;

		/* Block top-most plane from being a video plane */
		if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
			if (is_video_format(new_plane_state->fb->format->format) && *is_top_most_overlay) {
				/* Release the plane state created above so it is not leaked. */
				dc_plane_state_release(dc_new_plane_state);
				return -EINVAL;
			}

			*is_top_most_overlay = false;
		}

		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, new_plane_crtc->base.id);

		ret = fill_dc_plane_attributes(
			drm_to_adev(new_plane_crtc->dev),
			dc_new_plane_state,
			new_plane_state,
			new_crtc_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		/*
		 * Any atomic check errors that occur after this will
		 * not need a release. The plane state will be attached
		 * to the stream, and therefore part of the atomic
		 * state. It'll be released when the atomic state is
		 * cleaned.
		 */
		if (!dc_add_plane_to_context(
				dc,
				dm_new_crtc_state->stream,
				dc_new_plane_state,
				dm_state->context)) {
			dc_plane_state_release(dc_new_plane_state);
			return -EINVAL;
		}

		dm_new_plane_state->dc_state = dc_new_plane_state;

		dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);

		/*
		 * Tell DC to do a full surface update every time there
		 * is a plane change. Inefficient, but works for now.
		 */
		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;

		*lock_and_validation_needed = true;
	}

	return ret;
}

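/*
 * Return the plane's source size in whole pixels, swapping width and
 * height when the plane is rotated by 90 or 270 degrees. DRM stores
 * src_w/src_h in 16.16 fixed point, hence the shift.
 */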
static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
				       int *src_w, int *src_h)
{
	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
	case DRM_MODE_ROTATE_90:
	case DRM_MODE_ROTATE_270:
		*src_w = plane_state->src_h >> 16;
		*src_h = plane_state->src_w >> 16;
		break;
	case DRM_MODE_ROTATE_0:
	case DRM_MODE_ROTATE_180:
	default:
		*src_w = plane_state->src_w >> 16;
		*src_h = plane_state->src_h >> 16;
		break;
	}
}

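/*
 * The cursor/underlying-plane scale comparison below is done in integer
 * per-mille (crtc size * 1000 / src size), avoiding floating point in
 * the kernel while still catching fractional scaling mismatches.
 */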
static int dm_check_crtc_cursor(struct drm_atomic_state *state,
				struct drm_crtc *crtc,
				struct drm_crtc_state *new_crtc_state)
{
	struct drm_plane *cursor = crtc->cursor, *underlying;
	struct drm_plane_state *new_cursor_state, *new_underlying_state;
	int i;
	int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
	int cursor_src_w, cursor_src_h;
	int underlying_src_w, underlying_src_h;

	/*
	 * On DCE and DCN there is no dedicated hardware cursor plane. We get a
	 * cursor per pipe but it's going to inherit the scaling and
	 * positioning from the underlying pipe. Check that the cursor plane's
	 * blending properties match the underlying planes'.
	 */
	new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
	if (!new_cursor_state || !new_cursor_state->fb)
		return 0;

	dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
	cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
	cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;

	for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
		/* Narrow down to non-cursor planes on the same CRTC as the cursor */
		if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
			continue;

		/* Ignore disabled planes */
		if (!new_underlying_state->fb)
			continue;

		dm_get_oriented_plane_size(new_underlying_state,
					   &underlying_src_w, &underlying_src_h);
		underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
		underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;

		if (cursor_scale_w != underlying_scale_w ||
		    cursor_scale_h != underlying_scale_h) {
			drm_dbg_atomic(crtc->dev,
				       "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
				       cursor->base.id, cursor->name, underlying->base.id, underlying->name);
			return -EINVAL;
		}

		/* If this plane covers the whole CRTC, no need to check planes underneath */
		if (new_underlying_state->crtc_x <= 0 &&
		    new_underlying_state->crtc_y <= 0 &&
		    new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
		    new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
			break;
	}

	return 0;
}

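/*
 * If a modeset touches a CRTC driven over MST with DSC, pull every other
 * CRTC on the same MST topology into the atomic state so the shared link
 * bandwidth can be revalidated.
 */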
static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_state *conn_state, *old_conn_state;
	struct amdgpu_dm_connector *aconnector = NULL;
	int i;

	for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
		if (!conn_state->crtc)
			conn_state = old_conn_state;

		if (conn_state->crtc != crtc)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);
		if (!aconnector->mst_output_port || !aconnector->mst_root)
			aconnector = NULL;
		else
			break;
	}

	if (!aconnector)
		return 0;

	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_root->mst_mgr);
}

/**
 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
 *
 * @dev: The DRM device
 * @state: The atomic state to commit
 *
 * Validate that the given atomic state is programmable by DC into hardware.
 * This involves constructing a &struct dc_state reflecting the new hardware
 * state we wish to commit, then querying DC to see if it is programmable. It's
 * important not to modify the existing DC state. Otherwise, atomic_check
 * may unexpectedly commit hardware changes.
 *
 * When validating the DC state, it's important that the right locks are
 * acquired. For full updates, which remove/add/update streams on one CRTC
 * while flipping on another CRTC, acquiring the global lock guarantees that
 * any such full update commit will wait for completion of any outstanding
 * flip using DRM's synchronization events.
 *
 * Note that DM adds the affected connectors for all CRTCs in state, when that
 * might not seem necessary. This is because DC stream creation requires the
 * DC sink, which is tied to the DRM connector state. Cleaning this up should
 * be possible but non-trivial - a possible TODO item.
 *
 * Return: 0 on success, negative error code on validation failure.
 */
static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_atomic_state *dm_state = NULL;
	struct dc *dc = adev->dm.dc;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	enum dc_status status;
	int ret, i;
	bool lock_and_validation_needed = false;
	bool is_top_most_overlay = true;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	struct drm_dp_mst_topology_mgr *mgr;
	struct drm_dp_mst_topology_state *mst_state;
	struct dsc_mst_fairness_vars vars[MAX_PIPES];

	trace_amdgpu_dm_atomic_check_begin(state);

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret) {
		DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
		goto fail;
	}

	/* Check connector changes */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);

		/* Skip connectors that are disabled or part of modeset already. */
		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
		if (IS_ERR(new_crtc_state)) {
			DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
			ret = PTR_ERR(new_crtc_state);
			goto fail;
		}

		if (dm_old_con_state->abm_level != dm_new_con_state->abm_level ||
		    dm_old_con_state->scaling != dm_new_con_state->scaling)
			new_crtc_state->connectors_changed = true;
	}

	if (dc_resource_is_dsc_encoding_supported(dc)) {
		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
				ret = add_affected_mst_dsc_crtcs(state, crtc);
				if (ret) {
					DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
					goto fail;
				}
			}
		}
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->color_mgmt_changed &&
		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
		    dm_old_crtc_state->dsc_force_changed == false)
			continue;

		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
		if (ret) {
			DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
			goto fail;
		}

		if (!new_crtc_state->enable)
			continue;

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret) {
			DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
			goto fail;
		}

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret) {
			DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
			goto fail;
		}

		if (dm_old_crtc_state->dsc_force_changed)
			new_crtc_state->mode_changed = true;
	}

	/*
	 * Add all primary and overlay planes on the CRTC to the state
	 * whenever a plane is enabled to maintain correct z-ordering
	 * and to enable fast surface updates.
	 */
	drm_for_each_crtc(crtc, dev) {
		bool modified = false;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			if (new_plane_state->crtc == crtc ||
			    old_plane_state->crtc == crtc) {
				modified = true;
				break;
			}
		}

		if (!modified)
			continue;

		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			new_plane_state =
				drm_atomic_get_plane_state(state, plane);

			if (IS_ERR(new_plane_state)) {
				ret = PTR_ERR(new_plane_state);
				DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
				goto fail;
			}
		}
	}

	/*
	 * DC consults the zpos (layer_index in DC terminology) to determine the
	 * hw plane on which to enable the hw cursor (see
	 * `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in
	 * the atomic state, so call the drm helper to normalize zpos.
	 */
	ret = drm_atomic_normalize_zpos(dev, state);
	if (ret) {
		drm_dbg(dev, "drm_atomic_normalize_zpos() failed\n");
		goto fail;
	}

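	/*
	 * Update the DC context in four ordered passes: strip modified
	 * planes first, then disable the CRTCs that need it, re-enable
	 * the ones that stay up, and finally add the planes back.
	 */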
	/* Remove existing planes if they are modified */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    false,
					    &lock_and_validation_needed,
					    &is_top_most_overlay);
		if (ret) {
			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
			goto fail;
		}
	}

	/* Disable all crtcs which require disable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   false,
					   &lock_and_validation_needed);
		if (ret) {
			DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
			goto fail;
		}
	}

	/* Enable all crtcs which require enable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   true,
					   &lock_and_validation_needed);
		if (ret) {
			DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
			goto fail;
		}
	}

	/* Add new/modified planes */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    true,
					    &lock_and_validation_needed,
					    &is_top_most_overlay);
		if (ret) {
			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
			goto fail;
		}
	}

	if (dc_resource_is_dsc_encoding_supported(dc)) {
		ret = pre_validate_dsc(state, &dm_state, vars);
		if (ret != 0)
			goto fail;
	}

	/* Run this here since we want to validate the streams we created */
	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret) {
		DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
		goto fail;
	}

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->mpo_requested)
			DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
	}

	/* Check cursor planes scaling */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
		if (ret) {
			DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
			goto fail;
		}
	}

	if (state->legacy_cursor_update) {
		/*
		 * This is a fast cursor update coming from the plane update
		 * helper, check if it can be done asynchronously for better
		 * performance.
		 */
		state->async_update =
			!drm_atomic_helper_async_check(dev, state);

		/*
		 * Skip the remaining global validation if this is an async
		 * update. Cursor updates can be done without affecting
		 * state or bandwidth calcs and this avoids the performance
		 * penalty of locking the private state object and
		 * allocating a new dc_state.
		 */
		if (state->async_update)
			return 0;
	}

	/* Check scaling and underscan changes */
	/*
	 * TODO: Scaling changes validation was removed due to the inability to
	 * commit a new stream into the context w/o causing a full reset. Need
	 * to decide how to handle.
	 */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(
				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
			continue;

		/* Skip anything that is not a scaling or underscan change */
		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
			continue;

		lock_and_validation_needed = true;
	}

	/* Set the slot info for each mst_state based on the link encoding format */
	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
		struct amdgpu_dm_connector *aconnector;
		struct drm_connector *connector;
		struct drm_connector_list_iter iter;
		u8 link_coding_cap;

		drm_connector_list_iter_begin(dev, &iter);
		drm_for_each_connector_iter(connector, &iter) {
			if (connector->index == mst_state->mgr->conn_base_id) {
				aconnector = to_amdgpu_dm_connector(connector);
				link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
				drm_dp_mst_update_slots(mst_state, link_coding_cap);

				break;
			}
		}
		drm_connector_list_iter_end(&iter);
	}

	/*
	 * Streams and planes are reset when there are changes that affect
	 * bandwidth. Anything that affects bandwidth needs to go through
	 * DC global validation to ensure that the configuration can be applied
	 * to hardware.
	 *
	 * We currently have to stall out here in atomic_check for outstanding
	 * commits to finish in this case because our IRQ handlers reference
	 * DRM state directly - we can end up disabling interrupts too early
	 * if we don't.
	 *
	 * TODO: Remove this stall and drop DM state private objects.
	 */
	if (lock_and_validation_needed) {
		ret = dm_atomic_get_state(state, &dm_state);
		if (ret) {
			DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
			goto fail;
		}

		ret = do_aquire_global_lock(dev, state);
		if (ret) {
			DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
			goto fail;
		}

		ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
		if (ret) {
			DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
			ret = -EINVAL;
			goto fail;
		}

		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
		if (ret) {
			DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
			goto fail;
		}

		/*
		 * Perform validation of the MST topology in the state:
		 * We need to perform the MST atomic check before calling
		 * dc_validate_global_state(), or there is a chance of
		 * getting stuck in an infinite loop and hanging eventually.
		 */
		ret = drm_dp_mst_atomic_check(state);
		if (ret) {
			DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
			goto fail;
		}

		status = dc_validate_global_state(dc, dm_state->context, true);
		if (status != DC_OK) {
			DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)\n",
					 dc_status_to_str(status), status);
			ret = -EINVAL;
			goto fail;
		}
	} else {
		/*
		 * The commit is a fast update. Fast updates shouldn't change
		 * the DC context, affect global validation, and can have their
		 * commit work done in parallel with other commits not touching
		 * the same resource. If we have a new DC context as part of
		 * the DM atomic state from validation we need to free it and
		 * retain the existing one instead.
		 *
		 * Furthermore, since the DM atomic state only contains the DC
		 * context and can safely be annulled, we can free the state
		 * and clear the associated private object now to free
		 * some memory and avoid a possible use-after-free later.
		 */
		for (i = 0; i < state->num_private_objs; i++) {
			struct drm_private_obj *obj = state->private_objs[i].ptr;

			if (obj->funcs == adev->dm.atomic_obj.funcs) {
				int j = state->num_private_objs - 1;

				dm_atomic_destroy_state(obj,
						state->private_objs[i].state);

				/*
				 * If i is not at the end of the array then the
				 * last element needs to be moved to where i was
				 * before the array can safely be truncated.
				 */
				if (i != j)
					state->private_objs[i] =
						state->private_objs[j];

				state->private_objs[j].ptr = NULL;
				state->private_objs[j].state = NULL;
				state->private_objs[j].old_state = NULL;
				state->private_objs[j].new_state = NULL;

				state->num_private_objs = j;
				break;
			}
		}
	}

	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct dm_crtc_state *dm_new_crtc_state =
			to_dm_crtc_state(new_crtc_state);

		dm_new_crtc_state->update_type = lock_and_validation_needed ?
			UPDATE_TYPE_FULL : UPDATE_TYPE_FAST;
	}

	/* Must be success */
	WARN_ON(ret);

	trace_amdgpu_dm_atomic_check_finish(state, ret);

	return ret;

fail:
	if (ret == -EDEADLK)
		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);

	trace_amdgpu_dm_atomic_check_finish(state, ret);

	return ret;
}

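/*
 * Check the DPCD DOWN_STREAM_PORT_COUNT field for the
 * MSA_TIMING_PAR_IGNORED bit: a sink that ignores the MSA timing
 * parameters can tolerate a varying refresh rate, a prerequisite for
 * FreeSync over DP.
 */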
static bool is_dp_capable_without_timing_msa(struct dc *dc,
					     struct amdgpu_dm_connector *amdgpu_dm_connector)
{
	u8 dpcd_data;
	bool capable = false;

	if (amdgpu_dm_connector->dc_link &&
	    dm_helpers_dp_read_dpcd(
			NULL,
			amdgpu_dm_connector->dc_link,
			DP_DOWN_STREAM_PORT_COUNT,
			&dpcd_data,
			sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
	}

	return capable;
}

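/*
 * Send one chunk of a CEA extension block to the DMUB firmware parser
 * and interpret the reply: an ACK for intermediate chunks, or the parsed
 * AMD VSDB once the whole block has been consumed.
 */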
static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
				    unsigned int offset,
				    unsigned int total_length,
				    u8 *data,
				    unsigned int length,
				    struct amdgpu_hdmi_vsdb_info *vsdb)
{
	bool res;
	union dmub_rb_cmd cmd;
	struct dmub_cmd_send_edid_cea *input;
	struct dmub_cmd_edid_cea_output *output;

	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
		return false;

	memset(&cmd, 0, sizeof(cmd));

	input = &cmd.edid_cea.data.input;

	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
	cmd.edid_cea.header.sub_type = 0;
	cmd.edid_cea.header.payload_bytes =
		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
	input->offset = offset;
	input->length = length;
	input->cea_total_length = total_length;
	memcpy(input->payload, data, length);

	res = dm_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
	if (!res) {
		DRM_ERROR("EDID CEA parser failed\n");
		return false;
	}

	output = &cmd.edid_cea.data.output;

	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
		if (!output->ack.success) {
			DRM_ERROR("EDID CEA ack failed at offset %d\n",
				  output->ack.offset);
		}
	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
		if (!output->amd_vsdb.vsdb_found)
			return false;

		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
	} else {
		DRM_WARN("Unknown EDID CEA parser results\n");
		return false;
	}

	return true;
}

static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
				u8 *edid_ext, int len,
				struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	int i;

	/* Send the extension block to the DMCU for parsing, 8 bytes at a time */
	for (i = 0; i < len; i += 8) {
		bool res;
		int offset;

		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
			return false;

		if (i + 8 == len) {
			/* EDID block send completed, expect result */
			int version, min_rate, max_rate;

			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
			if (res) {
				/* AMD VSDB found */
				vsdb_info->freesync_supported = 1;
				vsdb_info->amd_vsdb_version = version;
				vsdb_info->min_refresh_rate_hz = min_rate;
				vsdb_info->max_refresh_rate_hz = max_rate;
				return true;
			}
			/* Not an AMD VSDB */
			return false;
		}

		/* Check for ack */
		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
		if (!res)
			return false;
	}

	return false;
}

static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
				u8 *edid_ext, int len,
				struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	int i;

	/* Send the extension block to the DMUB for parsing, 8 bytes at a time */
	for (i = 0; i < len; i += 8) {
		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
			return false;
	}

	return vsdb_info->freesync_supported;
}

static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
			   u8 *edid_ext, int len,
			   struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
	bool ret;

	mutex_lock(&adev->dm.dc_lock);
	if (adev->dm.dmub_srv)
		ret = parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
	else
		ret = parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
	mutex_unlock(&adev->dm.dc_lock);

	return ret;
}

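/*
 * Locate the CEA extension block in the EDID and parse it for an AMD
 * VSDB. Returns the index of the extension block that contained the
 * VSDB, or -ENODEV if none was found.
 */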
static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
			       struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	u8 *edid_ext = NULL;
	int i;
	bool valid_vsdb_found = false;

	/*----- drm_find_cea_extension() -----*/
	/* No EDID or EDID extensions */
	if (edid == NULL || edid->extensions == 0)
		return -ENODEV;

	/* Find CEA extension */
	for (i = 0; i < edid->extensions; i++) {
		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
		if (edid_ext[0] == CEA_EXT)
			break;
	}

	if (i == edid->extensions)
		return -ENODEV;

	/*----- cea_db_offsets() -----*/
	if (edid_ext[0] != CEA_EXT)
		return -ENODEV;

	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);

	return valid_vsdb_found ? i : -ENODEV;
}

/**
 * amdgpu_dm_update_freesync_caps - Update Freesync capabilities
 *
 * @connector: Connector to query.
 * @edid: EDID from monitor
 *
 * Amdgpu supports Freesync on DP and HDMI displays, and it is required to keep
 * track of some of the display information in the internal data struct used by
 * amdgpu_dm. This function checks which type of connector we need to set the
 * FreeSync parameters for.
 */
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				    struct edid *edid)
{
	int i = 0;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;
	struct dc_sink *sink;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
	bool freesync_capable = false;
	enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE;

	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state", __func__);
		goto update;
	}

	sink = amdgpu_dm_connector->dc_sink ?
	       amdgpu_dm_connector->dc_sink :
	       amdgpu_dm_connector->dc_em_sink;

	if (!edid || !sink) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;
		connector->display_info.monitor_range.min_vfreq = 0;
		connector->display_info.monitor_range.max_vfreq = 0;
		freesync_capable = false;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	if (!adev->dm.freesync_module)
		goto update;

	if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
			|| sink->sink_signal == SIGNAL_TYPE_EDP) {
		bool edid_check_required = false;

		if (edid)
			edid_check_required = is_dp_capable_without_timing_msa(
						adev->dm.dc,
						amdgpu_dm_connector);

		if (edid_check_required && (edid->version > 1 ||
				(edid->version == 1 && edid->revision > 1))) {
			for (i = 0; i < 4; i++) {
				timing = &edid->detailed_timings[i];
				data = &timing->data.other_data;
				range = &data->data.range;

				/* Check if the monitor has continuous frequency mode */
				if (data->type != EDID_DETAIL_MONITOR_RANGE)
					continue;

				/*
				 * Check for flag range limits only. If flag == 1 then
				 * no additional timing information is provided.
				 * Default GTF, GTF Secondary curve and CVT are not
				 * supported.
				 */
				if (range->flags != 1)
					continue;

				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
				amdgpu_dm_connector->pixel_clock_mhz =
					range->pixel_clock_mhz * 10;

				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;

				break;
			}

			if (amdgpu_dm_connector->max_vfreq -
			    amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;
		}
	} else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
		if (i >= 0 && vsdb_info.freesync_supported) {
			timing = &edid->detailed_timings[i];
			data = &timing->data.other_data;

			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;

			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
		}
	}

	as_type = dm_get_adaptive_sync_support_type(amdgpu_dm_connector->dc_link);

	if (as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) {
		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
		if (i >= 0 && vsdb_info.freesync_supported && vsdb_info.amd_vsdb_version > 0) {
			amdgpu_dm_connector->pack_sdp_v1_3 = true;
			amdgpu_dm_connector->as_type = as_type;
			amdgpu_dm_connector->vsdb_info = vsdb_info;

			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;

			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
		}
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}

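/*
 * Propagate adev->dm.force_timing_sync to every stream in the current
 * DC state and retrigger CRTC timing synchronization.
 */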
void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	int i;

	mutex_lock(&adev->dm.dc_lock);
	if (dc->current_state) {
		for (i = 0; i < dc->current_state->stream_count; ++i)
			dc->current_state->streams[i]
				->triggered_crtc_reset.enabled =
				adev->dm.force_timing_sync;

		dm_enable_per_frame_crtc_master_sync(dc->current_state);
		dc_trigger_sync(dc, dc->current_state);
	}
	mutex_unlock(&adev->dm.dc_lock);
}

void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
		       u32 value, const char *func_name)
{
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register write. address = 0\n");
		return;
	}
#endif
	cgs_write_register(ctx->cgs_device, address, value);
	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
}

uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
			  const char *func_name)
{
	u32 value;

#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register read; address = 0\n");
		return 0;
	}
#endif

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
		ASSERT(false);
		return 0;
	}

	value = cgs_read_register(ctx->cgs_device, address);

	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);

	return value;
}

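/*
 * Synchronously execute an AUX transaction over a DPIA link via DMUB.
 * Returns the number of bytes in the AUX reply on success, or -1 with
 * *operation_result set to the failure reason.
 */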
int amdgpu_dm_process_dmub_aux_transfer_sync(
		struct dc_context *ctx,
		unsigned int link_index,
		struct aux_payload *payload,
		enum aux_return_code_type *operation_result)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct dmub_notification *p_notify = adev->dm.dmub_notify;
	int ret = -1;

	mutex_lock(&adev->dm.dpia_aux_lock);
	if (!dc_process_dmub_aux_transfer_async(ctx->dc, link_index, payload)) {
		*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
		goto out;
	}

	if (!wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
		DRM_ERROR("wait_for_completion_timeout timeout!\n");
		*operation_result = AUX_RET_ERROR_TIMEOUT;
		goto out;
	}

	if (p_notify->result != AUX_RET_SUCCESS) {
		/*
		 * Transient states before tunneling is enabled could
		 * lead to this error. We can ignore this for now.
		 */
		if (p_notify->result != AUX_RET_ERROR_PROTOCOL_ERROR) {
			DRM_WARN("DPIA AUX failed on 0x%x(%d), error %d\n",
				 payload->address, payload->length,
				 p_notify->result);
		}
		*operation_result = AUX_RET_ERROR_INVALID_REPLY;
		goto out;
	}

	payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
	if (!payload->write && p_notify->aux_reply.length &&
	    (payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK)) {
		if (payload->length != p_notify->aux_reply.length) {
			DRM_WARN("invalid read length %d from DPIA AUX 0x%x(%d)!\n",
				 p_notify->aux_reply.length,
				 payload->address, payload->length);
			*operation_result = AUX_RET_ERROR_INVALID_REPLY;
			goto out;
		}

		memcpy(payload->data, p_notify->aux_reply.data,
		       p_notify->aux_reply.length);
	}

	/* success */
	ret = p_notify->aux_reply.length;
	*operation_result = p_notify->result;
out:
	reinit_completion(&adev->dm.dmub_aux_transfer_done);
	mutex_unlock(&adev->dm.dpia_aux_lock);
	return ret;
}

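/*
 * Synchronously issue a SET_CONFIG request on a DPIA link via DMUB.
 * Returns 0 with *operation_result holding the SET_CONFIG status, or
 * -1 if the request timed out.
 */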
int amdgpu_dm_process_dmub_set_config_sync(
		struct dc_context *ctx,
		unsigned int link_index,
		struct set_config_cmd_payload *payload,
		enum set_config_status *operation_result)
{
	struct amdgpu_device *adev = ctx->driver_context;
	bool is_cmd_complete;
	int ret;

	mutex_lock(&adev->dm.dpia_aux_lock);
	is_cmd_complete = dc_process_dmub_set_config_async(ctx->dc,
			link_index, payload, adev->dm.dmub_notify);

	if (is_cmd_complete || wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
		ret = 0;
		*operation_result = adev->dm.dmub_notify->sc_status;
	} else {
		DRM_ERROR("wait_for_completion_timeout timeout!\n");
		ret = -1;
		*operation_result = SET_CONFIG_UNKNOWN_ERROR;
	}

	if (!is_cmd_complete)
		reinit_completion(&adev->dm.dmub_aux_transfer_done);
	mutex_unlock(&adev->dm.dpia_aux_lock);
	return ret;
}

/*
 * Check whether seamless boot is supported.
 *
 * So far we only support seamless boot on CHIP_VANGOGH.
 * If everything goes well, we may consider expanding
 * seamless boot to other ASICs.
 */
bool check_seamless_boot_capability(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[DCE_HWIP][0]) {
	case IP_VERSION(3, 0, 1):
		if (!adev->mman.keep_stolen_vga_memory)
			return true;
		break;
	default:
		break;
	}

	return false;
}

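/*
 * Thin wrappers that forward DMUB command execution from DM to the DC
 * DMUB service.
 */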
bool dm_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
{
	return dc_dmub_srv_cmd_run(ctx->dmub_srv, cmd, wait_type);
}

bool dm_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
{
	return dc_dmub_srv_cmd_run_list(ctx->dmub_srv, count, cmd, wait_type);
}