/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc_link_dp.h"
#include "link_enc_cfg.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_plane.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/display/drm_hdcp_helper.h>
#endif
#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include "i2caux_interface.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>
#include <linux/dmi.h>

#include <drm/display/drm_dp_mst_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_gem_atomic_helper.h>

#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "soc15_common.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"

#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
#define FIRMWARE_DCN_314_DMUB "amdgpu/dcn_3_1_4_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_314_DMUB);
#define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
#define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);

#define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB);
#define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * Initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, and drm_mode_config.
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static void handle_hpd_rx_irq(void *param);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state);
/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
	}
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

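		/* Pack the positions back into the legacy two-field register layout. */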
		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

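/* Map an OTG (output timing generator) instance back to its amdgpu_crtc. */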
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (WARN_ON(otg_inst == -1))
		return adev->mode_info.crtcs[0];

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
					      struct dm_crtc_state *new_state)
{
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
		return true;
	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
		return true;
	else
		return false;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

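	/* irq_src is offset by the pageflip IRQ base; subtracting it recovers the OTG instance. */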
	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DC_LOG_PFLIP("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
			     amdgpu_crtc->pflip_status,
			     AMDGPU_FLIP_SUBMITTED,
			     amdgpu_crtc->crtc_id,
			     amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	WARN_ON(!e);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
		     amdgpu_crtc->crtc_id, amdgpu_crtc,
		     vrr_active, (int) !e);
}

static void dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc)
{
	struct drm_crtc *crtc = &acrtc->base;
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	drm_crtc_handle_vblank(crtc);

	spin_lock_irqsave(&dev->event_lock, flags);

	/* Send completion event for cursor-only commits */
	if (acrtc->event && acrtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		drm_crtc_send_vblank_event(crtc, acrtc->event);
		drm_crtc_vblank_put(crtc);
		acrtc->event = NULL;
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct drm_device *drm_dev;
	struct drm_vblank_crtc *vblank;
	ktime_t frame_duration_ns, previous_timestamp;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
		drm_dev = acrtc->base.dev;
		vblank = &drm_dev->vblank[acrtc->base.index];
		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
		frame_duration_ns = vblank->time - previous_timestamp;

		if (frame_duration_ns > 0) {
			trace_amdgpu_refresh_rate_track(acrtc->base.index,
						frame_duration_ns,
						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
			atomic64_set(&irq_params->previous_timestamp, vblank->time);
		}

		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			dm_crtc_handle_vblank(acrtc);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
						adev->dm.freesync_module,
						acrtc->dm_irq_params.stream,
						&acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
						adev->dm.dc,
						acrtc->dm_irq_params.stream,
						&acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		dm_crtc_handle_vblank(acrtc);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

	if (!acrtc)
		return;

	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */

/**
 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub AUX or SET_CONFIG command completion processing callback.
 * Copies dmub notification to DM which is to be read by the AUX command
 * issuing thread, and also signals the event to wake up that thread.
 */
static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
					struct dmub_notification *notify)
{
	if (adev->dm.dmub_notify)
		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
		complete(&adev->dm.dmub_aux_transfer_done);
}

/**
 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub Hpd interrupt processing callback. Gets the display index through
 * the link index and calls the helper to do the processing.
 */
static void dmub_hpd_callback(struct amdgpu_device *adev,
			      struct dmub_notification *notify)
{
	struct amdgpu_dm_connector *aconnector;
	struct amdgpu_dm_connector *hpd_aconnector = NULL;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct dc_link *link;
	uint8_t link_index = 0;
	struct drm_device *dev;

	if (adev == NULL)
		return;

	if (notify == NULL) {
		DRM_ERROR("DMUB HPD callback notification was NULL");
		return;
	}

	if (notify->link_index > adev->dm.dc->link_count) {
		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
		return;
	}

	link_index = notify->link_index;
	link = adev->dm.dc->links[link_index];
	dev = adev->dm.ddev;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (link && aconnector->dc_link == link) {
			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
			hpd_aconnector = aconnector;
			break;
		}
	}
	drm_connector_list_iter_end(&iter);

	if (hpd_aconnector) {
		if (notify->type == DMUB_NOTIFICATION_HPD)
			handle_hpd_irq_helper(hpd_aconnector);
		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
			handle_hpd_rx_irq(hpd_aconnector);
	}
}

/**
 * register_dmub_notify_callback - Sets callback for DMUB notify
 * @adev: amdgpu_device pointer
 * @type: Type of dmub notification
 * @callback: Dmub interrupt callback function
 * @dmub_int_thread_offload: offload indicator
 *
 * API to register a dmub callback handler for a dmub notification.
 * Also sets an indicator whether callback processing is to be offloaded
 * to the dmub interrupt handling thread.
 * Return: true if successfully registered, false if there is existing registration
 */
static bool register_dmub_notify_callback(struct amdgpu_device *adev,
					  enum dmub_notification_type type,
					  dmub_notify_interrupt_callback_t callback,
					  bool dmub_int_thread_offload)
{
	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
		adev->dm.dmub_callback[type] = callback;
		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
	} else
		return false;

	return true;
}

static void dm_handle_hpd_work(struct work_struct *work)
{
	struct dmub_hpd_work *dmub_hpd_wrk;

	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);

	if (!dmub_hpd_wrk->dmub_notify) {
		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
		return;
	}

	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
		dmub_hpd_wrk->dmub_notify);
	}

	kfree(dmub_hpd_wrk->dmub_notify);
	kfree(dmub_hpd_wrk);
}

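/* Upper bound on trace-buffer entries drained per outbox interrupt; see the loop in dm_dmub_outbox1_low_irq(). */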
#define DMUB_TRACE_MAX_READ 64
/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox interrupt: processes pending DMUB notifications
 * and drains the DMUB trace buffer.
 */
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
	struct dmub_notification notify;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dmcub_trace_buf_entry entry = { 0 };
	uint32_t count = 0;
	struct dmub_hpd_work *dmub_hpd_wrk;
	struct dc_link *plink = NULL;

	if (dc_enable_dmub_notifications(adev->dm.dc) &&
	    irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {

		do {
			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
				DRM_ERROR("DM: notify type %d invalid!", notify.type);
				continue;
			}
			if (!dm->dmub_callback[notify.type]) {
				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
				continue;
			}
			if (dm->dmub_thread_offload[notify.type] == true) {
				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
				if (!dmub_hpd_wrk) {
					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
					return;
				}
				dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
				if (!dmub_hpd_wrk->dmub_notify) {
					kfree(dmub_hpd_wrk);
					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
					return;
				}
				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
				memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
				dmub_hpd_wrk->adev = adev;
				if (notify.type == DMUB_NOTIFICATION_HPD) {
					plink = adev->dm.dc->links[notify.link_index];
					if (plink) {
						plink->hpd_status =
							notify.hpd_status == DP_HPD_PLUG;
					}
				}
				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
			} else {
				dm->dmub_callback[notify.type](adev, &notify);
			}
		} while (notify.pending_notification);
	}

	do {
		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
							 entry.param0, entry.param1);

			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
					 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
		} else
			break;

		count++;

	} while (count <= DMUB_TRACE_MAX_READ);

	if (count > DMUB_TRACE_MAX_READ)
		DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ");
}

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

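	/* Worst case: 4 bytes per pixel (32bpp) at the largest listed mode. */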
	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
						AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
						&compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}
	}
}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind = amdgpu_dm_audio_component_bind,
	.unbind = amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	/* Reset DMCUB if it was previously running - before we overwrite its memory. */
	status = dmub_srv_hw_reset(dmub_srv);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Error resetting DMUB HW: %d\n", status);

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

1089
743b9786
NK
1090 fw_inst_const = dmub_fw->data +
1091 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
8c7aea40 1092 PSP_HEADER_BYTES;
743b9786
NK
1093
1094 fw_bss_data = dmub_fw->data +
1095 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1096 le32_to_cpu(hdr->inst_const_bytes);
1097
1098 /* Copy firmware and bios info into FB memory. */
8c7aea40
NK
1099 fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1100 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1101
1102 fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1103
ddde28a5
HW
1104 /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
1105 * amdgpu_ucode_init_single_fw will load dmub firmware
1106 * fw_inst_const part to cw0; otherwise, the firmware back door load
1107 * will be done by dm_dmub_hw_init
1108 */
1109 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1110 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1111 fw_inst_const_size);
1112 }
1113
a576b345
NK
1114 if (fw_bss_data_size)
1115 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1116 fw_bss_data, fw_bss_data_size);
ddde28a5
HW
1117
1118 /* Copy firmware bios info into FB memory. */
8c7aea40
NK
1119 memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1120 adev->bios_size);
1121
1122 /* Reset regions that need to be reset. */
1123 memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
1124 fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1125
1126 memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1127 fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1128
1129 memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1130 fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
743b9786
NK
1131
1132 /* Initialize hardware. */
1133 memset(&hw_params, 0, sizeof(hw_params));
1134 hw_params.fb_base = adev->gmc.fb_start;
1135 hw_params.fb_offset = adev->gmc.aper_base;
1136
31a7f4bb
HW
1137 /* backdoor load firmware and trigger dmub running */
1138 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1139 hw_params.load_inst_const = true;
1140
743b9786
NK
1141 if (dmcu)
1142 hw_params.psp_version = dmcu->psp_version;
1143
8c7aea40
NK
1144 for (i = 0; i < fb_info->num_fb; ++i)
1145 hw_params.fb[i] = &fb_info->fb[i];
743b9786 1146
3b36f50d
TH
1147 switch (adev->ip_versions[DCE_HWIP][0]) {
1148 case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
1149 hw_params.dpia_supported = true;
7367540b 1150 hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
5b109397
JS
1151 break;
1152 default:
1153 break;
1154 }
1155
743b9786
NK
1156 status = dmub_srv_hw_init(dmub_srv, &hw_params);
1157 if (status != DMUB_STATUS_OK) {
1158 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
1159 return -EINVAL;
1160 }
1161
1162 /* Wait for firmware load to finish. */
1163 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1164 if (status != DMUB_STATUS_OK)
1165 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1166
1167 /* Init DMCU and ABM if available. */
1168 if (dmcu && abm) {
1169 dmcu->funcs->dmcu_init(dmcu);
1170 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1171 }
1172
051b7887
RL
1173 if (!adev->dm.dc->ctx->dmub_srv)
1174 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
9a71c7d3
NK
1175 if (!adev->dm.dc->ctx->dmub_srv) {
1176 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
1177 return -ENOMEM;
1178 }
1179
743b9786
NK
1180 DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
1181 adev->dm.dmcub_fw_version);
1182
1183 return 0;
1184}
1185
static void dm_dmub_hw_resume(struct amdgpu_device *adev)
{
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	enum dmub_status status;
	bool init;

	if (!dmub_srv) {
		/* DMUB isn't supported on the ASIC. */
		return;
	}

	status = dmub_srv_is_hw_init(dmub_srv, &init);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("DMUB hardware init check failed: %d\n", status);

	if (status == DMUB_STATUS_OK && init) {
		/* Wait for firmware load to finish. */
		status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
		if (status != DMUB_STATUS_OK)
			DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
	} else {
		/* Perform the full hardware initialization. */
		dm_dmub_hw_init(adev);
	}
}

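/*
 * Translate the GMC view of the FB/AGP apertures and the GART page table
 * into the physical address space config DC needs for system-memory scanout.
 */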
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	memset(pa_config, 0, sizeof(*pa_config));

	logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increase system aperture high address (add 1)
		 * to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;
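
	/*
	 * The system aperture above is programmed in 256KB (1 << 18) units,
	 * the AGP aperture in 16MB (1 << 24) units, and the GART page table
	 * below in 4KB (1 << 12) pages.
	 */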
	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;
}

static void vblank_control_worker(struct work_struct *work)
{
	struct vblank_control_work *vblank_work =
		container_of(work, struct vblank_control_work, work);
	struct amdgpu_display_manager *dm = vblank_work->dm;

	mutex_lock(&dm->dc_lock);

	if (vblank_work->enable)
		dm->active_vblank_irq_count++;
	else if (dm->active_vblank_irq_count)
		dm->active_vblank_irq_count--;

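	/* MALL/idle optimizations are only safe while no CRTC expects vblank IRQs. */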
	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

	/*
	 * Control PSR based on vblank requirements from OS
	 *
	 * If panel supports PSR SU, there's no need to disable PSR when OS is
	 * submitting fast atomic commits (we infer this by whether the OS
	 * requests vblank events). Fast atomic commits will simply trigger a
	 * full-frame-update (FFU); a specific case of selective-update (SU)
	 * where the SU region is the full hactive*vactive region. See
	 * fill_dc_dirty_rects().
	 */
	if (vblank_work->stream && vblank_work->stream->link) {
		if (vblank_work->enable) {
			if (vblank_work->stream->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 &&
			    vblank_work->stream->link->psr_settings.psr_allow_active)
				amdgpu_dm_psr_disable(vblank_work->stream);
		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
			amdgpu_dm_psr_enable(vblank_work->stream);
		}
	}

	mutex_unlock(&dm->dc_lock);

	dc_stream_release(vblank_work->stream);

	kfree(vblank_work);
}

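/*
 * Deferred handler for HPD RX interrupts that need heavier lifting
 * (DP automated test requests, link-loss recovery); runs from the
 * per-link offload workqueue instead of interrupt context.
 */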
static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
{
	struct hpd_rx_irq_offload_work *offload_work;
	struct amdgpu_dm_connector *aconnector;
	struct dc_link *dc_link;
	struct amdgpu_device *adev;
	enum dc_connection_type new_connection_type = dc_connection_none;
	unsigned long flags;

	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
	aconnector = offload_work->offload_wq->aconnector;

	if (!aconnector) {
		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
		goto skip;
	}

	adev = drm_to_adev(aconnector->base.dev);
	dc_link = aconnector->dc_link;

	mutex_lock(&aconnector->hpd_lock);
	if (!dc_link_detect_sink(dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");
	mutex_unlock(&aconnector->hpd_lock);

	if (new_connection_type == dc_connection_none)
		goto skip;

	if (amdgpu_in_reset(adev))
		goto skip;

	mutex_lock(&adev->dm.dc_lock);
	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
		dc_link_dp_handle_automated_test(dc_link);
	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
		 hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
		 dc_link_dp_allow_hpd_rx_irq(dc_link)) {
		dc_link_dp_handle_link_loss(dc_link);
		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
		offload_work->offload_wq->is_handling_link_loss = false;
		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
	}
	mutex_unlock(&adev->dm.dc_lock);

skip:
	kfree(offload_work);
}

static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
{
	int max_caps = dc->caps.max_links;
	int i = 0;
	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;

	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);

	if (!hpd_rx_offload_wq)
		return NULL;

	for (i = 0; i < max_caps; i++) {
		hpd_rx_offload_wq[i].wq =
			create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");

		if (hpd_rx_offload_wq[i].wq == NULL) {
			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
			return NULL;
		}

		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
	}

	return hpd_rx_offload_wq;
}

struct amdgpu_stutter_quirk {
	u16 chip_vendor;
	u16 chip_device;
	u16 subsys_vendor;
	u16 subsys_device;
	u8 revision;
};

static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
	{ 0, 0, 0, 0, 0 },
};

static bool dm_should_disable_stutter(struct pci_dev *pdev)
{
	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;

	while (p && p->chip_device != 0) {
		if (pdev->vendor == p->chip_vendor &&
		    pdev->device == p->chip_device &&
		    pdev->subsystem_vendor == p->subsys_vendor &&
		    pdev->subsystem_device == p->subsys_device &&
		    pdev->revision == p->revision) {
			return true;
		}
		++p;
	}
	return false;
}

static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
		},
	},
	{}
};

static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
{
	const struct dmi_system_id *dmi_id;

	dm->aux_hpd_discon_quirk = false;

	dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
	if (dmi_id) {
		dm->aux_hpd_discon_quirk = true;
		DRM_INFO("aux_hpd_discon_quirk attached\n");
	}
}

static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);
	spin_lock_init(&adev->dm.vblank_lock);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
	init_data.asic_id.chip_id = adev->pdev->device;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->ip_versions[DCE_HWIP][0]) {
	case IP_VERSION(2, 1, 0):
		switch (adev->dm.dmcub_fw_version) {
		case 0: /* development */
		case 0x1: /* linux-firmware.git hash 6d9f399 */
		case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
			init_data.flags.disable_dmcu = false;
			break;
		default:
			init_data.flags.disable_dmcu = true;
		}
		break;
	case IP_VERSION(2, 0, 3):
		init_data.flags.disable_dmcu = true;
		break;
	default:
		break;
	}

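	/*
	 * Determine whether scatter/gather (GPU VM) display support can be
	 * enabled, i.e. whether DC may scan out framebuffers that live in
	 * system memory rather than VRAM.
	 */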
60fb100b
AD
1524 switch (adev->asic_type) {
1525 case CHIP_CARRIZO:
1526 case CHIP_STONEY:
1ebcaebd
NK
1527 init_data.flags.gpu_vm_support = true;
1528 break;
60fb100b 1529 default:
1d789535 1530 switch (adev->ip_versions[DCE_HWIP][0]) {
559f591d
AD
1531 case IP_VERSION(1, 0, 0):
1532 case IP_VERSION(1, 0, 1):
a7f520bf
AD
1533 /* enable S/G on PCO and RV2 */
1534 if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1535 (adev->apu_flags & AMD_APU_IS_PICASSO))
1536 init_data.flags.gpu_vm_support = true;
1537 break;
fd546bc5 1538 case IP_VERSION(2, 1, 0):
c08182f2
AD
1539 case IP_VERSION(3, 0, 1):
1540 case IP_VERSION(3, 1, 2):
1541 case IP_VERSION(3, 1, 3):
b5b8ed44 1542 case IP_VERSION(3, 1, 5):
0fe382fb 1543 case IP_VERSION(3, 1, 6):
c08182f2
AD
1544 init_data.flags.gpu_vm_support = true;
1545 break;
c08182f2
AD
1546 default:
1547 break;
1548 }
60fb100b
AD
1549 break;
1550 }
6e227308 1551
a7f520bf
AD
1552 if (init_data.flags.gpu_vm_support)
1553 adev->mode_info.gpu_vm_support = true;
1554
04b94af4
AD
1555 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1556 init_data.flags.fbc_support = true;
1557
d99f38ae
AD
1558 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1559 init_data.flags.multi_mon_pp_mclk_switch = true;
1560
eaf56410
LL
1561 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1562 init_data.flags.disable_fractional_pwm = true;
a5148245
ZL
1563
1564 if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1565 init_data.flags.edp_no_power_sequencing = true;
eaf56410 1566
12320274
AP
1567 if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1568 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1569 if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1570 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
12320274 1571
7aba117a 1572 init_data.flags.seamless_boot_edp_requested = false;
78ad75f8 1573
1edf5ae1 1574 if (check_seamless_boot_capability(adev)) {
7aba117a 1575 init_data.flags.seamless_boot_edp_requested = true;
1edf5ae1
ZL
1576 init_data.flags.allow_seamless_boot_optimization = true;
1577 DRM_INFO("Seamless boot condition check passed\n");
1578 }
1579
a8201902
LM
1580 init_data.flags.enable_mipi_converter_optimization = true;
1581
e5028e9f 1582 init_data.dcn_reg_offsets = adev->reg_offset[DCE_HWIP][0];
2a93292f 1583 init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0];
e5028e9f 1584
0dd79532 1585 INIT_LIST_HEAD(&adev->dm.da_list);
57b9f338
FZ
1586
1587 retrieve_dmi_info(&adev->dm);
1588
4562236b
HW
1589 /* Display Core create. */
1590 adev->dm.dc = dc_create(&init_data);
1591
1592	if (adev->dm.dc) {
1593		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1594	} else {
1595		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1596 goto error;
1597 }
1598
1599 if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1600 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1601 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1602 }
1603
1604 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1605 adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1606 if (dm_should_disable_stutter(adev->pdev))
1607 adev->dm.dc->debug.disable_stutter = true;
1608
1609 if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1610 adev->dm.dc->debug.disable_stutter = true;
1611
1612	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
1613		adev->dm.dc->debug.disable_dsc = true;
1614 adev->dm.dc->debug.disable_dsc_edp = true;
1615 }
1616
1617 if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1618 adev->dm.dc->debug.disable_clock_gate = true;
1619
1620 if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
1621 adev->dm.dc->debug.force_subvp_mclk_switch = true;
1622
1623 r = dm_dmub_hw_init(adev);
1624 if (r) {
1625 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1626 goto error;
1627 }
1628
1629 dc_hardware_init(adev->dm.dc);
1630
1631 adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1632 if (!adev->dm.hpd_rx_offload_wq) {
1633 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1634 goto error;
1635 }
1636
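	/* APUs scan out of system memory: read the physical address-space
	 * layout from MMHUB and pass it to DC.
	 */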
1637	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1638 struct dc_phy_addr_space_config pa_config;
1639
1640		mmhub_read_system_context(adev, &pa_config);
1641
1642 // Call the DC init_memory func
1643 dc_setup_system_context(adev->dm.dc, &pa_config);
1644 }
1645
1646 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1647 if (!adev->dm.freesync_module) {
1648 DRM_ERROR(
1649 "amdgpu: failed to initialize freesync_module.\n");
1650 } else
1651		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1652 adev->dm.freesync_module);
1653
1654 amdgpu_dm_init_color_mod();
1655
1656	if (adev->dm.dc->caps.max_links > 0) {
1657 adev->dm.vblank_control_workqueue =
1658 create_singlethread_workqueue("dm_vblank_control_workqueue");
1659 if (!adev->dm.vblank_control_workqueue)
1660			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1661	}
1662
1663#ifdef CONFIG_DRM_AMD_DC_HDCP
1664	if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1665		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1666
1667 if (!adev->dm.hdcp_workqueue)
1668 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1669 else
1670 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1671
1672 dc_init_callbacks(adev->dm.dc, &init_params);
1673 }
1674#endif
1675#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1676 adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1677#endif
1678	if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
1679 init_completion(&adev->dm.dmub_aux_transfer_done);
1680 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1681 if (!adev->dm.dmub_notify) {
1682 DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
1683 goto error;
1684 }
1685
1686 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1687 if (!adev->dm.delayed_hpd_wq) {
1688 DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1689 goto error;
1690 }
1691
1692		amdgpu_dm_outbox_init(adev);
1693 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1694 dmub_aux_setconfig_callback, false)) {
1695 DRM_ERROR("amdgpu: fail to register dmub aux callback");
1696 goto error;
1697 }
1698 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1699 DRM_ERROR("amdgpu: fail to register dmub hpd callback");
1700 goto error;
1701 }
1702 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1703 DRM_ERROR("amdgpu: fail to register dmub hpd callback");
1704 goto error;
1705 }
1706 }
1707
1708 if (amdgpu_dm_initialize_drm_device(adev)) {
1709 DRM_ERROR(
1710 "amdgpu: failed to initialize sw for display support.\n");
1711 goto error;
1712 }
1713
1714 /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
1715 * It is expected that DMUB will resend any pending notifications at this point, for
1716 * example HPD from DPIA.
1717 */
1718 if (dc_is_dmub_outbox_supported(adev->dm.dc))
1719 dc_enable_dmub_outbox(adev->dm.dc);
1720
1721 /* create fake encoders for MST */
1722 dm_dp_create_fake_mst_encoders(adev);
1723
1724 /* TODO: Add_display_info? */
1725
1726 /* TODO use dynamic cursor width */
1727 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1728 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1729
1730	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1731 DRM_ERROR(
1732 "amdgpu: failed to initialize sw for display support.\n");
1733 goto error;
1734 }
1735
1736
1737	DRM_DEBUG_DRIVER("KMS initialized.\n");
1738
1739 return 0;
1740error:
1741 amdgpu_dm_fini(adev);
1742
1743	return -EINVAL;
1744}
1745
1746static int amdgpu_dm_early_fini(void *handle)
1747{
1748 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1749
1750 amdgpu_dm_audio_fini(adev);
1751
1752 return 0;
1753}
1754
1755 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1756 {
1757 int i;
1758
1759 if (adev->dm.vblank_control_workqueue) {
1760 destroy_workqueue(adev->dm.vblank_control_workqueue);
1761 adev->dm.vblank_control_workqueue = NULL;
1762 }
1763
1764 for (i = 0; i < adev->dm.display_indexes_num; i++) {
1765 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1766 }
1767
1768	amdgpu_dm_destroy_drm_device(&adev->dm);
1769
1770#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1771 if (adev->dm.crc_rd_wrk) {
1772 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1773 kfree(adev->dm.crc_rd_wrk);
1774 adev->dm.crc_rd_wrk = NULL;
1775 }
1776#endif
1777#ifdef CONFIG_DRM_AMD_DC_HDCP
1778 if (adev->dm.hdcp_workqueue) {
1779		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1780 adev->dm.hdcp_workqueue = NULL;
1781 }
1782
1783 if (adev->dm.dc)
1784 dc_deinit_callbacks(adev->dm.dc);
1785#endif
1786
1787	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1788
1789 if (dc_enable_dmub_notifications(adev->dm.dc)) {
1790 kfree(adev->dm.dmub_notify);
1791 adev->dm.dmub_notify = NULL;
1792 destroy_workqueue(adev->dm.delayed_hpd_wq);
1793 adev->dm.delayed_hpd_wq = NULL;
1794 }
1795
1796 if (adev->dm.dmub_bo)
1797 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1798 &adev->dm.dmub_bo_gpu_addr,
1799 &adev->dm.dmub_bo_cpu_addr);
1800
1801 if (adev->dm.hpd_rx_offload_wq) {
1802 for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1803 if (adev->dm.hpd_rx_offload_wq[i].wq) {
1804 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1805 adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1806 }
1807 }
1808
1809 kfree(adev->dm.hpd_rx_offload_wq);
1810 adev->dm.hpd_rx_offload_wq = NULL;
1811 }
1812
1813 /* DC Destroy TODO: Replace destroy DAL */
1814 if (adev->dm.dc)
1815 dc_destroy(&adev->dm.dc);
1816 /*
1817	 * TODO: pageflip, vblank interrupt
1818 *
1819 * amdgpu_dm_irq_fini(adev);
1820 */
1821
1822 if (adev->dm.cgs_device) {
1823 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1824 adev->dm.cgs_device = NULL;
1825 }
1826 if (adev->dm.freesync_module) {
1827 mod_freesync_destroy(adev->dm.freesync_module);
1828 adev->dm.freesync_module = NULL;
1829 }
1830
1831	mutex_destroy(&adev->dm.audio_lock);
1832 mutex_destroy(&adev->dm.dc_lock);
1833
1834 return;
1835}
1836
1837 static int load_dmcu_fw(struct amdgpu_device *adev)
1838 {
1839	const char *fw_name_dmcu = NULL;
1840 int r;
1841 const struct dmcu_firmware_header_v1_0 *hdr;
1842
1843	switch (adev->asic_type) {
1844#if defined(CONFIG_DRM_AMD_DC_SI)
1845 case CHIP_TAHITI:
1846 case CHIP_PITCAIRN:
1847 case CHIP_VERDE:
1848 case CHIP_OLAND:
1849#endif
1850 case CHIP_BONAIRE:
1851 case CHIP_HAWAII:
1852 case CHIP_KAVERI:
1853 case CHIP_KABINI:
1854 case CHIP_MULLINS:
1855 case CHIP_TONGA:
1856 case CHIP_FIJI:
1857 case CHIP_CARRIZO:
1858 case CHIP_STONEY:
1859 case CHIP_POLARIS11:
1860 case CHIP_POLARIS10:
1861 case CHIP_POLARIS12:
1862 case CHIP_VEGAM:
1863 case CHIP_VEGA10:
1864 case CHIP_VEGA12:
1865 case CHIP_VEGA20:
1866 return 0;
1867 case CHIP_NAVI12:
1868 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1869 break;
1870	case CHIP_RAVEN:
1871 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1872 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1873 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1874 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1875 else
1876			return 0;
1877 break;
1878 default:
1879		switch (adev->ip_versions[DCE_HWIP][0]) {
1880 case IP_VERSION(2, 0, 2):
1881 case IP_VERSION(2, 0, 3):
1882 case IP_VERSION(2, 0, 0):
1883 case IP_VERSION(2, 1, 0):
1884 case IP_VERSION(3, 0, 0):
1885 case IP_VERSION(3, 0, 2):
1886 case IP_VERSION(3, 0, 3):
1887 case IP_VERSION(3, 0, 1):
1888 case IP_VERSION(3, 1, 2):
1889 case IP_VERSION(3, 1, 3):
1890		case IP_VERSION(3, 1, 5):
1891		case IP_VERSION(3, 1, 6):
1892 case IP_VERSION(3, 2, 0):
1893 case IP_VERSION(3, 2, 1):
1894 return 0;
1895 default:
1896 break;
1897 }
1898		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1899		return -EINVAL;
1900 }
1901
1902 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1903 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1904 return 0;
1905 }
1906
1907 r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1908 if (r == -ENOENT) {
1909 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1910 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1911 adev->dm.fw_dmcu = NULL;
1912 return 0;
1913 }
1914 if (r) {
1915 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1916 fw_name_dmcu);
1917 return r;
1918 }
1919
1920 r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1921 if (r) {
1922 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1923 fw_name_dmcu);
1924 release_firmware(adev->dm.fw_dmcu);
1925 adev->dm.fw_dmcu = NULL;
1926 return r;
1927 }
1928
1929 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1930 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1931 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1932 adev->firmware.fw_size +=
1933 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1934
1935 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1936 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1937 adev->firmware.fw_size +=
1938 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1939
1940 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1941
1942 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1943
1944 return 0;
1945}
1946
1947static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1948{
1949 struct amdgpu_device *adev = ctx;
1950
1951 return dm_read_reg(adev->dm.dc->ctx, address);
1952}
1953
1954static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1955 uint32_t value)
1956{
1957 struct amdgpu_device *adev = ctx;
1958
1959 return dm_write_reg(adev->dm.dc->ctx, address, value);
1960}
1961
1962static int dm_dmub_sw_init(struct amdgpu_device *adev)
1963{
1964 struct dmub_srv_create_params create_params;
1965 struct dmub_srv_region_params region_params;
1966 struct dmub_srv_region_info region_info;
1967 struct dmub_srv_fb_params fb_params;
1968 struct dmub_srv_fb_info *fb_info;
1969 struct dmub_srv *dmub_srv;
1970 const struct dmcub_firmware_header_v1_0 *hdr;
1971 const char *fw_name_dmub;
1972 enum dmub_asic dmub_asic;
1973 enum dmub_status status;
1974 int r;
1975
1976	switch (adev->ip_versions[DCE_HWIP][0]) {
1977	case IP_VERSION(2, 1, 0):
1978 dmub_asic = DMUB_ASIC_DCN21;
1979 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1980 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1981 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1982		break;
1983	case IP_VERSION(3, 0, 0):
1984		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1985 dmub_asic = DMUB_ASIC_DCN30;
1986 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1987 } else {
1988 dmub_asic = DMUB_ASIC_DCN30;
1989 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1990 }
1991		break;
1992	case IP_VERSION(3, 0, 1):
1993 dmub_asic = DMUB_ASIC_DCN301;
1994 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1995 break;
1996	case IP_VERSION(3, 0, 2):
1997 dmub_asic = DMUB_ASIC_DCN302;
1998 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1999 break;
2000	case IP_VERSION(3, 0, 3):
2001 dmub_asic = DMUB_ASIC_DCN303;
2002 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
2003 break;
2004 case IP_VERSION(3, 1, 2):
2005 case IP_VERSION(3, 1, 3):
2006		dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
2007 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
2008 break;
2009 case IP_VERSION(3, 1, 4):
2010 dmub_asic = DMUB_ASIC_DCN314;
2011 fw_name_dmub = FIRMWARE_DCN_314_DMUB;
2012 break;
2013 case IP_VERSION(3, 1, 5):
2014 dmub_asic = DMUB_ASIC_DCN315;
2015 fw_name_dmub = FIRMWARE_DCN_315_DMUB;
2016 break;
2017	case IP_VERSION(3, 1, 6):
2018		dmub_asic = DMUB_ASIC_DCN316;
2019 fw_name_dmub = FIRMWARE_DCN316_DMUB;
2020 break;
2021 case IP_VERSION(3, 2, 0):
2022 dmub_asic = DMUB_ASIC_DCN32;
2023 fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
2024 break;
2025 case IP_VERSION(3, 2, 1):
2026 dmub_asic = DMUB_ASIC_DCN321;
2027 fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
2028 break;
2029 default:
2030 /* ASIC doesn't support DMUB. */
2031 return 0;
2032 }
2033
2034 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
2035 if (r) {
2036 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
2037 return 0;
2038 }
2039
2040 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
2041 if (r) {
2042 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
2043 return 0;
2044 }
2045
2046	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
2047	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
2048
2049 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
2050 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
2051 AMDGPU_UCODE_ID_DMCUB;
2052 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
2053 adev->dm.dmub_fw;
2054 adev->firmware.fw_size +=
2055 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
2056
2057 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
2058 adev->dm.dmcub_fw_version);
2059 }
2060
2061
2062 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
2063 dmub_srv = adev->dm.dmub_srv;
2064
2065 if (!dmub_srv) {
2066 DRM_ERROR("Failed to allocate DMUB service!\n");
2067 return -ENOMEM;
2068 }
2069
2070 memset(&create_params, 0, sizeof(create_params));
2071 create_params.user_ctx = adev;
2072 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
2073 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
2074 create_params.asic = dmub_asic;
2075
2076 /* Create the DMUB service. */
2077 status = dmub_srv_create(dmub_srv, &create_params);
2078 if (status != DMUB_STATUS_OK) {
2079 DRM_ERROR("Error creating DMUB service: %d\n", status);
2080 return -EINVAL;
2081 }
2082
2083 /* Calculate the size of all the regions for the DMUB service. */
2084 memset(&region_params, 0, sizeof(region_params));
2085
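	/* inst_const_bytes counts the PSP signing header and footer too;
	 * exclude them so the region holds only the raw DMCUB code/constants.
	 */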
2086 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
2087 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
2088 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
2089 region_params.vbios_size = adev->bios_size;
2090	region_params.fw_bss_data = region_params.bss_data_size ?
2091 adev->dm.dmub_fw->data +
2092 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2093		le32_to_cpu(hdr->inst_const_bytes) : NULL;
2094 region_params.fw_inst_const =
2095 adev->dm.dmub_fw->data +
2096 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2097 PSP_HEADER_BYTES;
2098
2099 status = dmub_srv_calc_region_info(dmub_srv, &region_params,
2100 &region_info);
2101
2102 if (status != DMUB_STATUS_OK) {
2103 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
2104 return -EINVAL;
2105 }
2106
2107 /*
2108 * Allocate a framebuffer based on the total size of all the regions.
2109 * TODO: Move this into GART.
2110 */
2111 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2112 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
2113 &adev->dm.dmub_bo_gpu_addr,
2114 &adev->dm.dmub_bo_cpu_addr);
2115 if (r)
2116 return r;
2117
2118 /* Rebase the regions on the framebuffer address. */
2119 memset(&fb_params, 0, sizeof(fb_params));
2120 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
2121 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
2122 fb_params.region_info = &region_info;
2123
2124 adev->dm.dmub_fb_info =
2125 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2126 fb_info = adev->dm.dmub_fb_info;
2127
2128 if (!fb_info) {
2129 DRM_ERROR(
2130 "Failed to allocate framebuffer info for DMUB service!\n");
2131 return -ENOMEM;
2132 }
2133
2134 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
2135 if (status != DMUB_STATUS_OK) {
2136 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2137 return -EINVAL;
2138 }
2139
2140 return 0;
2141}
2142
2143static int dm_sw_init(void *handle)
2144{
2145 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2146 int r;
2147
2148 r = dm_dmub_sw_init(adev);
2149 if (r)
2150 return r;
2151
2152 return load_dmcu_fw(adev);
2153}
2154
2155static int dm_sw_fini(void *handle)
2156{
2157 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2158
2159 kfree(adev->dm.dmub_fb_info);
2160 adev->dm.dmub_fb_info = NULL;
2161
2162 if (adev->dm.dmub_srv) {
2163 dmub_srv_destroy(adev->dm.dmub_srv);
2164 adev->dm.dmub_srv = NULL;
2165 }
2166
2167 release_firmware(adev->dm.dmub_fw);
2168 adev->dm.dmub_fw = NULL;
2169
2170 release_firmware(adev->dm.fw_dmcu);
2171 adev->dm.fw_dmcu = NULL;
2172
2173 return 0;
2174}
2175
2176 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2177 {
2178	struct amdgpu_dm_connector *aconnector;
2179	struct drm_connector *connector;
2180	struct drm_connector_list_iter iter;
2181	int ret = 0;
2182
2183 drm_connector_list_iter_begin(dev, &iter);
2184 drm_for_each_connector_iter(connector, &iter) {
2185		aconnector = to_amdgpu_dm_connector(connector);
2186 if (aconnector->dc_link->type == dc_connection_mst_branch &&
2187 aconnector->mst_mgr.aux) {
2188			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2189 aconnector,
2190 aconnector->base.base.id);
2191
2192 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2193 if (ret < 0) {
2194 DRM_ERROR("DM_MST: Failed to start MST\n");
2195 aconnector->dc_link->type =
2196 dc_connection_single;
2197 break;
2198			}
2199		}
2200	}
2201	drm_connector_list_iter_end(&iter);
2202
2203 return ret;
2204}
2205
2206static int dm_late_init(void *handle)
2207{
2208	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2209
2210 struct dmcu_iram_parameters params;
2211 unsigned int linear_lut[16];
2212 int i;
2213	struct dmcu *dmcu = NULL;
2214
2215 dmcu = adev->dm.dc->res_pool->dmcu;
2216
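	/* Build a 16-point linear (identity) backlight LUT spanning 0..0xFFFF. */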
2217 for (i = 0; i < 16; i++)
2218 linear_lut[i] = 0xFFFF * i / 15;
2219
2220 params.set = 0;
2221	params.backlight_ramping_override = false;
2222 params.backlight_ramping_start = 0xCCCC;
2223 params.backlight_ramping_reduction = 0xCCCCCCCC;
2224 params.backlight_lut_array_size = 16;
2225 params.backlight_lut_array = linear_lut;
2226
2227	/* Min backlight level after ABM reduction; don't allow below 1%:
2228 * 0xFFFF x 0.01 = 0x28F
2229 */
2230 params.min_abm_backlight = 0x28F;
2231	/* In the case where abm is implemented on dmcub,
2232 * dmcu object will be null.
2233 * ABM 2.4 and up are implemented on dmcub.
2234 */
2235 if (dmcu) {
2236 if (!dmcu_load_iram(dmcu, params))
2237 return -EINVAL;
2238 } else if (adev->dm.dc->ctx->dmub_srv) {
2239 struct dc_link *edp_links[MAX_NUM_EDP];
2240 int edp_num;
2241
2242 get_edp_links(adev->dm.dc, edp_links, &edp_num);
2243 for (i = 0; i < edp_num; i++) {
2244 if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2245 return -EINVAL;
2246 }
2247 }
2248
2249	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2250}
2251
2252static void s3_handle_mst(struct drm_device *dev, bool suspend)
2253{
2254	struct amdgpu_dm_connector *aconnector;
2255	struct drm_connector *connector;
2256	struct drm_connector_list_iter iter;
2257 struct drm_dp_mst_topology_mgr *mgr;
2258 int ret;
2259 bool need_hotplug = false;
2260
2261 drm_connector_list_iter_begin(dev, &iter);
2262 drm_for_each_connector_iter(connector, &iter) {
2263 aconnector = to_amdgpu_dm_connector(connector);
2264 if (aconnector->dc_link->type != dc_connection_mst_branch ||
2265 aconnector->mst_port)
2266 continue;
2267
2268 mgr = &aconnector->mst_mgr;
2269
2270 if (suspend) {
2271 drm_dp_mst_topology_mgr_suspend(mgr);
2272 } else {
2273			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2274			if (ret < 0) {
2275 dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
2276 aconnector->dc_link);
2277 need_hotplug = true;
2278 }
2279 }
2280	}
2281	drm_connector_list_iter_end(&iter);
2282
2283 if (need_hotplug)
2284 drm_kms_helper_hotplug_event(dev);
2285}
2286
2287static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2288{
2289 int ret = 0;
2290
2291	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
2292	 * on the Windows driver dc implementation.
2293	 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
2294 * should be passed to smu during boot up and resume from s3.
2295 * boot up: dc calculate dcn watermark clock settings within dc_create,
2296 * dcn20_resource_construct
2297 * then call pplib functions below to pass the settings to smu:
2298 * smu_set_watermarks_for_clock_ranges
2299 * smu_set_watermarks_table
2300 * navi10_set_watermarks_table
2301 * smu_write_watermarks_table
2302 *
2303 * For Renoir, clock settings of dcn watermark are also fixed values.
2304 * dc has implemented different flow for window driver:
2305 * dc_hardware_init / dc_set_power_state
2306 * dcn10_init_hw
2307 * notify_wm_ranges
2308 * set_wm_ranges
2309 * -- Linux
2310 * smu_set_watermarks_for_clock_ranges
2311 * renoir_set_watermarks_table
2312 * smu_write_watermarks_table
2313 *
2314 * For Linux,
2315 * dc_hardware_init -> amdgpu_dm_init
2316 * dc_set_power_state --> dm_resume
2317 *
2318	 * Therefore, this function applies to Navi10/12/14 but not to Renoir.
2319	 *
2320 */
2321	switch (adev->ip_versions[DCE_HWIP][0]) {
2322 case IP_VERSION(2, 0, 2):
2323 case IP_VERSION(2, 0, 0):
2324 break;
2325 default:
2326 return 0;
2327 }
2328
2329	ret = amdgpu_dpm_write_watermarks_table(adev);
2330 if (ret) {
2331 DRM_ERROR("Failed to update WMTABLE!\n");
2332 return ret;
2333 }
2334
2335 return 0;
2336}
2337
2338/**
2339 * dm_hw_init() - Initialize DC device
2340 * @handle: The base driver device containing the amdgpu_dm device.
2341 *
2342 * Initialize the &struct amdgpu_display_manager device. This involves calling
2343 * the initializers of each DM component, then populating the struct with them.
2344 *
2345 * Although the function implies hardware initialization, both hardware and
2346 * software are initialized here. Splitting them out to their relevant init
2347 * hooks is a future TODO item.
2348 *
2349 * Some notable things that are initialized here:
2350 *
2351 * - Display Core, both software and hardware
2352 * - DC modules that we need (freesync and color management)
2353 * - DRM software states
2354 * - Interrupt sources and handlers
2355 * - Vblank support
2356 * - Debug FS entries, if enabled
2357 */
2358static int dm_hw_init(void *handle)
2359{
2360 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2361 /* Create DAL display manager */
2362 amdgpu_dm_init(adev);
2363 amdgpu_dm_hpd_init(adev);
2364
2365 return 0;
2366}
2367
2368/**
2369 * dm_hw_fini() - Teardown DC device
2370 * @handle: The base driver device containing the amdgpu_dm device.
2371 *
2372 * Teardown components within &struct amdgpu_display_manager that require
2373 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2374 * were loaded. Also flush IRQ workqueues and disable them.
2375 */
2376static int dm_hw_fini(void *handle)
2377{
2378 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2379
2380 amdgpu_dm_hpd_fini(adev);
2381
2382 amdgpu_dm_irq_fini(adev);
2383	amdgpu_dm_fini(adev);
2384 return 0;
2385}
2386
2387
2388static int dm_enable_vblank(struct drm_crtc *crtc);
2389static void dm_disable_vblank(struct drm_crtc *crtc);
2390
2391static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2392 struct dc_state *state, bool enable)
2393{
2394 enum dc_irq_source irq_source;
2395 struct amdgpu_crtc *acrtc;
2396 int rc = -EBUSY;
2397 int i = 0;
2398
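	/* Toggle the pageflip interrupt (and vblank, when enabling) for every
	 * stream that still has active planes; used around GPU reset.
	 */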
2399 for (i = 0; i < state->stream_count; i++) {
2400 acrtc = get_crtc_by_otg_inst(
2401 adev, state->stream_status[i].primary_otg_inst);
2402
2403 if (acrtc && state->stream_status[i].plane_count != 0) {
2404 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2405 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2406 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
2407 acrtc->crtc_id, enable ? "en" : "dis", rc);
2408 if (rc)
2409 DRM_WARN("Failed to %s pflip interrupts\n",
2410 enable ? "enable" : "disable");
2411
2412 if (enable) {
2413 rc = dm_enable_vblank(&acrtc->base);
2414 if (rc)
2415 DRM_WARN("Failed to enable vblank interrupts\n");
2416 } else {
2417 dm_disable_vblank(&acrtc->base);
2418 }
2419
2420 }
2421 }
2422
2423}
2424
2425 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2426{
2427 struct dc_state *context = NULL;
2428 enum dc_status res = DC_ERROR_UNEXPECTED;
2429 int i;
2430 struct dc_stream_state *del_streams[MAX_PIPES];
2431 int del_streams_count = 0;
2432
2433 memset(del_streams, 0, sizeof(del_streams));
2434
2435 context = dc_create_state(dc);
2436 if (context == NULL)
2437 goto context_alloc_fail;
2438
2439 dc_resource_state_copy_construct_current(dc, context);
2440
2441 /* First remove from context all streams */
2442 for (i = 0; i < context->stream_count; i++) {
2443 struct dc_stream_state *stream = context->streams[i];
2444
2445 del_streams[del_streams_count++] = stream;
2446 }
2447
2448 /* Remove all planes for removed streams and then remove the streams */
2449 for (i = 0; i < del_streams_count; i++) {
2450 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2451 res = DC_FAIL_DETACH_SURFACES;
2452 goto fail;
2453 }
2454
2455 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2456 if (res != DC_OK)
2457 goto fail;
2458 }
2459
2460 res = dc_commit_state(dc, context);
2461
2462fail:
2463 dc_release_state(context);
2464
2465context_alloc_fail:
2466 return res;
2467}
2468
2469static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2470{
2471 int i;
2472
2473 if (dm->hpd_rx_offload_wq) {
2474 for (i = 0; i < dm->dc->caps.max_links; i++)
2475 flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2476 }
2477}
2478
2479static int dm_suspend(void *handle)
2480{
2481 struct amdgpu_device *adev = handle;
2482 struct amdgpu_display_manager *dm = &adev->dm;
2483 int ret = 0;
2484
2485	if (amdgpu_in_reset(adev)) {
2486		mutex_lock(&dm->dc_lock);
2487
2488		dc_allow_idle_optimizations(adev->dm.dc, false);
2489
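		/* Snapshot the current DC state; dm_resume() replays it after reset. */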
2490 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2491
2492 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2493
2494 amdgpu_dm_commit_zero_streams(dm->dc);
2495
2496 amdgpu_dm_irq_suspend(adev);
2497
8e794421
WL
2498 hpd_rx_irq_work_suspend(dm);
2499
2500 return ret;
2501 }
2502
2503	WARN_ON(adev->dm.cached_state);
2504	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2505
2506	s3_handle_mst(adev_to_drm(adev), true);
2507
2508 amdgpu_dm_irq_suspend(adev);
2509
2510 hpd_rx_irq_work_suspend(dm);
2511
2512	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2513
2514	return 0;
2515}
2516
2517 struct amdgpu_dm_connector *
2518amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2519 struct drm_crtc *crtc)
2520{
2521 uint32_t i;
2522	struct drm_connector_state *new_con_state;
2523 struct drm_connector *connector;
2524 struct drm_crtc *crtc_from_state;
2525
2526 for_each_new_connector_in_state(state, connector, new_con_state, i) {
2527 crtc_from_state = new_con_state->crtc;
2528
2529 if (crtc_from_state == crtc)
2530			return to_amdgpu_dm_connector(connector);
2531 }
2532
2533 return NULL;
2534}
2535
2536static void emulated_link_detect(struct dc_link *link)
2537{
2538 struct dc_sink_init_data sink_init_data = { 0 };
2539 struct display_sink_capability sink_caps = { 0 };
2540 enum dc_edid_status edid_status;
2541 struct dc_context *dc_ctx = link->ctx;
2542 struct dc_sink *sink = NULL;
2543 struct dc_sink *prev_sink = NULL;
2544
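	/* Used for forced connectors: instead of real detection, fabricate a
	 * sink matching the connector signal and read the EDID directly.
	 */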
2545 link->type = dc_connection_none;
2546 prev_sink = link->local_sink;
2547
2548 if (prev_sink)
2549 dc_sink_release(prev_sink);
2550
2551 switch (link->connector_signal) {
2552 case SIGNAL_TYPE_HDMI_TYPE_A: {
2553 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2554 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2555 break;
2556 }
2557
2558 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2559 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2560 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2561 break;
2562 }
2563
2564 case SIGNAL_TYPE_DVI_DUAL_LINK: {
2565 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2566 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2567 break;
2568 }
2569
2570 case SIGNAL_TYPE_LVDS: {
2571 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2572 sink_caps.signal = SIGNAL_TYPE_LVDS;
2573 break;
2574 }
2575
2576 case SIGNAL_TYPE_EDP: {
2577 sink_caps.transaction_type =
2578 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2579 sink_caps.signal = SIGNAL_TYPE_EDP;
2580 break;
2581 }
2582
2583 case SIGNAL_TYPE_DISPLAY_PORT: {
2584 sink_caps.transaction_type =
2585 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2586 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2587 break;
2588 }
2589
2590 default:
2591 DC_ERROR("Invalid connector type! signal:%d\n",
2592 link->connector_signal);
2593 return;
2594 }
2595
2596 sink_init_data.link = link;
2597 sink_init_data.sink_signal = sink_caps.signal;
2598
2599 sink = dc_sink_create(&sink_init_data);
2600 if (!sink) {
2601 DC_ERROR("Failed to create sink!\n");
2602 return;
2603 }
2604
2605	/* dc_sink_create returns a new reference */
2606 link->local_sink = sink;
2607
2608 edid_status = dm_helpers_read_local_edid(
2609 link->ctx,
2610 link,
2611 sink);
2612
2613 if (edid_status != EDID_OK)
2614 DC_ERROR("Failed to read EDID");
2615
2616}
2617
2618static void dm_gpureset_commit_state(struct dc_state *dc_state,
2619 struct amdgpu_display_manager *dm)
2620{
2621 struct {
2622 struct dc_surface_update surface_updates[MAX_SURFACES];
2623 struct dc_plane_info plane_infos[MAX_SURFACES];
2624 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2625 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2626 struct dc_stream_update stream_update;
2627 } * bundle;
2628 int k, m;
2629
2630 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2631
2632 if (!bundle) {
2633 dm_error("Failed to allocate update bundle\n");
2634 goto cleanup;
2635 }
2636
2637 for (k = 0; k < dc_state->stream_count; k++) {
2638 bundle->stream_update.stream = dc_state->streams[k];
2639
2640 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2641 bundle->surface_updates[m].surface =
2642 dc_state->stream_status->plane_states[m];
2643 bundle->surface_updates[m].surface->force_full_update =
2644 true;
2645 }
2646 dc_commit_updates_for_stream(
2647 dm->dc, bundle->surface_updates,
2648 dc_state->stream_status->plane_count,
2649			dc_state->streams[k], &bundle->stream_update, dc_state);
2650 }
2651
2652cleanup:
2653 kfree(bundle);
2654
2655 return;
2656}
2657
2658static int dm_resume(void *handle)
2659{
2660 struct amdgpu_device *adev = handle;
2661	struct drm_device *ddev = adev_to_drm(adev);
2662	struct amdgpu_display_manager *dm = &adev->dm;
2663	struct amdgpu_dm_connector *aconnector;
2664	struct drm_connector *connector;
2665	struct drm_connector_list_iter iter;
2666	struct drm_crtc *crtc;
2667	struct drm_crtc_state *new_crtc_state;
2668 struct dm_crtc_state *dm_new_crtc_state;
2669 struct drm_plane *plane;
2670 struct drm_plane_state *new_plane_state;
2671 struct dm_plane_state *dm_new_plane_state;
2672	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2673	enum dc_connection_type new_connection_type = dc_connection_none;
2674 struct dc_state *dc_state;
2675 int i, r, j;
2676
2677	if (amdgpu_in_reset(adev)) {
2678 dc_state = dm->cached_dc_state;
2679
2680 /*
2681 * The dc->current_state is backed up into dm->cached_dc_state
2682 * before we commit 0 streams.
2683 *
2684 * DC will clear link encoder assignments on the real state
2685 * but the changes won't propagate over to the copy we made
2686 * before the 0 streams commit.
2687 *
2688 * DC expects that link encoder assignments are *not* valid
2689 * when committing a state, so as a workaround we can copy
2690 * off of the current state.
2691 *
2692 * We lose the previous assignments, but we had already
2693		 * committed 0 streams anyway.
2694		 */
2695		link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
2696
2697 r = dm_dmub_hw_init(adev);
2698 if (r)
2699 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2700
2701 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2702 dc_resume(dm->dc);
2703
2704 amdgpu_dm_irq_resume_early(adev);
2705
2706 for (i = 0; i < dc_state->stream_count; i++) {
2707 dc_state->streams[i]->mode_changed = true;
2708 for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2709 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2710 = 0xffffffff;
2711 }
2712 }
2713
2714 if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
2715 amdgpu_dm_outbox_init(adev);
2716 dc_enable_dmub_outbox(adev->dm.dc);
2717 }
2718
2719		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2720
2721 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2722
2723 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2724
2725 dc_release_state(dm->cached_dc_state);
2726 dm->cached_dc_state = NULL;
2727
2728 amdgpu_dm_irq_resume_late(adev);
2729
2730 mutex_unlock(&dm->dc_lock);
2731
2732 return 0;
2733 }
2734 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2735 dc_release_state(dm_state->context);
2736 dm_state->context = dc_create_state(dm->dc);
2737 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2738 dc_resource_state_construct(dm->dc, dm_state->context);
2739
2740	/* Before powering on DC we need to re-initialize DMUB. */
2741	dm_dmub_hw_resume(adev);
2742
2743 /* Re-enable outbox interrupts for DPIA. */
2744 if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
2745 amdgpu_dm_outbox_init(adev);
2746 dc_enable_dmub_outbox(adev->dm.dc);
2747 }
2748
2749 /* power on hardware */
2750 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2751
2752 /* program HPD filter */
2753 dc_resume(dm->dc);
2754
2755 /*
2756	 * Enable HPD Rx IRQ early; this must be done before setting the mode,
2757	 * as short-pulse interrupts are used for MST.
2758 */
2759 amdgpu_dm_irq_resume_early(adev);
2760
2761	/* On resume we need to rewrite the MSTM control bits to enable MST */
2762 s3_handle_mst(ddev, false);
2763
2764	/* Do detection */
2765 drm_connector_list_iter_begin(ddev, &iter);
2766 drm_for_each_connector_iter(connector, &iter) {
2767		aconnector = to_amdgpu_dm_connector(connector);
2768
2769 /*
2770 * this is the case when traversing through already created
2771 * MST connectors, should be skipped
2772 */
2773 if (aconnector->dc_link &&
2774 aconnector->dc_link->type == dc_connection_mst_branch)
2775 continue;
2776
2777		mutex_lock(&aconnector->hpd_lock);
2778 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2779 DRM_ERROR("KMS: Failed to detect connector\n");
2780
2781		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2782			emulated_link_detect(aconnector->dc_link);
2783 } else {
2784 mutex_lock(&dm->dc_lock);
2785			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2786 mutex_unlock(&dm->dc_lock);
2787 }
2788
2789 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2790 aconnector->fake_enable = false;
2791
2792 if (aconnector->dc_sink)
2793 dc_sink_release(aconnector->dc_sink);
2794 aconnector->dc_sink = NULL;
2795 amdgpu_dm_update_connector_after_detect(aconnector);
2796		mutex_unlock(&aconnector->hpd_lock);
2797	}
2798	drm_connector_list_iter_end(&iter);
2799
2800	/* Force mode set in atomic commit */
2801	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2802		new_crtc_state->active_changed = true;
2803
2804 /*
2805 * atomic_check is expected to create the dc states. We need to release
2806 * them here, since they were duplicated as part of the suspend
2807 * procedure.
2808 */
2809	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2810 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2811 if (dm_new_crtc_state->stream) {
2812 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2813 dc_stream_release(dm_new_crtc_state->stream);
2814 dm_new_crtc_state->stream = NULL;
2815 }
2816 }
2817
2818	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2819 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2820 if (dm_new_plane_state->dc_state) {
2821 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2822 dc_plane_state_release(dm_new_plane_state->dc_state);
2823 dm_new_plane_state->dc_state = NULL;
2824 }
2825 }
2826
2827	drm_atomic_helper_resume(ddev, dm->cached_state);
2828
2829	dm->cached_state = NULL;
2830
2831	amdgpu_dm_irq_resume_late(adev);
2832
2833 amdgpu_dm_smu_write_watermarks_table(adev);
2834
2835	return 0;
2836}
2837
2838/**
2839 * DOC: DM Lifecycle
2840 *
2841 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2842 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2843 * the base driver's device list to be initialized and torn down accordingly.
2844 *
2845 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2846 */
2847
2848static const struct amd_ip_funcs amdgpu_dm_funcs = {
2849 .name = "dm",
2850 .early_init = dm_early_init,
2851	.late_init = dm_late_init,
2852 .sw_init = dm_sw_init,
2853 .sw_fini = dm_sw_fini,
2854	.early_fini = amdgpu_dm_early_fini,
2855 .hw_init = dm_hw_init,
2856 .hw_fini = dm_hw_fini,
2857 .suspend = dm_suspend,
2858 .resume = dm_resume,
2859 .is_idle = dm_is_idle,
2860 .wait_for_idle = dm_wait_for_idle,
2861 .check_soft_reset = dm_check_soft_reset,
2862 .soft_reset = dm_soft_reset,
2863 .set_clockgating_state = dm_set_clockgating_state,
2864 .set_powergating_state = dm_set_powergating_state,
2865};
2866
2867const struct amdgpu_ip_block_version dm_ip_block =
2868{
2869 .type = AMD_IP_BLOCK_TYPE_DCE,
2870 .major = 1,
2871 .minor = 0,
2872 .rev = 0,
2873 .funcs = &amdgpu_dm_funcs,
2874};
2875
2876
2877/**
2878 * DOC: atomic
2879 *
2880 * *WIP*
2881 */
2882
2883 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2884	.fb_create = amdgpu_display_user_framebuffer_create,
2885	.get_format_info = amd_get_format_info,
2886	.output_poll_changed = drm_fb_helper_output_poll_changed,
2887	.atomic_check = amdgpu_dm_atomic_check,
2888	.atomic_commit = drm_atomic_helper_commit,
2889};
2890
2891static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2892 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2893};
2894
2895static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2896{
2897	u32 max_avg, min_cll, max, min, q, r;
2898 struct amdgpu_dm_backlight_caps *caps;
2899 struct amdgpu_display_manager *dm;
2900 struct drm_connector *conn_base;
2901 struct amdgpu_device *adev;
2902	struct dc_link *link = NULL;
2903 static const u8 pre_computed_values[] = {
2904 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2905 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2906	int i;
2907
2908 if (!aconnector || !aconnector->dc_link)
2909 return;
2910
2911	link = aconnector->dc_link;
2912 if (link->connector_signal != SIGNAL_TYPE_EDP)
2913 return;
2914
2915	conn_base = &aconnector->base;
2916	adev = drm_to_adev(conn_base->dev);
2917	dm = &adev->dm;
2918 for (i = 0; i < dm->num_of_edps; i++) {
2919 if (link == dm->backlight_link[i])
2920 break;
2921 }
2922 if (i >= dm->num_of_edps)
2923 return;
2924 caps = &dm->backlight_caps[i];
2925 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2926 caps->aux_support = false;
2927	max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall;
2928 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2929
2930	if (caps->ext_caps->bits.oled == 1 /*||
2931	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2932	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2933 caps->aux_support = true;
2934
2935 if (amdgpu_backlight == 0)
2936 caps->aux_support = false;
2937 else if (amdgpu_backlight == 1)
2938 caps->aux_support = true;
2939
2940 /* From the specification (CTA-861-G), for calculating the maximum
2941 * luminance we need to use:
2942 * Luminance = 50*2**(CV/32)
2943 * Where CV is a one-byte value.
2944	 * Calculating this expression would require floating-point precision;
2945	 * to avoid that complexity, we take advantage of the fact that CV is
2946	 * divided by a constant. From Euclid's division algorithm, we know that CV
2947 * can be written as: CV = 32*q + r. Next, we replace CV in the
2948 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2949 * need to pre-compute the value of r/32. For pre-computing the values
2950 * We just used the following Ruby line:
2951 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2952 * The results of the above expressions can be verified at
2953 * pre_computed_values.
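	 * Worked example (illustrative): max_avg = 64 gives q = 2 and r = 0,
	 * so max = (1 << 2) * 50 = 200, matching 50*2**(64/32) = 200 from the
	 * formula above.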
2954 */
2955 q = max_avg >> 5;
2956 r = max_avg % 32;
2957 max = (1 << q) * pre_computed_values[r];
2958
2959 // min luminance: maxLum * (CV/255)^2 / 100
2960 q = DIV_ROUND_CLOSEST(min_cll, 255);
2961 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2962
2963 caps->aux_max_input_signal = max;
2964 caps->aux_min_input_signal = min;
2965}
2966
2967void amdgpu_dm_update_connector_after_detect(
2968 struct amdgpu_dm_connector *aconnector)
2969{
2970 struct drm_connector *connector = &aconnector->base;
2971 struct drm_device *dev = connector->dev;
2972	struct dc_sink *sink;
2973
2974 /* MST handled by drm_mst framework */
2975 if (aconnector->mst_mgr.mst_state == true)
2976 return;
2977
2978	sink = aconnector->dc_link->local_sink;
2979 if (sink)
2980 dc_sink_retain(sink);
2981
2982 /*
2983	 * An EDID-managed connector gets its first update only in the mode_valid hook;
2984	 * the connector sink is then set to either a fake or a physical sink, depending
2985	 * on link status. Skip if this was already done during boot.
2986 */
2987 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2988 && aconnector->dc_em_sink) {
2989
2990 /*
2991		 * For S3 resume with headless, use the em_sink to fake a stream
2992 * because on resume connector->sink is set to NULL
2993 */
2994 mutex_lock(&dev->mode_config.mutex);
2995
2996 if (sink) {
2997			if (aconnector->dc_sink) {
2998				amdgpu_dm_update_freesync_caps(connector, NULL);
2999 /*
3000				 * The retain and release below are used to
3001				 * bump up the refcount for the sink because the link doesn't point
3002				 * to it anymore after disconnect, so on the next crtc-to-connector
3003				 * reshuffle by the UMD we would otherwise get an unwanted dc_sink release.
3004 */
3005				dc_sink_release(aconnector->dc_sink);
3006			}
3007			aconnector->dc_sink = sink;
3008			dc_sink_retain(aconnector->dc_sink);
3009 amdgpu_dm_update_freesync_caps(connector,
3010 aconnector->edid);
3011		} else {
3012			amdgpu_dm_update_freesync_caps(connector, NULL);
3013			if (!aconnector->dc_sink) {
3014				aconnector->dc_sink = aconnector->dc_em_sink;
3015				dc_sink_retain(aconnector->dc_sink);
3016			}
3017 }
3018
3019 mutex_unlock(&dev->mode_config.mutex);
3020
3021 if (sink)
3022 dc_sink_release(sink);
3023 return;
3024 }
3025
3026 /*
3027 * TODO: temporary guard to look for proper fix
3028 * if this sink is MST sink, we should not do anything
3029 */
3030 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
3031 dc_sink_release(sink);
3032		return;
3033	}
3034
3035 if (aconnector->dc_sink == sink) {
3036 /*
3037 * We got a DP short pulse (Link Loss, DP CTS, etc...).
3038 * Do nothing!!
3039 */
3040		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
3041				aconnector->connector_id);
3042 if (sink)
3043 dc_sink_release(sink);
3044 return;
3045 }
3046
3047	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
3048 aconnector->connector_id, aconnector->dc_sink, sink);
3049
3050 mutex_lock(&dev->mode_config.mutex);
3051
1f6010a9
DF
3052 /*
3053 * 1. Update status of the drm connector
3054 * 2. Send an event and let userspace tell us what to do
3055 */
4562236b 3056 if (sink) {
1f6010a9
DF
3057 /*
3058 * TODO: check if we still need the S3 mode update workaround.
3059 * If yes, put it here.
3060 */
c64b0d6b 3061 if (aconnector->dc_sink) {
98e6436d 3062 amdgpu_dm_update_freesync_caps(connector, NULL);
c64b0d6b
VL
3063 dc_sink_release(aconnector->dc_sink);
3064 }
4562236b
HW
3065
3066 aconnector->dc_sink = sink;
dcd5fb82 3067 dc_sink_retain(aconnector->dc_sink);
900b3cb1 3068 if (sink->dc_edid.length == 0) {
4562236b 3069 aconnector->edid = NULL;
e6142dd5
AP
3070 if (aconnector->dc_link->aux_mode) {
3071 drm_dp_cec_unset_edid(
3072 &aconnector->dm_dp_aux.aux);
3073 }
900b3cb1 3074 } else {
4562236b 3075 aconnector->edid =
e6142dd5 3076 (struct edid *)sink->dc_edid.raw_edid;
4562236b 3077
e6142dd5
AP
3078 if (aconnector->dc_link->aux_mode)
3079 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
3080 aconnector->edid);
4562236b 3081 }
e6142dd5 3082
20543be9 3083 drm_connector_update_edid_property(connector, aconnector->edid);
98e6436d 3084 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
94562810 3085 update_connector_ext_caps(aconnector);
4562236b 3086 } else {
e86e8947 3087 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
98e6436d 3088 amdgpu_dm_update_freesync_caps(connector, NULL);
c555f023 3089 drm_connector_update_edid_property(connector, NULL);
4562236b 3090 aconnector->num_modes = 0;
dcd5fb82 3091 dc_sink_release(aconnector->dc_sink);
4562236b 3092 aconnector->dc_sink = NULL;
5326c452 3093 aconnector->edid = NULL;
0c8620d6
BL
3094#ifdef CONFIG_DRM_AMD_DC_HDCP
3095 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
3096 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3097 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3098#endif
3099 }
3100
3101 mutex_unlock(&dev->mode_config.mutex);
3102
3103 update_subconnector_property(aconnector);
3104
3105 if (sink)
3106 dc_sink_release(sink);
3107}
3108
3109 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
3110 {
3111 struct drm_connector *connector = &aconnector->base;
3112 struct drm_device *dev = connector->dev;
3113	enum dc_connection_type new_connection_type = dc_connection_none;
3114	struct amdgpu_device *adev = drm_to_adev(dev);
3115#ifdef CONFIG_DRM_AMD_DC_HDCP
3116	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3117#endif
3118	bool ret = false;
3119
3120 if (adev->dm.disable_hpd_irq)
3121 return;
3122
3123 /*
3124 * In case of failure or MST no need to update connector status or notify the OS
3125 * since (for MST case) MST does this in its own context.
4562236b
HW
3126 */
3127 mutex_lock(&aconnector->hpd_lock);
3128
3129#ifdef CONFIG_DRM_AMD_DC_HDCP
3130	if (adev->dm.hdcp_workqueue) {
3131		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3132 dm_con_state->update_hdcp = true;
3133 }
3134#endif
3135 if (aconnector->fake_enable)
3136 aconnector->fake_enable = false;
3137
3138 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3139 DRM_ERROR("KMS: Failed to detect connector\n");
3140
3141 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3142 emulated_link_detect(aconnector->dc_link);
3143
3144 drm_modeset_lock_all(dev);
3145 dm_restore_drm_connector_state(dev, connector);
3146 drm_modeset_unlock_all(dev);
3147
3148 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3149			drm_kms_helper_connector_hotplug_event(connector);
3150 } else {
3151 mutex_lock(&adev->dm.dc_lock);
3152 ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
3153 mutex_unlock(&adev->dm.dc_lock);
3154 if (ret) {
3155 amdgpu_dm_update_connector_after_detect(aconnector);
3156
3157 drm_modeset_lock_all(dev);
3158 dm_restore_drm_connector_state(dev, connector);
3159 drm_modeset_unlock_all(dev);
3160
3161 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3162 drm_kms_helper_connector_hotplug_event(connector);
3163 }
3164 }
3165 mutex_unlock(&aconnector->hpd_lock);
3166
3167}
3168
3169static void handle_hpd_irq(void *param)
3170{
3171 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3172
3173 handle_hpd_irq_helper(aconnector);
3174
3175}
3176
3177 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3178{
3179 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3180 uint8_t dret;
3181 bool new_irq_handled = false;
3182 int dpcd_addr;
3183 int dpcd_bytes_to_read;
3184
3185 const int max_process_count = 30;
3186 int process_count = 0;
3187
3188 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3189
3190 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3191 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3192 /* DPCD 0x200 - 0x201 for downstream IRQ */
3193 dpcd_addr = DP_SINK_COUNT;
3194 } else {
3195 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3196 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
3197 dpcd_addr = DP_SINK_COUNT_ESI;
3198 }
3199
3200 dret = drm_dp_dpcd_read(
3201 &aconnector->dm_dp_aux.aux,
3202 dpcd_addr,
3203 esi,
3204 dpcd_bytes_to_read);
3205
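	/* Keep servicing sink-count/ESI notifications until no new IRQ is
	 * raised or max_process_count is exhausted.
	 */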
3206 while (dret == dpcd_bytes_to_read &&
3207 process_count < max_process_count) {
3208 uint8_t retry;
3209 dret = 0;
3210
3211 process_count++;
3212
3213		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3214 /* handle HPD short pulse irq */
3215 if (aconnector->mst_mgr.mst_state)
3216 drm_dp_mst_hpd_irq(
3217 &aconnector->mst_mgr,
3218 esi,
3219 &new_irq_handled);
3220
3221 if (new_irq_handled) {
3222 /* ACK at DPCD to notify down stream */
3223 const int ack_dpcd_bytes_to_write =
3224 dpcd_bytes_to_read - 1;
3225
3226 for (retry = 0; retry < 3; retry++) {
3227 uint8_t wret;
3228
3229 wret = drm_dp_dpcd_write(
3230 &aconnector->dm_dp_aux.aux,
3231 dpcd_addr + 1,
3232 &esi[1],
3233 ack_dpcd_bytes_to_write);
3234 if (wret == ack_dpcd_bytes_to_write)
3235 break;
3236 }
3237
3238			/* check if there is a new irq to be handled */
3239 dret = drm_dp_dpcd_read(
3240 &aconnector->dm_dp_aux.aux,
3241 dpcd_addr,
3242 esi,
3243 dpcd_bytes_to_read);
3244
3245 new_irq_handled = false;
3246		} else {
3247			break;
3248		}
3249 }
3250
3251 if (process_count == max_process_count)
3252		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3253}
3254
8e794421
WL
3255static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3256 union hpd_irq_data hpd_irq_data)
3257{
3258 struct hpd_rx_irq_offload_work *offload_work =
3259 kzalloc(sizeof(*offload_work), GFP_KERNEL);
3260
3261 if (!offload_work) {
3262 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3263 return;
3264 }
3265
3266 INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3267 offload_work->data = hpd_irq_data;
3268 offload_work->offload_wq = offload_wq;
3269
3270 queue_work(offload_wq->wq, &offload_work->work);
3271 DRM_DEBUG_KMS("queue work to handle hpd_rx offload work\n");
3272}
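/*
 * A minimal sketch of the consumer side (standard workqueue idiom; the real
 * dm_handle_hpd_rx_offload_work is defined elsewhere in this file and may do
 * more): the handler recovers the kzalloc'd wrapper with container_of() and
 * is expected to free it when done.
 *
 *	static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
 *	{
 *		struct hpd_rx_irq_offload_work *offload_work =
 *			container_of(work, struct hpd_rx_irq_offload_work, work);
 *
 *		// handle offload_work->data ...
 *		kfree(offload_work);
 *	}
 */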
3273
4562236b
HW
3274static void handle_hpd_rx_irq(void *param)
3275{
c84dec2f 3276 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
4562236b
HW
3277 struct drm_connector *connector = &aconnector->base;
3278 struct drm_device *dev = connector->dev;
53cbf65c 3279 struct dc_link *dc_link = aconnector->dc_link;
4562236b 3280 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
c8ea79a8 3281 bool result = false;
fbbdadf2 3282 enum dc_connection_type new_connection_type = dc_connection_none;
c8ea79a8 3283 struct amdgpu_device *adev = drm_to_adev(dev);
2a0f9270 3284 union hpd_irq_data hpd_irq_data;
8e794421
WL
3285 bool link_loss = false;
3286 bool has_left_work = false;
3287 int idx = aconnector->base.index;
3288 struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
2a0f9270
BL
3289
3290 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
4562236b 3291
b972b4f9
HW
3292 if (adev->dm.disable_hpd_irq)
3293 return;
3294
1f6010a9
DF
3295 /*
3296 * TODO: Temporarily add a mutex so the hpd interrupt does not run into a gpio
4562236b
HW
3297 * conflict; once an i2c helper is implemented, this mutex should be
3298 * retired.
3299 */
b86e7eef 3300 mutex_lock(&aconnector->hpd_lock);
4562236b 3301
8e794421
WL
3302 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3303 &link_loss, true, &has_left_work);
3083a984 3304
8e794421
WL
3305 if (!has_left_work)
3306 goto out;
3307
3308 if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3309 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3310 goto out;
3311 }
3312
3313 if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3314 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3315 hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3316 dm_handle_mst_sideband_msg(aconnector);
3083a984
QZ
3317 goto out;
3318 }
3083a984 3319
8e794421
WL
3320 if (link_loss) {
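			/*
			 * Test-and-set under the offload lock: only the first
			 * interrupt to observe link loss queues the offload
			 * work; concurrent interrupts see
			 * is_handling_link_loss already set and skip
			 * re-queueing.
			 */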
3321 bool skip = false;
d2aa1356 3322
8e794421
WL
3323 spin_lock(&offload_wq->offload_lock);
3324 skip = offload_wq->is_handling_link_loss;
3325
3326 if (!skip)
3327 offload_wq->is_handling_link_loss = true;
3328
3329 spin_unlock(&offload_wq->offload_lock);
3330
3331 if (!skip)
3332 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3333
3334 goto out;
3335 }
3336 }
c8ea79a8 3337
3083a984 3338out:
c8ea79a8 3339 if (result && !is_mst_root_connector) {
4562236b 3340 /* Downstream Port status changed. */
fbbdadf2
BL
3341 if (!dc_link_detect_sink(dc_link, &new_connection_type))
3342 DRM_ERROR("KMS: Failed to detect connector\n");
3343
3344 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3345 emulated_link_detect(dc_link);
3346
3347 if (aconnector->fake_enable)
3348 aconnector->fake_enable = false;
3349
3350 amdgpu_dm_update_connector_after_detect(aconnector);
3351
3352
3353 drm_modeset_lock_all(dev);
3354 dm_restore_drm_connector_state(dev, connector);
3355 drm_modeset_unlock_all(dev);
3356
fc320a6f 3357 drm_kms_helper_connector_hotplug_event(connector);
15c735e7
WL
3358 } else {
3359 bool ret = false;
88ac3dda 3360
15c735e7
WL
3361 mutex_lock(&adev->dm.dc_lock);
3362 ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX);
3363 mutex_unlock(&adev->dm.dc_lock);
88ac3dda 3364
15c735e7
WL
3365 if (ret) {
3366 if (aconnector->fake_enable)
3367 aconnector->fake_enable = false;
4562236b 3368
15c735e7 3369 amdgpu_dm_update_connector_after_detect(aconnector);
4562236b 3370
15c735e7
WL
3371 drm_modeset_lock_all(dev);
3372 dm_restore_drm_connector_state(dev, connector);
3373 drm_modeset_unlock_all(dev);
4562236b 3374
15c735e7
WL
3375 drm_kms_helper_connector_hotplug_event(connector);
3376 }
4562236b
HW
3377 }
3378 }
2a0f9270 3379#ifdef CONFIG_DRM_AMD_DC_HDCP
95f247e7
DC
3380 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3381 if (adev->dm.hdcp_workqueue)
3382 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
3383 }
2a0f9270 3384#endif
4562236b 3385
b86e7eef 3386 if (dc_link->type != dc_connection_mst_branch)
e86e8947 3387 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
b86e7eef
NC
3388
3389 mutex_unlock(&aconnector->hpd_lock);
4562236b
HW
3390}
3391
3392static void register_hpd_handlers(struct amdgpu_device *adev)
3393{
4a580877 3394 struct drm_device *dev = adev_to_drm(adev);
4562236b 3395 struct drm_connector *connector;
c84dec2f 3396 struct amdgpu_dm_connector *aconnector;
4562236b
HW
3397 const struct dc_link *dc_link;
3398 struct dc_interrupt_params int_params = {0};
3399
3400 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3401 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3402
3403 list_for_each_entry(connector,
3404 &dev->mode_config.connector_list, head) {
3405
c84dec2f 3406 aconnector = to_amdgpu_dm_connector(connector);
4562236b
HW
3407 dc_link = aconnector->dc_link;
3408
3409 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3410 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3411 int_params.irq_source = dc_link->irq_source_hpd;
3412
3413 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3414 handle_hpd_irq,
3415 (void *) aconnector);
3416 }
3417
3418 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3419
3420 /* Also register for DP short pulse (hpd_rx). */
3421 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3422 int_params.irq_source = dc_link->irq_source_hpd_rx;
3423
3424 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3425 handle_hpd_rx_irq,
3426 (void *) aconnector);
8e794421
WL
3427
3428 if (adev->dm.hpd_rx_offload_wq)
3429 adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3430 aconnector;
4562236b
HW
3431 }
3432 }
3433}
3434
55e56389
MR
3435#if defined(CONFIG_DRM_AMD_DC_SI)
3436/* Register IRQ sources and initialize IRQ callbacks */
3437static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3438{
3439 struct dc *dc = adev->dm.dc;
3440 struct common_irq_params *c_irq_params;
3441 struct dc_interrupt_params int_params = {0};
3442 int r;
3443 int i;
3444 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3445
3446 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3447 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3448
3449 /*
3450 * Actions of amdgpu_irq_add_id():
3451 * 1. Register a set() function with base driver.
3452 * Base driver will call set() function to enable/disable an
3453 * interrupt in DC hardware.
3454 * 2. Register amdgpu_dm_irq_handler().
3455 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3456 * coming from DC hardware.
3457 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3458 * for acknowledging and handling. */
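	/*
	 * Concretely, for the VBLANK loop below: SRCID i + 1 (1..num_crtc)
	 * is translated by dc_interrupt_to_irq_source() into
	 * DC_IRQ_SOURCE_VBLANK1..VBLANKn, and that irq_source then indexes
	 * dm.vblank_params[], where the per-CRTC callback parameters live.
	 */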
3459
3460 /* Use VBLANK interrupt */
3461 for (i = 0; i < adev->mode_info.num_crtc; i++) {
3462 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3463 if (r) {
3464 DRM_ERROR("Failed to add crtc irq id!\n");
3465 return r;
3466 }
3467
3468 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3469 int_params.irq_source =
3470 dc_interrupt_to_irq_source(dc, i + 1, 0);
3471
3472 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3473
3474 c_irq_params->adev = adev;
3475 c_irq_params->irq_src = int_params.irq_source;
3476
3477 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3478 dm_crtc_high_irq, c_irq_params);
3479 }
3480
3481 /* Use GRPH_PFLIP interrupt */
3482 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3483 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3484 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3485 if (r) {
3486 DRM_ERROR("Failed to add page flip irq id!\n");
3487 return r;
3488 }
3489
3490 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3491 int_params.irq_source =
3492 dc_interrupt_to_irq_source(dc, i, 0);
3493
3494 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3495
3496 c_irq_params->adev = adev;
3497 c_irq_params->irq_src = int_params.irq_source;
3498
3499 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3500 dm_pflip_high_irq, c_irq_params);
3501
3502 }
3503
3504 /* HPD */
3505 r = amdgpu_irq_add_id(adev, client_id,
3506 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3507 if (r) {
3508 DRM_ERROR("Failed to add hpd irq id!\n");
3509 return r;
3510 }
3511
3512 register_hpd_handlers(adev);
3513
3514 return 0;
3515}
3516#endif
3517
4562236b
HW
3518/* Register IRQ sources and initialize IRQ callbacks */
3519static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3520{
3521 struct dc *dc = adev->dm.dc;
3522 struct common_irq_params *c_irq_params;
3523 struct dc_interrupt_params int_params = {0};
3524 int r;
3525 int i;
1ffdeca6 3526 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2c8ad2d5 3527
c08182f2 3528 if (adev->family >= AMDGPU_FAMILY_AI)
3760f76c 3529 client_id = SOC15_IH_CLIENTID_DCE;
4562236b
HW
3530
3531 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3532 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3533
1f6010a9
DF
3534 /*
3535 * Actions of amdgpu_irq_add_id():
4562236b
HW
3536 * 1. Register a set() function with base driver.
3537 * Base driver will call set() function to enable/disable an
3538 * interrupt in DC hardware.
3539 * 2. Register amdgpu_dm_irq_handler().
3540 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3541 * coming from DC hardware.
3542 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3543 * for acknowledging and handling. */
3544
b57de80a 3545 /* Use VBLANK interrupt */
e9029155 3546 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2c8ad2d5 3547 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
4562236b
HW
3548 if (r) {
3549 DRM_ERROR("Failed to add crtc irq id!\n");
3550 return r;
3551 }
3552
3553 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3554 int_params.irq_source =
3d761e79 3555 dc_interrupt_to_irq_source(dc, i, 0);
4562236b 3556
b57de80a 3557 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
4562236b
HW
3558
3559 c_irq_params->adev = adev;
3560 c_irq_params->irq_src = int_params.irq_source;
3561
3562 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3563 dm_crtc_high_irq, c_irq_params);
3564 }
3565
d2574c33
MK
3566 /* Use VUPDATE interrupt */
3567 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3568 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3569 if (r) {
3570 DRM_ERROR("Failed to add vupdate irq id!\n");
3571 return r;
3572 }
3573
3574 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3575 int_params.irq_source =
3576 dc_interrupt_to_irq_source(dc, i, 0);
3577
3578 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3579
3580 c_irq_params->adev = adev;
3581 c_irq_params->irq_src = int_params.irq_source;
3582
3583 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3584 dm_vupdate_high_irq, c_irq_params);
3585 }
3586
3d761e79 3587 /* Use GRPH_PFLIP interrupt */
4562236b
HW
3588 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3589 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2c8ad2d5 3590 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
4562236b
HW
3591 if (r) {
3592 DRM_ERROR("Failed to add page flip irq id!\n");
3593 return r;
3594 }
3595
3596 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3597 int_params.irq_source =
3598 dc_interrupt_to_irq_source(dc, i, 0);
3599
3600 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3601
3602 c_irq_params->adev = adev;
3603 c_irq_params->irq_src = int_params.irq_source;
3604
3605 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3606 dm_pflip_high_irq, c_irq_params);
3607
3608 }
3609
3610 /* HPD */
2c8ad2d5
AD
3611 r = amdgpu_irq_add_id(adev, client_id,
3612 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
4562236b
HW
3613 if (r) {
3614 DRM_ERROR("Failed to add hpd irq id!\n");
3615 return r;
3616 }
3617
3618 register_hpd_handlers(adev);
3619
3620 return 0;
3621}
3622
ff5ef992
AD
3623/* Register IRQ sources and initialize IRQ callbacks */
3624static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3625{
3626 struct dc *dc = adev->dm.dc;
3627 struct common_irq_params *c_irq_params;
3628 struct dc_interrupt_params int_params = {0};
3629 int r;
3630 int i;
660d5406
WL
3631#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3632 static const unsigned int vrtl_int_srcid[] = {
3633 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3634 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3635 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3636 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3637 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3638 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3639 };
3640#endif
ff5ef992
AD
3641
3642 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3643 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3644
1f6010a9
DF
3645 /*
3646 * Actions of amdgpu_irq_add_id():
ff5ef992
AD
3647 * 1. Register a set() function with base driver.
3648 * Base driver will call set() function to enable/disable an
3649 * interrupt in DC hardware.
3650 * 2. Register amdgpu_dm_irq_handler().
3651 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3652 * coming from DC hardware.
3653 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3654 * for acknowledging and handling.
1f6010a9 3655 */
ff5ef992
AD
3656
3657 /* Use VSTARTUP interrupt */
3658 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3659 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3660 i++) {
3760f76c 3661 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
ff5ef992
AD
3662
3663 if (r) {
3664 DRM_ERROR("Failed to add crtc irq id!\n");
3665 return r;
3666 }
3667
3668 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3669 int_params.irq_source =
3670 dc_interrupt_to_irq_source(dc, i, 0);
3671
3672 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3673
3674 c_irq_params->adev = adev;
3675 c_irq_params->irq_src = int_params.irq_source;
3676
2346ef47
NK
3677 amdgpu_dm_irq_register_interrupt(
3678 adev, &int_params, dm_crtc_high_irq, c_irq_params);
3679 }
3680
86bc2219
WL
3681 /* Use otg vertical line interrupt */
3682#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
660d5406
WL
3683 for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3684 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3685 vrtl_int_srcid[i], &adev->vline0_irq);
86bc2219
WL
3686
3687 if (r) {
3688 DRM_ERROR("Failed to add vline0 irq id!\n");
3689 return r;
3690 }
3691
3692 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3693 int_params.irq_source =
660d5406
WL
3694 dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3695
3696 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3697 DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3698 break;
3699 }
86bc2219
WL
3700
3701 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3702 - DC_IRQ_SOURCE_DC1_VLINE0];
3703
3704 c_irq_params->adev = adev;
3705 c_irq_params->irq_src = int_params.irq_source;
3706
3707 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3708 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3709 }
3710#endif
3711
2346ef47
NK
3712 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3713 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3714 * to trigger at end of each vblank, regardless of state of the lock,
3715 * matching DCE behaviour.
3716 */
3717 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3718 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3719 i++) {
3720 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3721
3722 if (r) {
3723 DRM_ERROR("Failed to add vupdate irq id!\n");
3724 return r;
3725 }
3726
3727 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3728 int_params.irq_source =
3729 dc_interrupt_to_irq_source(dc, i, 0);
3730
3731 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3732
3733 c_irq_params->adev = adev;
3734 c_irq_params->irq_src = int_params.irq_source;
3735
ff5ef992 3736 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2346ef47 3737 dm_vupdate_high_irq, c_irq_params);
d2574c33
MK
3738 }
3739
ff5ef992
AD
3740 /* Use GRPH_PFLIP interrupt */
3741 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
de95753c 3742 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
ff5ef992 3743 i++) {
3760f76c 3744 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
ff5ef992
AD
3745 if (r) {
3746 DRM_ERROR("Failed to add page flip irq id!\n");
3747 return r;
3748 }
3749
3750 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3751 int_params.irq_source =
3752 dc_interrupt_to_irq_source(dc, i, 0);
3753
3754 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3755
3756 c_irq_params->adev = adev;
3757 c_irq_params->irq_src = int_params.irq_source;
3758
3759 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3760 dm_pflip_high_irq, c_irq_params);
3761
3762 }
3763
81927e28
JS
3764 /* HPD */
3765 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3766 &adev->hpd_irq);
3767 if (r) {
3768 DRM_ERROR("Failed to add hpd irq id!\n");
3769 return r;
3770 }
a08f16cf 3771
81927e28 3772 register_hpd_handlers(adev);
a08f16cf 3773
81927e28
JS
3774 return 0;
3775}
3776/* Register Outbox IRQ sources and initialize IRQ callbacks */
3777static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3778{
3779 struct dc *dc = adev->dm.dc;
3780 struct common_irq_params *c_irq_params;
3781 struct dc_interrupt_params int_params = {0};
3782 int r, i;
3783
3784 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3785 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3786
3787 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3788 &adev->dmub_outbox_irq);
3789 if (r) {
3790 DRM_ERROR("Failed to add outbox irq id!\n");
3791 return r;
3792 }
3793
3794 if (dc->ctx->dmub_srv) {
3795 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3796 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
a08f16cf 3797 int_params.irq_source =
81927e28 3798 dc_interrupt_to_irq_source(dc, i, 0);
a08f16cf 3799
81927e28 3800 c_irq_params = &adev->dm.dmub_outbox_params[0];
a08f16cf
LHM
3801
3802 c_irq_params->adev = adev;
3803 c_irq_params->irq_src = int_params.irq_source;
3804
3805 amdgpu_dm_irq_register_interrupt(adev, &int_params,
81927e28 3806 dm_dmub_outbox1_low_irq, c_irq_params);
ff5ef992
AD
3807 }
3808
ff5ef992
AD
3809 return 0;
3810}
ff5ef992 3811
eb3dc897
NK
3812/*
3813 * Acquires the lock for the atomic state object and returns
3814 * the new atomic state.
3815 *
3816 * This should only be called during atomic check.
3817 */
17ce8a69
RL
3818int dm_atomic_get_state(struct drm_atomic_state *state,
3819 struct dm_atomic_state **dm_state)
eb3dc897
NK
3820{
3821 struct drm_device *dev = state->dev;
1348969a 3822 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897
NK
3823 struct amdgpu_display_manager *dm = &adev->dm;
3824 struct drm_private_state *priv_state;
eb3dc897
NK
3825
3826 if (*dm_state)
3827 return 0;
3828
eb3dc897
NK
3829 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3830 if (IS_ERR(priv_state))
3831 return PTR_ERR(priv_state);
3832
3833 *dm_state = to_dm_atomic_state(priv_state);
3834
3835 return 0;
3836}
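/*
 * Hedged usage sketch (illustrative only, not a new API): a caller inside
 * atomic check typically holds a NULL-initialized cache pointer and lets this
 * helper populate it once:
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *	// dm_state->context can now be inspected
 *
 * Repeat calls with the same cache pointer return 0 immediately, so the
 * private-object lock is only taken on first use.
 */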
3837
dfd84d90 3838static struct dm_atomic_state *
eb3dc897
NK
3839dm_atomic_get_new_state(struct drm_atomic_state *state)
3840{
3841 struct drm_device *dev = state->dev;
1348969a 3842 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897
NK
3843 struct amdgpu_display_manager *dm = &adev->dm;
3844 struct drm_private_obj *obj;
3845 struct drm_private_state *new_obj_state;
3846 int i;
3847
3848 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3849 if (obj->funcs == dm->atomic_obj.funcs)
3850 return to_dm_atomic_state(new_obj_state);
3851 }
3852
3853 return NULL;
3854}
3855
eb3dc897
NK
3856static struct drm_private_state *
3857dm_atomic_duplicate_state(struct drm_private_obj *obj)
3858{
3859 struct dm_atomic_state *old_state, *new_state;
3860
3861 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3862 if (!new_state)
3863 return NULL;
3864
3865 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3866
813d20dc
AW
3867 old_state = to_dm_atomic_state(obj->state);
3868
3869 if (old_state && old_state->context)
3870 new_state->context = dc_copy_state(old_state->context);
3871
eb3dc897
NK
3872 if (!new_state->context) {
3873 kfree(new_state);
3874 return NULL;
3875 }
3876
eb3dc897
NK
3877 return &new_state->base;
3878}
3879
3880static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3881 struct drm_private_state *state)
3882{
3883 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3884
3885 if (dm_state && dm_state->context)
3886 dc_release_state(dm_state->context);
3887
3888 kfree(dm_state);
3889}
3890
3891static struct drm_private_state_funcs dm_atomic_state_funcs = {
3892 .atomic_duplicate_state = dm_atomic_duplicate_state,
3893 .atomic_destroy_state = dm_atomic_destroy_state,
3894};
3895
4562236b
HW
3896static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3897{
eb3dc897 3898 struct dm_atomic_state *state;
4562236b
HW
3899 int r;
3900
3901 adev->mode_info.mode_config_initialized = true;
3902
4a580877
LT
3903 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3904 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
4562236b 3905
4a580877
LT
3906 adev_to_drm(adev)->mode_config.max_width = 16384;
3907 adev_to_drm(adev)->mode_config.max_height = 16384;
4562236b 3908
4a580877 3909 adev_to_drm(adev)->mode_config.preferred_depth = 24;
fc25fd60
AD
3910 /* disable prefer shadow for now due to hibernation issues */
3911 adev_to_drm(adev)->mode_config.prefer_shadow = 0;
1f6010a9 3912 /* indicates support for immediate flip */
4a580877 3913 adev_to_drm(adev)->mode_config.async_page_flip = true;
4562236b 3914
4a580877 3915 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
4562236b 3916
eb3dc897
NK
3917 state = kzalloc(sizeof(*state), GFP_KERNEL);
3918 if (!state)
3919 return -ENOMEM;
3920
813d20dc 3921 state->context = dc_create_state(adev->dm.dc);
eb3dc897
NK
3922 if (!state->context) {
3923 kfree(state);
3924 return -ENOMEM;
3925 }
3926
3927 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3928
4a580877 3929 drm_atomic_private_obj_init(adev_to_drm(adev),
8c1a765b 3930 &adev->dm.atomic_obj,
eb3dc897
NK
3931 &state->base,
3932 &dm_atomic_state_funcs);
3933
3dc9b1ce 3934 r = amdgpu_display_modeset_create_props(adev);
b67a468a
DL
3935 if (r) {
3936 dc_release_state(state->context);
3937 kfree(state);
4562236b 3938 return r;
b67a468a 3939 }
4562236b 3940
6ce8f316 3941 r = amdgpu_dm_audio_init(adev);
b67a468a
DL
3942 if (r) {
3943 dc_release_state(state->context);
3944 kfree(state);
6ce8f316 3945 return r;
b67a468a 3946 }
6ce8f316 3947
4562236b
HW
3948 return 0;
3949}
3950
206bbafe
DF
3951#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3952#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
94562810 3953#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
206bbafe 3954
7fd13bae
AD
3955static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3956 int bl_idx)
206bbafe
DF
3957{
3958#if defined(CONFIG_ACPI)
3959 struct amdgpu_dm_backlight_caps caps;
3960
58965855
FS
3961 memset(&caps, 0, sizeof(caps));
3962
7fd13bae 3963 if (dm->backlight_caps[bl_idx].caps_valid)
206bbafe
DF
3964 return;
3965
f9b7f370 3966 amdgpu_acpi_get_backlight_caps(&caps);
206bbafe 3967 if (caps.caps_valid) {
7fd13bae 3968 dm->backlight_caps[bl_idx].caps_valid = true;
94562810
RS
3969 if (caps.aux_support)
3970 return;
7fd13bae
AD
3971 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3972 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
206bbafe 3973 } else {
7fd13bae 3974 dm->backlight_caps[bl_idx].min_input_signal =
206bbafe 3975 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
7fd13bae 3976 dm->backlight_caps[bl_idx].max_input_signal =
206bbafe
DF
3977 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3978 }
3979#else
7fd13bae 3980 if (dm->backlight_caps[bl_idx].aux_support)
94562810
RS
3981 return;
3982
7fd13bae
AD
3983 dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3984 dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
206bbafe
DF
3985#endif
3986}
3987
69d9f427
AM
3988static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3989 unsigned *min, unsigned *max)
94562810 3990{
94562810 3991 if (!caps)
69d9f427 3992 return 0;
94562810 3993
69d9f427
AM
3994 if (caps->aux_support) {
3995 // Firmware limits are in nits, DC API wants millinits.
3996 *max = 1000 * caps->aux_max_input_signal;
3997 *min = 1000 * caps->aux_min_input_signal;
94562810 3998 } else {
69d9f427
AM
3999 // Firmware limits are 8-bit, PWM control is 16-bit.
4000 *max = 0x101 * caps->max_input_signal;
4001 *min = 0x101 * caps->min_input_signal;
94562810 4002 }
69d9f427
AM
4003 return 1;
4004}
94562810 4005
69d9f427
AM
4006static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
4007 uint32_t brightness)
4008{
4009 unsigned min, max;
94562810 4010
69d9f427
AM
4011 if (!get_brightness_range(caps, &min, &max))
4012 return brightness;
4013
4014 // Rescale 0..255 to min..max
4015 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
4016 AMDGPU_MAX_BL_LEVEL);
4017}
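/*
 * Worked example (PWM path, hypothetical caps): with min_input_signal = 12
 * and max_input_signal = 255, get_brightness_range() gives
 * min = 0x101 * 12 = 3084 and max = 0x101 * 255 = 65535, so a user
 * brightness of 128 maps to
 * 3084 + DIV_ROUND_CLOSEST((65535 - 3084) * 128, 255) = 34432.
 */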
4018
4019static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
4020 uint32_t brightness)
4021{
4022 unsigned min, max;
4023
4024 if (!get_brightness_range(caps, &min, &max))
4025 return brightness;
4026
4027 if (brightness < min)
4028 return 0;
4029 // Rescale min..max to 0..255
4030 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
4031 max - min);
94562810
RS
4032}
4033
4052287a 4034static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
7fd13bae 4035 int bl_idx,
3d6c9164 4036 u32 user_brightness)
4562236b 4037{
206bbafe 4038 struct amdgpu_dm_backlight_caps caps;
7fd13bae
AD
4039 struct dc_link *link;
4040 u32 brightness;
94562810 4041 bool rc;
4562236b 4042
7fd13bae
AD
4043 amdgpu_dm_update_backlight_caps(dm, bl_idx);
4044 caps = dm->backlight_caps[bl_idx];
94562810 4045
7fd13bae 4046 dm->brightness[bl_idx] = user_brightness;
1f579254
AD
4047 /* update scratch register */
4048 if (bl_idx == 0)
4049 amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
7fd13bae
AD
4050 brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
4051 link = (struct dc_link *)dm->backlight_link[bl_idx];
94562810 4052
3d6c9164 4053 /* Change brightness based on AUX property */
118b4627 4054 if (caps.aux_support) {
7fd13bae
AD
4055 rc = dc_link_set_backlight_level_nits(link, true, brightness,
4056 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
4057 if (!rc)
4058 DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
118b4627 4059 } else {
7fd13bae
AD
4060 rc = dc_link_set_backlight_level(link, brightness, 0);
4061 if (!rc)
4062 DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
118b4627 4063 }
94562810 4064
4052287a
S
4065 if (rc)
4066 dm->actual_brightness[bl_idx] = user_brightness;
4562236b
HW
4067}
4068
3d6c9164 4069static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
4562236b 4070{
620a0d27 4071 struct amdgpu_display_manager *dm = bl_get_data(bd);
7fd13bae 4072 int i;
3d6c9164 4073
7fd13bae
AD
4074 for (i = 0; i < dm->num_of_edps; i++) {
4075 if (bd == dm->backlight_dev[i])
4076 break;
4077 }
4078 if (i >= AMDGPU_DM_MAX_NUM_EDP)
4079 i = 0;
4080 amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
3d6c9164
AD
4081
4082 return 0;
4083}
4084
7fd13bae
AD
4085static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
4086 int bl_idx)
3d6c9164 4087{
0ad3e64e 4088 struct amdgpu_dm_backlight_caps caps;
7fd13bae 4089 struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
0ad3e64e 4090
7fd13bae
AD
4091 amdgpu_dm_update_backlight_caps(dm, bl_idx);
4092 caps = dm->backlight_caps[bl_idx];
620a0d27 4093
0ad3e64e 4094 if (caps.aux_support) {
0ad3e64e
AD
4095 u32 avg, peak;
4096 bool rc;
4097
4098 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
4099 if (!rc)
7fd13bae 4100 return dm->brightness[bl_idx];
0ad3e64e
AD
4101 return convert_brightness_to_user(&caps, avg);
4102 } else {
7fd13bae 4103 int ret = dc_link_get_backlight_level(link);
0ad3e64e
AD
4104
4105 if (ret == DC_ERROR_UNEXPECTED)
7fd13bae 4106 return dm->brightness[bl_idx];
0ad3e64e
AD
4107 return convert_brightness_to_user(&caps, ret);
4108 }
4562236b
HW
4109}
4110
3d6c9164
AD
4111static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4112{
4113 struct amdgpu_display_manager *dm = bl_get_data(bd);
7fd13bae 4114 int i;
3d6c9164 4115
7fd13bae
AD
4116 for (i = 0; i < dm->num_of_edps; i++) {
4117 if (bd == dm->backlight_dev[i])
4118 break;
4119 }
4120 if (i >= AMDGPU_DM_MAX_NUM_EDP)
4121 i = 0;
4122 return amdgpu_dm_backlight_get_level(dm, i);
3d6c9164
AD
4123}
4124
4562236b 4125static const struct backlight_ops amdgpu_dm_backlight_ops = {
bb264220 4126 .options = BL_CORE_SUSPENDRESUME,
4562236b
HW
4127 .get_brightness = amdgpu_dm_backlight_get_brightness,
4128 .update_status = amdgpu_dm_backlight_update_status,
4129};
4130
7578ecda
AD
4131static void
4132amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4562236b
HW
4133{
4134 char bl_name[16];
4135 struct backlight_properties props = { 0 };
4136
7fd13bae
AD
4137 amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4138 dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
206bbafe 4139
4562236b 4140 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
53a53f86 4141 props.brightness = AMDGPU_MAX_BL_LEVEL;
4562236b
HW
4142 props.type = BACKLIGHT_RAW;
4143
4144 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
7fd13bae 4145 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4562236b 4146
7fd13bae
AD
4147 dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4148 adev_to_drm(dm->adev)->dev,
4149 dm,
4150 &amdgpu_dm_backlight_ops,
4151 &props);
4562236b 4152
7fd13bae 4153 if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4562236b
HW
4154 DRM_ERROR("DM: Backlight registration failed!\n");
4155 else
f1ad2f5e 4156 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4562236b 4157}
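/*
 * e.g. on the first GPU (primary->index == 0) the first eDP registers as
 * "amdgpu_bl0" and a second eDP as "amdgpu_bl1", since num_of_edps is
 * incremented for each registered backlight link.
 */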
4562236b 4158
df534fff 4159static int initialize_plane(struct amdgpu_display_manager *dm,
b2fddb13 4160 struct amdgpu_mode_info *mode_info, int plane_id,
cc1fec57
NK
4161 enum drm_plane_type plane_type,
4162 const struct dc_plane_cap *plane_cap)
df534fff 4163{
f180b4bc 4164 struct drm_plane *plane;
df534fff
S
4165 unsigned long possible_crtcs;
4166 int ret = 0;
4167
f180b4bc 4168 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
df534fff
S
4169 if (!plane) {
4170 DRM_ERROR("KMS: Failed to allocate plane\n");
4171 return -ENOMEM;
4172 }
b2fddb13 4173 plane->type = plane_type;
df534fff
S
4174
4175 /*
b2fddb13
NK
4176 * HACK: IGT tests expect that the primary plane for a CRTC
4177 * can only have one possible CRTC. Only expose support for
4178 * any CRTC if they're not going to be used as a primary plane
4179 * for a CRTC - like overlay or underlay planes.
df534fff
S
4180 */
4181 possible_crtcs = 1 << plane_id;
4182 if (plane_id >= dm->dc->caps.max_streams)
4183 possible_crtcs = 0xff;
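	/*
	 * e.g. with max_streams == 4: plane_id 2 (a primary plane) gets
	 * possible_crtcs = 1 << 2 = 0x4 (CRTC 2 only), while plane_id 5
	 * (an overlay) gets 0xff (any CRTC).
	 */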
4184
cc1fec57 4185 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
df534fff
S
4186
4187 if (ret) {
4188 DRM_ERROR("KMS: Failed to initialize plane\n");
54087768 4189 kfree(plane);
df534fff
S
4190 return ret;
4191 }
4192
54087768
NK
4193 if (mode_info)
4194 mode_info->planes[plane_id] = plane;
4195
df534fff
S
4196 return ret;
4197}
4198
89fc8d4e
HW
4199
4200static void register_backlight_device(struct amdgpu_display_manager *dm,
4201 struct dc_link *link)
4202{
89fc8d4e
HW
4203 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4204 link->type != dc_connection_none) {
1f6010a9
DF
4205 /*
4206 * Even if registration fails, we should continue with
89fc8d4e
HW
4207 * DM initialization because not having backlight control
4208 * is better than a black screen.
4209 */
7fd13bae 4210 if (!dm->backlight_dev[dm->num_of_edps])
118b4627 4211 amdgpu_dm_register_backlight_device(dm);
89fc8d4e 4212
7fd13bae 4213 if (dm->backlight_dev[dm->num_of_edps]) {
118b4627
ML
4214 dm->backlight_link[dm->num_of_edps] = link;
4215 dm->num_of_edps++;
4216 }
89fc8d4e 4217 }
89fc8d4e
HW
4218}
4219
4220
1f6010a9
DF
4221/*
4222 * In this architecture, the association
4562236b
HW
4223 * connector -> encoder -> crtc
4224 * is not really required. The crtc and connector will hold the
4225 * display_index as an abstraction to use with the DAL component
4226 *
4227 * Returns 0 on success
4228 */
7578ecda 4229static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4562236b
HW
4230{
4231 struct amdgpu_display_manager *dm = &adev->dm;
df534fff 4232 int32_t i;
c84dec2f 4233 struct amdgpu_dm_connector *aconnector = NULL;
f2a0f5e6 4234 struct amdgpu_encoder *aencoder = NULL;
d4e13b0d 4235 struct amdgpu_mode_info *mode_info = &adev->mode_info;
4562236b 4236 uint32_t link_cnt;
cc1fec57 4237 int32_t primary_planes;
fbbdadf2 4238 enum dc_connection_type new_connection_type = dc_connection_none;
cc1fec57 4239 const struct dc_plane_cap *plane;
9470620e 4240 bool psr_feature_enabled = false;
4562236b 4241
d58159de
AD
4242 dm->display_indexes_num = dm->dc->caps.max_streams;
4243 /* Update the actual used number of crtc */
4244 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4245
4562236b 4246 link_cnt = dm->dc->caps.max_links;
4562236b
HW
4247 if (amdgpu_dm_mode_config_init(dm->adev)) {
4248 DRM_ERROR("DM: Failed to initialize mode config\n");
59d0f396 4249 return -EINVAL;
4562236b
HW
4250 }
4251
b2fddb13
NK
4252 /* There is one primary plane per CRTC */
4253 primary_planes = dm->dc->caps.max_streams;
54087768 4254 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
efa6a8b7 4255
b2fddb13
NK
4256 /*
4257 * Initialize primary planes, implicit planes for legacy IOCTLS.
4258 * Order is reversed to match iteration order in atomic check.
4259 */
4260 for (i = (primary_planes - 1); i >= 0; i--) {
cc1fec57
NK
4261 plane = &dm->dc->caps.planes[i];
4262
b2fddb13 4263 if (initialize_plane(dm, mode_info, i,
cc1fec57 4264 DRM_PLANE_TYPE_PRIMARY, plane)) {
df534fff 4265 DRM_ERROR("KMS: Failed to initialize primary plane\n");
cd8a2ae8 4266 goto fail;
d4e13b0d 4267 }
df534fff 4268 }
92f3ac40 4269
0d579c7e
NK
4270 /*
4271 * Initialize overlay planes, index starting after primary planes.
4272 * These planes have a higher DRM index than the primary planes since
4273 * they should be considered as having a higher z-order.
4274 * Order is reversed to match iteration order in atomic check.
cc1fec57
NK
4275 *
4276 * Only support DCN for now, and only expose one so we don't encourage
4277 * userspace to use up all the pipes.
0d579c7e 4278 */
cc1fec57
NK
4279 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4280 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4281
4282 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4283 continue;
4284
4285 if (!plane->blends_with_above || !plane->blends_with_below)
4286 continue;
4287
ea36ad34 4288 if (!plane->pixel_format_support.argb8888)
cc1fec57
NK
4289 continue;
4290
54087768 4291 if (initialize_plane(dm, NULL, primary_planes + i,
cc1fec57 4292 DRM_PLANE_TYPE_OVERLAY, plane)) {
0d579c7e 4293 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
cd8a2ae8 4294 goto fail;
d4e13b0d 4295 }
cc1fec57
NK
4296
4297 /* Only create one overlay plane. */
4298 break;
d4e13b0d 4299 }
4562236b 4300
d4e13b0d 4301 for (i = 0; i < dm->dc->caps.max_streams; i++)
f180b4bc 4302 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4562236b 4303 DRM_ERROR("KMS: Failed to initialize crtc\n");
cd8a2ae8 4304 goto fail;
4562236b 4305 }
4562236b 4306
81927e28 4307 /* Use Outbox interrupt */
1d789535 4308 switch (adev->ip_versions[DCE_HWIP][0]) {
c08182f2
AD
4309 case IP_VERSION(3, 0, 0):
4310 case IP_VERSION(3, 1, 2):
4311 case IP_VERSION(3, 1, 3):
e850f6b1 4312 case IP_VERSION(3, 1, 4):
b5b8ed44 4313 case IP_VERSION(3, 1, 5):
de7cc1b4 4314 case IP_VERSION(3, 1, 6):
577359ca
AP
4315 case IP_VERSION(3, 2, 0):
4316 case IP_VERSION(3, 2, 1):
c08182f2 4317 case IP_VERSION(2, 1, 0):
81927e28
JS
4318 if (register_outbox_irq_handlers(dm->adev)) {
4319 DRM_ERROR("DM: Failed to initialize IRQ\n");
4320 goto fail;
4321 }
4322 break;
4323 default:
c08182f2 4324 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
1d789535 4325 adev->ip_versions[DCE_HWIP][0]);
81927e28 4326 }
9470620e
NK
4327
4328 /* Determine whether to enable PSR support by default. */
4329 if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4330 switch (adev->ip_versions[DCE_HWIP][0]) {
4331 case IP_VERSION(3, 1, 2):
4332 case IP_VERSION(3, 1, 3):
e850f6b1 4333 case IP_VERSION(3, 1, 4):
b5b8ed44 4334 case IP_VERSION(3, 1, 5):
de7cc1b4 4335 case IP_VERSION(3, 1, 6):
577359ca
AP
4336 case IP_VERSION(3, 2, 0):
4337 case IP_VERSION(3, 2, 1):
9470620e
NK
4338 psr_feature_enabled = true;
4339 break;
4340 default:
4341 psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4342 break;
4343 }
4344 }
81927e28 4345
4562236b
HW
4346 /* loops over all connectors on the board */
4347 for (i = 0; i < link_cnt; i++) {
89fc8d4e 4348 struct dc_link *link = NULL;
4562236b
HW
4349
4350 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4351 DRM_ERROR(
4352 "KMS: Cannot support more than %d display indexes\n",
4353 AMDGPU_DM_MAX_DISPLAY_INDEX);
4354 continue;
4355 }
4356
4357 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4358 if (!aconnector)
cd8a2ae8 4359 goto fail;
4562236b
HW
4360
4361 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
8440c304 4362 if (!aencoder)
cd8a2ae8 4363 goto fail;
4562236b
HW
4364
4365 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4366 DRM_ERROR("KMS: Failed to initialize encoder\n");
cd8a2ae8 4367 goto fail;
4562236b
HW
4368 }
4369
4370 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4371 DRM_ERROR("KMS: Failed to initialize connector\n");
cd8a2ae8 4372 goto fail;
4562236b
HW
4373 }
4374
89fc8d4e
HW
4375 link = dc_get_link_at_index(dm->dc, i);
4376
fbbdadf2
BL
4377 if (!dc_link_detect_sink(link, &new_connection_type))
4378 DRM_ERROR("KMS: Failed to detect connector\n");
4379
4380 if (aconnector->base.force && new_connection_type == dc_connection_none) {
4381 emulated_link_detect(link);
4382 amdgpu_dm_update_connector_after_detect(aconnector);
15c735e7
WL
4383 } else {
4384 bool ret = false;
fbbdadf2 4385
15c735e7
WL
4386 mutex_lock(&dm->dc_lock);
4387 ret = dc_link_detect(link, DETECT_REASON_BOOT);
4388 mutex_unlock(&dm->dc_lock);
4389
4390 if (ret) {
4391 amdgpu_dm_update_connector_after_detect(aconnector);
4392 register_backlight_device(dm, link);
89fc8d4e 4393
15c735e7
WL
4394 if (dm->num_of_edps)
4395 update_connector_ext_caps(aconnector);
89fc8d4e 4396
15c735e7
WL
4397 if (psr_feature_enabled)
4398 amdgpu_dm_set_psr_caps(link);
89fc8d4e 4399
15c735e7
WL
4400 /* TODO: Fix vblank control helpers to delay PSR entry to allow this when
4401 * PSR is also supported.
4402 */
4403 if (link->psr_settings.psr_feature_enabled)
4404 adev_to_drm(adev)->vblank_disable_immediate = false;
4405 }
4406 }
4562236b
HW
4407 }
4408
4409 /* Software is initialized. Now we can register interrupt handlers. */
4410 switch (adev->asic_type) {
55e56389
MR
4411#if defined(CONFIG_DRM_AMD_DC_SI)
4412 case CHIP_TAHITI:
4413 case CHIP_PITCAIRN:
4414 case CHIP_VERDE:
4415 case CHIP_OLAND:
4416 if (dce60_register_irq_handlers(dm->adev)) {
4417 DRM_ERROR("DM: Failed to initialize IRQ\n");
4418 goto fail;
4419 }
4420 break;
4421#endif
4562236b
HW
4422 case CHIP_BONAIRE:
4423 case CHIP_HAWAII:
cd4b356f
AD
4424 case CHIP_KAVERI:
4425 case CHIP_KABINI:
4426 case CHIP_MULLINS:
4562236b
HW
4427 case CHIP_TONGA:
4428 case CHIP_FIJI:
4429 case CHIP_CARRIZO:
4430 case CHIP_STONEY:
4431 case CHIP_POLARIS11:
4432 case CHIP_POLARIS10:
b264d345 4433 case CHIP_POLARIS12:
7737de91 4434 case CHIP_VEGAM:
2c8ad2d5 4435 case CHIP_VEGA10:
2325ff30 4436 case CHIP_VEGA12:
1fe6bf2f 4437 case CHIP_VEGA20:
4562236b
HW
4438 if (dce110_register_irq_handlers(dm->adev)) {
4439 DRM_ERROR("DM: Failed to initialize IRQ\n");
cd8a2ae8 4440 goto fail;
4562236b
HW
4441 }
4442 break;
4443 default:
1d789535 4444 switch (adev->ip_versions[DCE_HWIP][0]) {
559f591d
AD
4445 case IP_VERSION(1, 0, 0):
4446 case IP_VERSION(1, 0, 1):
c08182f2
AD
4447 case IP_VERSION(2, 0, 2):
4448 case IP_VERSION(2, 0, 3):
4449 case IP_VERSION(2, 0, 0):
4450 case IP_VERSION(2, 1, 0):
4451 case IP_VERSION(3, 0, 0):
4452 case IP_VERSION(3, 0, 2):
4453 case IP_VERSION(3, 0, 3):
4454 case IP_VERSION(3, 0, 1):
4455 case IP_VERSION(3, 1, 2):
4456 case IP_VERSION(3, 1, 3):
e850f6b1 4457 case IP_VERSION(3, 1, 4):
b5b8ed44 4458 case IP_VERSION(3, 1, 5):
de7cc1b4 4459 case IP_VERSION(3, 1, 6):
577359ca
AP
4460 case IP_VERSION(3, 2, 0):
4461 case IP_VERSION(3, 2, 1):
c08182f2
AD
4462 if (dcn10_register_irq_handlers(dm->adev)) {
4463 DRM_ERROR("DM: Failed to initialize IRQ\n");
4464 goto fail;
4465 }
4466 break;
4467 default:
2cbc6f42 4468 DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
1d789535 4469 adev->ip_versions[DCE_HWIP][0]);
2cbc6f42 4470 goto fail;
c08182f2 4471 }
2cbc6f42 4472 break;
4562236b
HW
4473 }
4474
4562236b 4475 return 0;
cd8a2ae8 4476fail:
4562236b 4477 kfree(aencoder);
4562236b 4478 kfree(aconnector);
54087768 4479
59d0f396 4480 return -EINVAL;
4562236b
HW
4481}
4482
7578ecda 4483static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4562236b 4484{
eb3dc897 4485 drm_atomic_private_obj_fini(&dm->atomic_obj);
4562236b
HW
4486 return;
4487}
4488
4489/******************************************************************************
4490 * amdgpu_display_funcs functions
4491 *****************************************************************************/
4492
1f6010a9 4493/*
4562236b
HW
4494 * dm_bandwidth_update - program display watermarks
4495 *
4496 * @adev: amdgpu_device pointer
4497 *
4498 * Calculate and program the display watermarks and line buffer allocation.
4499 */
4500static void dm_bandwidth_update(struct amdgpu_device *adev)
4501{
49c07a99 4502 /* TODO: implement later */
4562236b
HW
4503}
4504
39cc5be2 4505static const struct amdgpu_display_funcs dm_display_funcs = {
4562236b
HW
4506 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4507 .vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
7b42573b
HW
4508 .backlight_set_level = NULL, /* never called for DC */
4509 .backlight_get_level = NULL, /* never called for DC */
4562236b
HW
4510 .hpd_sense = NULL, /* called unconditionally */
4511 .hpd_set_polarity = NULL, /* called unconditionally */
4512 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4562236b
HW
4513 .page_flip_get_scanoutpos =
4514 dm_crtc_get_scanoutpos,/* called unconditionally */
4515 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4516 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4562236b
HW
4517};
4518
4519#if defined(CONFIG_DEBUG_KERNEL_DC)
4520
3ee6b26b
AD
4521static ssize_t s3_debug_store(struct device *device,
4522 struct device_attribute *attr,
4523 const char *buf,
4524 size_t count)
4562236b
HW
4525{
4526 int ret;
4527 int s3_state;
ef1de361 4528 struct drm_device *drm_dev = dev_get_drvdata(device);
1348969a 4529 struct amdgpu_device *adev = drm_to_adev(drm_dev);
4562236b
HW
4530
4531 ret = kstrtoint(buf, 0, &s3_state);
4532
4533 if (ret == 0) {
4534 if (s3_state) {
4535 dm_resume(adev);
4a580877 4536 drm_kms_helper_hotplug_event(adev_to_drm(adev));
4562236b
HW
4537 } else
4538 dm_suspend(adev);
4539 }
4540
4541 return ret == 0 ? count : 0;
4542}
4543
4544DEVICE_ATTR_WO(s3_debug);
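/*
 * Illustrative use (the sysfs path is an assumption and varies per card):
 * writing to this attribute fakes an S3 cycle for testing, e.g.
 *
 *	echo 0 > /sys/class/drm/card0/device/s3_debug   (suspend DM)
 *	echo 1 > /sys/class/drm/card0/device/s3_debug   (resume + hotplug)
 */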
4545
4546#endif
4547
4548static int dm_early_init(void *handle)
4549{
4550 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4551
4562236b 4552 switch (adev->asic_type) {
55e56389
MR
4553#if defined(CONFIG_DRM_AMD_DC_SI)
4554 case CHIP_TAHITI:
4555 case CHIP_PITCAIRN:
4556 case CHIP_VERDE:
4557 adev->mode_info.num_crtc = 6;
4558 adev->mode_info.num_hpd = 6;
4559 adev->mode_info.num_dig = 6;
4560 break;
4561 case CHIP_OLAND:
4562 adev->mode_info.num_crtc = 2;
4563 adev->mode_info.num_hpd = 2;
4564 adev->mode_info.num_dig = 2;
4565 break;
4566#endif
4562236b
HW
4567 case CHIP_BONAIRE:
4568 case CHIP_HAWAII:
4569 adev->mode_info.num_crtc = 6;
4570 adev->mode_info.num_hpd = 6;
4571 adev->mode_info.num_dig = 6;
4562236b 4572 break;
cd4b356f
AD
4573 case CHIP_KAVERI:
4574 adev->mode_info.num_crtc = 4;
4575 adev->mode_info.num_hpd = 6;
4576 adev->mode_info.num_dig = 7;
cd4b356f
AD
4577 break;
4578 case CHIP_KABINI:
4579 case CHIP_MULLINS:
4580 adev->mode_info.num_crtc = 2;
4581 adev->mode_info.num_hpd = 6;
4582 adev->mode_info.num_dig = 6;
cd4b356f 4583 break;
4562236b
HW
4584 case CHIP_FIJI:
4585 case CHIP_TONGA:
4586 adev->mode_info.num_crtc = 6;
4587 adev->mode_info.num_hpd = 6;
4588 adev->mode_info.num_dig = 7;
4562236b
HW
4589 break;
4590 case CHIP_CARRIZO:
4591 adev->mode_info.num_crtc = 3;
4592 adev->mode_info.num_hpd = 6;
4593 adev->mode_info.num_dig = 9;
4562236b
HW
4594 break;
4595 case CHIP_STONEY:
4596 adev->mode_info.num_crtc = 2;
4597 adev->mode_info.num_hpd = 6;
4598 adev->mode_info.num_dig = 9;
4562236b
HW
4599 break;
4600 case CHIP_POLARIS11:
b264d345 4601 case CHIP_POLARIS12:
4562236b
HW
4602 adev->mode_info.num_crtc = 5;
4603 adev->mode_info.num_hpd = 5;
4604 adev->mode_info.num_dig = 5;
4562236b
HW
4605 break;
4606 case CHIP_POLARIS10:
7737de91 4607 case CHIP_VEGAM:
4562236b
HW
4608 adev->mode_info.num_crtc = 6;
4609 adev->mode_info.num_hpd = 6;
4610 adev->mode_info.num_dig = 6;
4562236b 4611 break;
2c8ad2d5 4612 case CHIP_VEGA10:
2325ff30 4613 case CHIP_VEGA12:
1fe6bf2f 4614 case CHIP_VEGA20:
2c8ad2d5
AD
4615 adev->mode_info.num_crtc = 6;
4616 adev->mode_info.num_hpd = 6;
4617 adev->mode_info.num_dig = 6;
4618 break;
4562236b 4619 default:
cae5c1ab 4620
1d789535 4621 switch (adev->ip_versions[DCE_HWIP][0]) {
c08182f2
AD
4622 case IP_VERSION(2, 0, 2):
4623 case IP_VERSION(3, 0, 0):
4624 adev->mode_info.num_crtc = 6;
4625 adev->mode_info.num_hpd = 6;
4626 adev->mode_info.num_dig = 6;
4627 break;
4628 case IP_VERSION(2, 0, 0):
4629 case IP_VERSION(3, 0, 2):
4630 adev->mode_info.num_crtc = 5;
4631 adev->mode_info.num_hpd = 5;
4632 adev->mode_info.num_dig = 5;
4633 break;
4634 case IP_VERSION(2, 0, 3):
4635 case IP_VERSION(3, 0, 3):
4636 adev->mode_info.num_crtc = 2;
4637 adev->mode_info.num_hpd = 2;
4638 adev->mode_info.num_dig = 2;
4639 break;
559f591d
AD
4640 case IP_VERSION(1, 0, 0):
4641 case IP_VERSION(1, 0, 1):
c08182f2
AD
4642 case IP_VERSION(3, 0, 1):
4643 case IP_VERSION(2, 1, 0):
4644 case IP_VERSION(3, 1, 2):
4645 case IP_VERSION(3, 1, 3):
e850f6b1 4646 case IP_VERSION(3, 1, 4):
b5b8ed44 4647 case IP_VERSION(3, 1, 5):
de7cc1b4 4648 case IP_VERSION(3, 1, 6):
577359ca
AP
4649 case IP_VERSION(3, 2, 0):
4650 case IP_VERSION(3, 2, 1):
c08182f2
AD
4651 adev->mode_info.num_crtc = 4;
4652 adev->mode_info.num_hpd = 4;
4653 adev->mode_info.num_dig = 4;
4654 break;
4655 default:
2cbc6f42 4656 DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
1d789535 4657 adev->ip_versions[DCE_HWIP][0]);
2cbc6f42 4658 return -EINVAL;
c08182f2 4659 }
2cbc6f42 4660 break;
4562236b
HW
4661 }
4662
c8dd5715
MD
4663 amdgpu_dm_set_irq_funcs(adev);
4664
39cc5be2
AD
4665 if (adev->mode_info.funcs == NULL)
4666 adev->mode_info.funcs = &dm_display_funcs;
4667
1f6010a9
DF
4668 /*
4669 * Note: Do NOT change adev->audio_endpt_rreg and
4562236b 4670 * adev->audio_endpt_wreg because they are initialised in
1f6010a9
DF
4671 * amdgpu_device_init()
4672 */
4562236b
HW
4673#if defined(CONFIG_DEBUG_KERNEL_DC)
4674 device_create_file(
4a580877 4675 adev_to_drm(adev)->dev,
4562236b
HW
4676 &dev_attr_s3_debug);
4677#endif
4678
4679 return 0;
4680}
4681
9b690ef3 4682static bool modeset_required(struct drm_crtc_state *crtc_state,
0971c40e
HW
4683 struct dc_stream_state *new_stream,
4684 struct dc_stream_state *old_stream)
9b690ef3 4685{
2afda735 4686 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
e7b07cee
HW
4687}
4688
4689static bool modereset_required(struct drm_crtc_state *crtc_state)
4690{
2afda735 4691 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
e7b07cee
HW
4692}
4693
7578ecda 4694static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
e7b07cee
HW
4695{
4696 drm_encoder_cleanup(encoder);
4697 kfree(encoder);
4698}
4699
4700static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4701 .destroy = amdgpu_dm_encoder_destroy,
4702};
4703
5d945cbc
RS
4704static int
4705fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4706 const enum surface_pixel_format format,
4707 enum dc_color_space *color_space)
6300b3bd 4708{
5d945cbc 4709 bool full_range;
6300b3bd 4710
5d945cbc
RS
4711 *color_space = COLOR_SPACE_SRGB;
4712
4713 /* DRM color properties only affect non-RGB formats. */
4714 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4715 return 0;
4716
4717 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4718
4719 switch (plane_state->color_encoding) {
4720 case DRM_COLOR_YCBCR_BT601:
4721 if (full_range)
4722 *color_space = COLOR_SPACE_YCBCR601;
4723 else
4724 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
6300b3bd
MK
4725 break;
4726
5d945cbc
RS
4727 case DRM_COLOR_YCBCR_BT709:
4728 if (full_range)
4729 *color_space = COLOR_SPACE_YCBCR709;
4730 else
4731 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
6300b3bd
MK
4732 break;
4733
5d945cbc
RS
4734 case DRM_COLOR_YCBCR_BT2020:
4735 if (full_range)
4736 *color_space = COLOR_SPACE_2020_YCBCR;
4737 else
4738 return -EINVAL;
6300b3bd 4739 break;
6300b3bd 4740
5d945cbc
RS
4741 default:
4742 return -EINVAL;
4743 }
6300b3bd 4744
5d945cbc 4745 return 0;
6300b3bd
MK
4746}
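/*
 * e.g. an NV12 plane (SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb) with
 * DRM_COLOR_YCBCR_BT709 encoding and limited range resolves to
 * COLOR_SPACE_YCBCR709_LIMITED, while any RGB format short-circuits to
 * COLOR_SPACE_SRGB regardless of the DRM color properties.
 */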
4747
5d945cbc
RS
4748static int
4749fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4750 const struct drm_plane_state *plane_state,
4751 const uint64_t tiling_flags,
4752 struct dc_plane_info *plane_info,
4753 struct dc_plane_address *address,
4754 bool tmz_surface,
4755 bool force_disable_dcc)
e7b07cee 4756{
5d945cbc
RS
4757 const struct drm_framebuffer *fb = plane_state->fb;
4758 const struct amdgpu_framebuffer *afb =
4759 to_amdgpu_framebuffer(plane_state->fb);
4760 int ret;
e7b07cee 4761
5d945cbc 4762 memset(plane_info, 0, sizeof(*plane_info));
e7b07cee 4763
5d945cbc
RS
4764 switch (fb->format->format) {
4765 case DRM_FORMAT_C8:
4766 plane_info->format =
4767 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4768 break;
4769 case DRM_FORMAT_RGB565:
4770 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4771 break;
4772 case DRM_FORMAT_XRGB8888:
4773 case DRM_FORMAT_ARGB8888:
4774 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4775 break;
4776 case DRM_FORMAT_XRGB2101010:
4777 case DRM_FORMAT_ARGB2101010:
4778 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4779 break;
4780 case DRM_FORMAT_XBGR2101010:
4781 case DRM_FORMAT_ABGR2101010:
4782 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4783 break;
4784 case DRM_FORMAT_XBGR8888:
4785 case DRM_FORMAT_ABGR8888:
4786 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4787 break;
4788 case DRM_FORMAT_NV21:
4789 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4790 break;
4791 case DRM_FORMAT_NV12:
4792 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4793 break;
4794 case DRM_FORMAT_P010:
4795 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4796 break;
4797 case DRM_FORMAT_XRGB16161616F:
4798 case DRM_FORMAT_ARGB16161616F:
4799 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4800 break;
4801 case DRM_FORMAT_XBGR16161616F:
4802 case DRM_FORMAT_ABGR16161616F:
4803 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4804 break;
4805 case DRM_FORMAT_XRGB16161616:
4806 case DRM_FORMAT_ARGB16161616:
4807 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
4808 break;
4809 case DRM_FORMAT_XBGR16161616:
4810 case DRM_FORMAT_ABGR16161616:
4811 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
4812 break;
4813 default:
4814 DRM_ERROR(
4815 "Unsupported screen format %p4cc\n",
4816 &fb->format->format);
d89f6048 4817 return -EINVAL;
5d945cbc 4818 }
d89f6048 4819
5d945cbc
RS
4820 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4821 case DRM_MODE_ROTATE_0:
4822 plane_info->rotation = ROTATION_ANGLE_0;
4823 break;
4824 case DRM_MODE_ROTATE_90:
4825 plane_info->rotation = ROTATION_ANGLE_90;
4826 break;
4827 case DRM_MODE_ROTATE_180:
4828 plane_info->rotation = ROTATION_ANGLE_180;
4829 break;
4830 case DRM_MODE_ROTATE_270:
4831 plane_info->rotation = ROTATION_ANGLE_270;
4832 break;
4833 default:
4834 plane_info->rotation = ROTATION_ANGLE_0;
4835 break;
4836 }
695af5f9 4837
695af5f9 4838
5d945cbc
RS
4839 plane_info->visible = true;
4840 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
e7b07cee 4841
5d945cbc 4842 plane_info->layer_index = 0;
e7b07cee 4843
5d945cbc
RS
4844 ret = fill_plane_color_attributes(plane_state, plane_info->format,
4845 &plane_info->color_space);
4846 if (ret)
4847 return ret;
e7b07cee 4848
5d945cbc
RS
4849 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4850 plane_info->rotation, tiling_flags,
4851 &plane_info->tiling_info,
4852 &plane_info->plane_size,
4853 &plane_info->dcc, address,
4854 tmz_surface, force_disable_dcc);
4855 if (ret)
4856 return ret;
e7b07cee 4857
5d945cbc
RS
4858 fill_blending_from_plane_state(
4859 plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
4860 &plane_info->global_alpha, &plane_info->global_alpha_value);
e7b07cee 4861
5d945cbc
RS
4862 return 0;
4863}
e7b07cee 4864
5d945cbc
RS
4865static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4866 struct dc_plane_state *dc_plane_state,
4867 struct drm_plane_state *plane_state,
4868 struct drm_crtc_state *crtc_state)
4869{
4870 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4871 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4872 struct dc_scaling_info scaling_info;
4873 struct dc_plane_info plane_info;
4874 int ret;
4875 bool force_disable_dcc = false;
6300b3bd 4876
5d945cbc
RS
4877 ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
4878 if (ret)
4879 return ret;
e7b07cee 4880
5d945cbc
RS
4881 dc_plane_state->src_rect = scaling_info.src_rect;
4882 dc_plane_state->dst_rect = scaling_info.dst_rect;
4883 dc_plane_state->clip_rect = scaling_info.clip_rect;
4884 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
6491f0c0 4885
5d945cbc
RS
4886 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4887 ret = fill_dc_plane_info_and_addr(adev, plane_state,
4888 afb->tiling_flags,
4889 &plane_info,
4890 &dc_plane_state->address,
4891 afb->tmz_surface,
4892 force_disable_dcc);
4893 if (ret)
4894 return ret;
6491f0c0 4895
5d945cbc
RS
4896 dc_plane_state->format = plane_info.format;
4897 dc_plane_state->color_space = plane_info.color_space;
4899 dc_plane_state->plane_size = plane_info.plane_size;
4900 dc_plane_state->rotation = plane_info.rotation;
4901 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4902 dc_plane_state->stereo_format = plane_info.stereo_format;
4903 dc_plane_state->tiling_info = plane_info.tiling_info;
4904 dc_plane_state->visible = plane_info.visible;
4905 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4906 dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
4907 dc_plane_state->global_alpha = plane_info.global_alpha;
4908 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4909 dc_plane_state->dcc = plane_info.dcc;
4910 dc_plane_state->layer_index = plane_info.layer_index; /* Always returns 0 */
4911 dc_plane_state->flip_int_enabled = true;
6491f0c0 4912
695af5f9 4913 /*
5d945cbc
RS
4914 * Always set input transfer function, since plane state is refreshed
4915 * every time.
695af5f9 4916 */
5d945cbc
RS
4917 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4918 if (ret)
4919 return ret;
e7b07cee 4920
695af5f9 4921 return 0;
4562236b 4922}
695af5f9 4923
5d945cbc
RS
4924/**
4925 * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
4926 *
4927 * @plane: DRM plane containing dirty regions that need to be flushed to the eDP
4928 * remote fb
4929 * @old_plane_state: Old state of @plane
4930 * @new_plane_state: New state of @plane
4931 * @crtc_state: New state of CRTC connected to the @plane
4932 * @flip_addrs: DC flip tracking struct, which also tracks dirty rects
4933 *
4934 * For PSR SU, DC informs the DMUB uController of dirty rectangle regions
4935 * (referred to as "damage clips" in DRM nomenclature) that require updating on
4936 * the eDP remote buffer. Specifying the dirty regions is the
4937 * responsibility of amdgpu_dm.
4938 *
4939 * A damage-aware DRM client should fill the FB_DAMAGE_CLIPS property on the
4940 * plane with regions that require flushing to the eDP remote buffer. In
4941 * addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
4942 * implicitly provide damage clips without any client support via the plane
4943 * bounds.
4944 *
4945 * Today, amdgpu_dm only supports the MPO and cursor use cases.
4946 *
4947 * TODO: Also enable for FB_DAMAGE_CLIPS
4948 */
4949static void fill_dc_dirty_rects(struct drm_plane *plane,
4950 struct drm_plane_state *old_plane_state,
4951 struct drm_plane_state *new_plane_state,
4952 struct drm_crtc_state *crtc_state,
4953 struct dc_flip_addrs *flip_addrs)
4954{
4955 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4956 struct rect *dirty_rects = flip_addrs->dirty_rects;
4957 uint32_t num_clips;
4958 bool bb_changed;
4959 bool fb_changed;
4960 uint32_t i = 0;
e7b07cee 4961
5d945cbc 4962 flip_addrs->dirty_rect_count = 0;
7cc191ee
LL
4963
4964 /*
4965 * Cursor plane has its own dirty rect update interface. See
4966 * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data
4967 */
4968 if (plane->type == DRM_PLANE_TYPE_CURSOR)
4969 return;
4970
4971 /*
4972 * Today, we only consider the MPO use case for PSR SU. If MPO is not
4973 * requested and there is a plane update, do a full-frame update (FFU).
4974 */
4975 if (!dm_crtc_state->mpo_requested) {
4976 dirty_rects[0].x = 0;
4977 dirty_rects[0].y = 0;
4978 dirty_rects[0].width = dm_crtc_state->base.mode.crtc_hdisplay;
4979 dirty_rects[0].height = dm_crtc_state->base.mode.crtc_vdisplay;
4980 flip_addrs->dirty_rect_count = 1;
4981 DRM_DEBUG_DRIVER("[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
4982 new_plane_state->plane->base.id,
4983 dm_crtc_state->base.mode.crtc_hdisplay,
4984 dm_crtc_state->base.mode.crtc_vdisplay);
4985 return;
4986 }
4987
4988 /*
4989 * MPO is requested. Add entire plane bounding box to dirty rects if
4990 * flipped to or damaged.
4991 *
4992 * If plane is moved or resized, also add old bounding box to dirty
4993 * rects.
4994 */
4995 num_clips = drm_plane_get_damage_clips_count(new_plane_state);
4996 fb_changed = old_plane_state->fb->base.id !=
4997 new_plane_state->fb->base.id;
4998 bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x ||
4999 old_plane_state->crtc_y != new_plane_state->crtc_y ||
5000 old_plane_state->crtc_w != new_plane_state->crtc_w ||
5001 old_plane_state->crtc_h != new_plane_state->crtc_h);
5002
5003 DRM_DEBUG_DRIVER("[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
5004 new_plane_state->plane->base.id,
5005 bb_changed, fb_changed, num_clips);
5006
5007 if (num_clips || fb_changed || bb_changed) {
5008 dirty_rects[i].x = new_plane_state->crtc_x;
5009 dirty_rects[i].y = new_plane_state->crtc_y;
5010 dirty_rects[i].width = new_plane_state->crtc_w;
5011 dirty_rects[i].height = new_plane_state->crtc_h;
5012 DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
5013 new_plane_state->plane->base.id,
5014 dirty_rects[i].x, dirty_rects[i].y,
5015 dirty_rects[i].width, dirty_rects[i].height);
5016 i += 1;
5017 }
5018
5019 /* Add old plane bounding-box if plane is moved or resized */
5020 if (bb_changed) {
5021 dirty_rects[i].x = old_plane_state->crtc_x;
5022 dirty_rects[i].y = old_plane_state->crtc_y;
5023 dirty_rects[i].width = old_plane_state->crtc_w;
5024 dirty_rects[i].height = old_plane_state->crtc_h;
5025 DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
5026 old_plane_state->plane->base.id,
5027 dirty_rects[i].x, dirty_rects[i].y,
5028 dirty_rects[i].width, dirty_rects[i].height);
5029 i += 1;
5030 }
5031
5032 flip_addrs->dirty_rect_count = i;
5033}
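
/*
 * Illustrative sketch (not part of this driver) of how a damage-aware
 * userspace client could fill the FB_DAMAGE_CLIPS property mentioned in the
 * TODO of the kernel-doc above. The libdrm calls are real; the fd, req and
 * object/property ids are hypothetical placeholders:
 *
 *	struct drm_mode_rect clip = {
 *		.x1 = 64,  .y1 = 64,	// top-left, inclusive
 *		.x2 = 320, .y2 = 240,	// bottom-right, exclusive
 *	};
 *	uint32_t blob_id;
 *
 *	drmModeCreatePropertyBlob(fd, &clip, sizeof(clip), &blob_id);
 *	drmModeAtomicAddProperty(req, plane_id, fb_damage_clips_prop, blob_id);
 *
 * Once amdgpu_dm consumes FB_DAMAGE_CLIPS, such clips could be translated
 * into flip_addrs->dirty_rects instead of the full-plane rectangles above.
 */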
5034
3ee6b26b
AD
5035static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5036 const struct dm_connector_state *dm_state,
5037 struct dc_stream_state *stream)
e7b07cee
HW
5038{
5039 enum amdgpu_rmx_type rmx_type;
5040
5041 struct rect src = { 0 }; /* viewport in composition space */
5042 struct rect dst = { 0 }; /* stream addressable area */
5043
5044 /* no mode. nothing to be done */
5045 if (!mode)
5046 return;
5047
5048 /* Full screen scaling by default */
5049 src.width = mode->hdisplay;
5050 src.height = mode->vdisplay;
5051 dst.width = stream->timing.h_addressable;
5052 dst.height = stream->timing.v_addressable;
5053
f4791779
HW
5054 if (dm_state) {
5055 rmx_type = dm_state->scaling;
5056 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5057 if (src.width * dst.height <
5058 src.height * dst.width) {
5059 /* height needs less upscaling/more downscaling */
5060 dst.width = src.width *
5061 dst.height / src.height;
5062 } else {
5063 /* width needs less upscaling/more downscaling */
5064 dst.height = src.height *
5065 dst.width / src.width;
5066 }
5067 } else if (rmx_type == RMX_CENTER) {
5068 dst = src;
e7b07cee 5069 }
e7b07cee 5070
f4791779
HW
5071 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5072 dst.y = (stream->timing.v_addressable - dst.height) / 2;
e7b07cee 5073
f4791779
HW
5074 if (dm_state->underscan_enable) {
5075 dst.x += dm_state->underscan_hborder / 2;
5076 dst.y += dm_state->underscan_vborder / 2;
5077 dst.width -= dm_state->underscan_hborder;
5078 dst.height -= dm_state->underscan_vborder;
5079 }
e7b07cee
HW
5080 }
5081
5082 stream->src = src;
5083 stream->dst = dst;
5084
4711c033
LT
5085 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
5086 dst.x, dst.y, dst.width, dst.height);
e7b07cee
HW
5087
5088}
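
/*
 * Worked example of the RMX_ASPECT math above (illustrative numbers, not
 * from the driver). Scaling a 1280x1024 source onto a 1920x1080 stream:
 *
 *	src.width * dst.height = 1280 * 1080 = 1382400
 *	src.height * dst.width = 1024 * 1920 = 1966080
 *
 * 1382400 < 1966080, so height needs less upscaling and width is derived:
 *
 *	dst.width  = 1280 * 1080 / 1024 = 1350
 *	dst.height = 1080
 *	dst.x      = (1920 - 1350) / 2  = 285 (pillarboxed, centered)
 *	dst.y      = 0
 */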
5089
3ee6b26b 5090static enum dc_color_depth
42ba01fc 5091convert_color_depth_from_display_info(const struct drm_connector *connector,
cbd14ae7 5092 bool is_y420, int requested_bpc)
e7b07cee 5093{
1bc22f20 5094 uint8_t bpc;
01c22997 5095
1bc22f20
SW
5096 if (is_y420) {
5097 bpc = 8;
5098
5099 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5100 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5101 bpc = 16;
5102 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5103 bpc = 12;
5104 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5105 bpc = 10;
5106 } else {
5107 bpc = (uint8_t)connector->display_info.bpc;
5108 /* Assume 8 bpc by default if no bpc is specified. */
5109 bpc = bpc ? bpc : 8;
5110 }
e7b07cee 5111
cbd14ae7 5112 if (requested_bpc > 0) {
01c22997
NK
5113 /*
5114 * Cap display bpc based on the user requested value.
5115 *
5116 * The value for state->max_bpc may not be correctly updated
5117 * depending on when the connector gets added to the state
5118 * or if this was called outside of atomic check, so it
5119 * can't be used directly.
5120 */
cbd14ae7 5121 bpc = min_t(u8, bpc, requested_bpc);
01c22997 5122
1825fd34
NK
5123 /* Round down to the nearest even number. */
5124 bpc = bpc - (bpc & 1);
5125 }
07e3a1cf 5126
e7b07cee
HW
5127 switch (bpc) {
5128 case 0:
1f6010a9
DF
5129 /*
5130 * Temporary workaround: DRM doesn't parse color depth for
e7b07cee
HW
5131 * EDID revision before 1.4
5132 * TODO: Fix edid parsing
5133 */
5134 return COLOR_DEPTH_888;
5135 case 6:
5136 return COLOR_DEPTH_666;
5137 case 8:
5138 return COLOR_DEPTH_888;
5139 case 10:
5140 return COLOR_DEPTH_101010;
5141 case 12:
5142 return COLOR_DEPTH_121212;
5143 case 14:
5144 return COLOR_DEPTH_141414;
5145 case 16:
5146 return COLOR_DEPTH_161616;
5147 default:
5148 return COLOR_DEPTH_UNDEFINED;
5149 }
5150}
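
/*
 * Worked example of the capping above (illustrative values). A sink
 * advertising 12 bpc with a user-requested max_bpc of 11 yields:
 *
 *	bpc = min(12, 11) = 11
 *	bpc = 11 - (11 & 1) = 10	// round down to even
 *
 * and the function returns COLOR_DEPTH_101010.
 */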
5151
3ee6b26b
AD
5152static enum dc_aspect_ratio
5153get_aspect_ratio(const struct drm_display_mode *mode_in)
e7b07cee 5154{
e11d4147
LSL
5155 /* 1-1 mapping, since both enums follow the HDMI spec. */
5156 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
e7b07cee
HW
5157}
5158
3ee6b26b
AD
5159static enum dc_color_space
5160get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
e7b07cee
HW
5161{
5162 enum dc_color_space color_space = COLOR_SPACE_SRGB;
5163
5164 switch (dc_crtc_timing->pixel_encoding) {
5165 case PIXEL_ENCODING_YCBCR422:
5166 case PIXEL_ENCODING_YCBCR444:
5167 case PIXEL_ENCODING_YCBCR420:
5168 {
5169 /*
5170 * 27030 kHz (27.03 MHz) is the separation point between HDTV and
5171 * SDTV according to the HDMI spec; we use YCbCr709 above it and
5172 * YCbCr601 below it, respectively
5173 */
380604e2 5174 if (dc_crtc_timing->pix_clk_100hz > 270300) {
e7b07cee
HW
5175 if (dc_crtc_timing->flags.Y_ONLY)
5176 color_space =
5177 COLOR_SPACE_YCBCR709_LIMITED;
5178 else
5179 color_space = COLOR_SPACE_YCBCR709;
5180 } else {
5181 if (dc_crtc_timing->flags.Y_ONLY)
5182 color_space =
5183 COLOR_SPACE_YCBCR601_LIMITED;
5184 else
5185 color_space = COLOR_SPACE_YCBCR601;
5186 }
5187
5188 }
5189 break;
5190 case PIXEL_ENCODING_RGB:
5191 color_space = COLOR_SPACE_SRGB;
5192 break;
5193
5194 default:
5195 WARN_ON(1);
5196 break;
5197 }
5198
5199 return color_space;
5200}
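
/*
 * Worked examples of the 270300 cut-off above (units of 100 Hz, i.e.
 * 27.03 MHz), using standard CEA timings:
 *
 *	1080p60: pix_clk_100hz = 1485000 (148.5 MHz) >  270300 -> YCbCr709
 *	480p60:  pix_clk_100hz =  270000 (27.0 MHz)  <= 270300 -> YCbCr601
 */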
5201
ea117312
TA
5202static bool adjust_colour_depth_from_display_info(
5203 struct dc_crtc_timing *timing_out,
5204 const struct drm_display_info *info)
400443e8 5205{
ea117312 5206 enum dc_color_depth depth = timing_out->display_color_depth;
400443e8 5207 int normalized_clk;
400443e8 5208 do {
380604e2 5209 normalized_clk = timing_out->pix_clk_100hz / 10;
400443e8
ML
5210 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5211 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5212 normalized_clk /= 2;
5213 /* Adjust the pixel clock according to the HDMI spec for the given colour depth */
ea117312
TA
5214 switch (depth) {
5215 case COLOR_DEPTH_888:
5216 break;
400443e8
ML
5217 case COLOR_DEPTH_101010:
5218 normalized_clk = (normalized_clk * 30) / 24;
5219 break;
5220 case COLOR_DEPTH_121212:
5221 normalized_clk = (normalized_clk * 36) / 24;
5222 break;
5223 case COLOR_DEPTH_161616:
5224 normalized_clk = (normalized_clk * 48) / 24;
5225 break;
5226 default:
ea117312
TA
5227 /* The above depths are the only ones valid for HDMI. */
5228 return false;
400443e8 5229 }
ea117312
TA
5230 if (normalized_clk <= info->max_tmds_clock) {
5231 timing_out->display_color_depth = depth;
5232 return true;
5233 }
5234 } while (--depth > COLOR_DEPTH_666);
5235 return false;
400443e8 5236}
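
/*
 * Worked example of the loop above (illustrative values). For 4K60 RGB with
 * pix_clk_100hz = 5940000 and an HDMI 2.0 sink reporting
 * max_tmds_clock = 600000 kHz:
 *
 *	normalized_clk = 594000 kHz
 *	COLOR_DEPTH_101010: 594000 * 30 / 24 = 742500 > 600000 -> reject
 *	COLOR_DEPTH_888:    594000              <= 600000 -> keep 8 bpc
 *
 * With YCbCr 4:2:0 the clock halves to 297000 kHz first, so even
 * COLOR_DEPTH_121212 (297000 * 36 / 24 = 445500) would fit.
 */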
e7b07cee 5237
42ba01fc
NK
5238static void fill_stream_properties_from_drm_display_mode(
5239 struct dc_stream_state *stream,
5240 const struct drm_display_mode *mode_in,
5241 const struct drm_connector *connector,
5242 const struct drm_connector_state *connector_state,
cbd14ae7
SW
5243 const struct dc_stream_state *old_stream,
5244 int requested_bpc)
e7b07cee
HW
5245{
5246 struct dc_crtc_timing *timing_out = &stream->timing;
fe61a2f1 5247 const struct drm_display_info *info = &connector->display_info;
d4252eee 5248 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
1cb1d477
WL
5249 struct hdmi_vendor_infoframe hv_frame;
5250 struct hdmi_avi_infoframe avi_frame;
e7b07cee 5251
acf83f86
WL
5252 memset(&hv_frame, 0, sizeof(hv_frame));
5253 memset(&avi_frame, 0, sizeof(avi_frame));
5254
e7b07cee
HW
5255 timing_out->h_border_left = 0;
5256 timing_out->h_border_right = 0;
5257 timing_out->v_border_top = 0;
5258 timing_out->v_border_bottom = 0;
5259 /* TODO: un-hardcode */
fe61a2f1 5260 if (drm_mode_is_420_only(info, mode_in)
ceb3dbb4 5261 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
fe61a2f1 5262 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
d4252eee
SW
5263 else if (drm_mode_is_420_also(info, mode_in)
5264 && aconnector->force_yuv420_output)
5265 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
c03d0b52 5266 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
ceb3dbb4 5267 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
e7b07cee
HW
5268 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5269 else
5270 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5271
5272 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5273 timing_out->display_color_depth = convert_color_depth_from_display_info(
cbd14ae7
SW
5274 connector,
5275 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5276 requested_bpc);
e7b07cee
HW
5277 timing_out->scan_type = SCANNING_TYPE_NODATA;
5278 timing_out->hdmi_vic = 0;
b333730d 5279
5d945cbc 5280 if (old_stream) {
b333730d
BL
5281 timing_out->vic = old_stream->timing.vic;
5282 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5283 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5284 } else {
5285 timing_out->vic = drm_match_cea_mode(mode_in);
5286 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5287 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5288 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5289 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5290 }
e7b07cee 5291
1cb1d477
WL
5292 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5293 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5294 timing_out->vic = avi_frame.video_code;
5295 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5296 timing_out->hdmi_vic = hv_frame.vic;
5297 }
5298
fe8858bb
NC
5299 if (is_freesync_video_mode(mode_in, aconnector)) {
5300 timing_out->h_addressable = mode_in->hdisplay;
5301 timing_out->h_total = mode_in->htotal;
5302 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5303 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5304 timing_out->v_total = mode_in->vtotal;
5305 timing_out->v_addressable = mode_in->vdisplay;
5306 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5307 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5308 timing_out->pix_clk_100hz = mode_in->clock * 10;
5309 } else {
5310 timing_out->h_addressable = mode_in->crtc_hdisplay;
5311 timing_out->h_total = mode_in->crtc_htotal;
5312 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5313 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5314 timing_out->v_total = mode_in->crtc_vtotal;
5315 timing_out->v_addressable = mode_in->crtc_vdisplay;
5316 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5317 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5318 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5319 }
a85ba005 5320
e7b07cee 5321 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
e7b07cee
HW
5322
5323 stream->output_color_space = get_output_color_space(timing_out);
5324
e43a432c
AK
5325 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5326 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
ea117312
TA
5327 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5328 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5329 drm_mode_is_420_also(info, mode_in) &&
5330 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5331 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5332 adjust_colour_depth_from_display_info(timing_out, info);
5333 }
5334 }
e7b07cee
HW
5335}
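
/*
 * Worked example of the porch/sync arithmetic above, using the standard CEA
 * 1920x1080@60 timing (crtc_* values after drm_mode_set_crtcinfo()):
 *
 *	crtc_hdisplay = 1920, crtc_hsync_start = 2008,
 *	crtc_hsync_end = 2052, crtc_htotal = 2200
 *
 *	h_front_porch = 2008 - 1920 = 88
 *	h_sync_width  = 2052 - 2008 = 44
 *	pix_clk_100hz = 148500 * 10 = 1485000 (148.5 MHz)
 */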
5336
3ee6b26b
AD
5337static void fill_audio_info(struct audio_info *audio_info,
5338 const struct drm_connector *drm_connector,
5339 const struct dc_sink *dc_sink)
e7b07cee
HW
5340{
5341 int i = 0;
5342 int cea_revision = 0;
5343 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5344
5345 audio_info->manufacture_id = edid_caps->manufacturer_id;
5346 audio_info->product_id = edid_caps->product_id;
5347
5348 cea_revision = drm_connector->display_info.cea_rev;
5349
090afc1e 5350 strscpy(audio_info->display_name,
d2b2562c 5351 edid_caps->display_name,
090afc1e 5352 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
e7b07cee 5353
b830ebc9 5354 if (cea_revision >= 3) {
e7b07cee
HW
5355 audio_info->mode_count = edid_caps->audio_mode_count;
5356
5357 for (i = 0; i < audio_info->mode_count; ++i) {
5358 audio_info->modes[i].format_code =
5359 (enum audio_format_code)
5360 (edid_caps->audio_modes[i].format_code);
5361 audio_info->modes[i].channel_count =
5362 edid_caps->audio_modes[i].channel_count;
5363 audio_info->modes[i].sample_rates.all =
5364 edid_caps->audio_modes[i].sample_rate;
5365 audio_info->modes[i].sample_size =
5366 edid_caps->audio_modes[i].sample_size;
5367 }
5368 }
5369
5370 audio_info->flags.all = edid_caps->speaker_flags;
5371
5372 /* TODO: We only check for the progressive mode, check for interlace mode too */
b830ebc9 5373 if (drm_connector->latency_present[0]) {
e7b07cee
HW
5374 audio_info->video_latency = drm_connector->video_latency[0];
5375 audio_info->audio_latency = drm_connector->audio_latency[0];
5376 }
5377
5378 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5379
5380}
5381
3ee6b26b
AD
5382static void
5383copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5384 struct drm_display_mode *dst_mode)
e7b07cee
HW
5385{
5386 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5387 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5388 dst_mode->crtc_clock = src_mode->crtc_clock;
5389 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5390 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
b830ebc9 5391 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
e7b07cee
HW
5392 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5393 dst_mode->crtc_htotal = src_mode->crtc_htotal;
5394 dst_mode->crtc_hskew = src_mode->crtc_hskew;
5395 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5396 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5397 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5398 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5399 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5400}
5401
3ee6b26b
AD
5402static void
5403decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5404 const struct drm_display_mode *native_mode,
5405 bool scale_enabled)
e7b07cee
HW
5406{
5407 if (scale_enabled) {
5408 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5409 } else if (native_mode->clock == drm_mode->clock &&
5410 native_mode->htotal == drm_mode->htotal &&
5411 native_mode->vtotal == drm_mode->vtotal) {
5412 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5413 } else {
5414 /* no scaling nor amdgpu inserted, no need to patch */
5415 }
5416}
5417
aed15309
ML
5418static struct dc_sink *
5419create_fake_sink(struct amdgpu_dm_connector *aconnector)
2e0ac3d6 5420{
2e0ac3d6 5421 struct dc_sink_init_data sink_init_data = { 0 };
aed15309 5422 struct dc_sink *sink = NULL;
2e0ac3d6
HW
5423 sink_init_data.link = aconnector->dc_link;
5424 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5425
5426 sink = dc_sink_create(&sink_init_data);
423788c7 5427 if (!sink) {
2e0ac3d6 5428 DRM_ERROR("Failed to create sink!\n");
aed15309 5429 return NULL;
423788c7 5430 }
2e0ac3d6 5431 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
423788c7 5432
aed15309 5433 return sink;
2e0ac3d6
HW
5434}
5435
fa2123db
ML
5436static void set_multisync_trigger_params(
5437 struct dc_stream_state *stream)
5438{
ec372186
ML
5439 struct dc_stream_state *master = NULL;
5440
fa2123db 5441 if (stream->triggered_crtc_reset.enabled) {
ec372186
ML
5442 master = stream->triggered_crtc_reset.event_source;
5443 stream->triggered_crtc_reset.event =
5444 master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5445 CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5446 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
fa2123db
ML
5447 }
5448}
5449
5450static void set_master_stream(struct dc_stream_state *stream_set[],
5451 int stream_count)
5452{
5453 int j, highest_rfr = 0, master_stream = 0;
5454
5455 for (j = 0; j < stream_count; j++) {
5456 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5457 int refresh_rate = 0;
5458
380604e2 5459 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
fa2123db
ML
5460 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5461 if (refresh_rate > highest_rfr) {
5462 highest_rfr = refresh_rate;
5463 master_stream = j;
5464 }
5465 }
5466 }
5467 for (j = 0; j < stream_count; j++) {
03736f4c 5468 if (stream_set[j])
fa2123db
ML
5469 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5470 }
5471}
5472
5473static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5474{
5475 int i = 0;
ec372186 5476 struct dc_stream_state *stream;
fa2123db
ML
5477
5478 if (context->stream_count < 2)
5479 return;
5480 for (i = 0; i < context->stream_count ; i++) {
5481 if (!context->streams[i])
5482 continue;
1f6010a9
DF
5483 /*
5484 * TODO: add a function to read AMD VSDB bits and set
fa2123db 5485 * crtc_sync_master.multi_sync_enabled flag
1f6010a9 5486 * For now it's set to false
fa2123db 5487 */
fa2123db 5488 }
ec372186 5489
fa2123db 5490 set_master_stream(context->streams, context->stream_count);
ec372186
ML
5491
5492 for (i = 0; i < context->stream_count ; i++) {
5493 stream = context->streams[i];
5494
5495 if (!stream)
5496 continue;
5497
5498 set_multisync_trigger_params(stream);
5499 }
fa2123db
ML
5500}
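
/*
 * Worked example of the master selection in set_master_stream()
 * (illustrative values). The refresh rate is derived as
 * pix_clk_100hz * 100 / (h_total * v_total); for a 1080p60 stream:
 *
 *	1485000 * 100 / (2200 * 1125) = 148500000 / 2475000 = 60 Hz
 *
 * Among the trigger-enabled streams, the one with the highest such rate
 * becomes the event source for all the others.
 */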
5501
5d945cbc
RS
5502/**
5503 * DOC: FreeSync Video
5504 *
5505 * When a userspace application wants to play a video, the content follows a
5506 * standard format definition that usually specifies the FPS for that format.
5507 * The below list illustrates some video format and the expected FPS,
5508 * respectively:
5509 *
5510 * - TV/NTSC (23.976 FPS)
5511 * - Cinema (24 FPS)
5512 * - TV/PAL (25 FPS)
5513 * - TV/NTSC (29.97 FPS)
5514 * - TV/NTSC (30 FPS)
5515 * - Cinema HFR (48 FPS)
5516 * - TV/PAL (50 FPS)
5517 * - Commonly used (60 FPS)
5518 * - Multiples of 24 (48,72,96 FPS)
5519 *
5520 * The list of standard video formats is not huge and can be added to the
5521 * connector modeset list beforehand. With that, userspace can leverage
5522 * FreeSync to extend the front porch in order to attain the target refresh
5523 * rate. Such a switch will happen seamlessly, without screen blanking or
5524 * reprogramming of the output in any other way. If the userspace requests a
5525 * modesetting change compatible with FreeSync modes that only differ in the
5526 * refresh rate, DC will skip the full update and avoid blink during the
5527 * transition. For example, the video player can change the modesetting from
5528 * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
5529 * causing any display blink. This same concept can be applied to a mode
5530 * setting change.
5531 */
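
/*
 * Illustrative sketch of the front porch math implied above (numbers are
 * assumptions, not from the driver). Keeping the pixel clock and htotal of
 * a 1080p60 base mode (148.5 MHz, htotal 2200, vtotal 1125) while
 * retargeting 48 Hz:
 *
 *	vtotal_48 = 148500000 / (2200 * 48) ~= 1406
 *
 * The extra 1406 - 1125 = 281 lines are absorbed by the vertical front
 * porch, so only v_front_porch/v_total differ between the FreeSync video
 * modes and no full modeset is required.
 */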
5532static struct drm_display_mode *
5533get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5534 bool use_probed_modes)
5535{
5536 struct drm_display_mode *m, *m_pref = NULL;
5537 u16 current_refresh, highest_refresh;
5538 struct list_head *list_head = use_probed_modes ?
5539 &aconnector->base.probed_modes :
5540 &aconnector->base.modes;
5541
5542 if (aconnector->freesync_vid_base.clock != 0)
5543 return &aconnector->freesync_vid_base;
5544
5545 /* Find the preferred mode */
5546 list_for_each_entry (m, list_head, head) {
5547 if (m->type & DRM_MODE_TYPE_PREFERRED) {
5548 m_pref = m;
5549 break;
5550 }
5551 }
5552
5553 if (!m_pref) {
5554 /* Probably an EDID with no preferred mode. Fall back to the first entry */
5555 m_pref = list_first_entry_or_null(
5556 &aconnector->base.modes, struct drm_display_mode, head);
5557 if (!m_pref) {
5558 DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5559 return NULL;
5560 }
5561 }
5562
5563 highest_refresh = drm_mode_vrefresh(m_pref);
5564
5565 /*
5566 * Find the mode with highest refresh rate with same resolution.
5567 * For some monitors, preferred mode is not the mode with highest
5568 * supported refresh rate.
5569 */
5570 list_for_each_entry (m, list_head, head) {
5571 current_refresh = drm_mode_vrefresh(m);
5572
5573 if (m->hdisplay == m_pref->hdisplay &&
5574 m->vdisplay == m_pref->vdisplay &&
5575 highest_refresh < current_refresh) {
5576 highest_refresh = current_refresh;
5577 m_pref = m;
5578 }
5579 }
5580
5581 drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
5582 return m_pref;
5583}
5584
5585static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5586 struct amdgpu_dm_connector *aconnector)
5587{
5588 struct drm_display_mode *high_mode;
5589 int timing_diff;
5590
5591 high_mode = get_highest_refresh_rate_mode(aconnector, false);
5592 if (!high_mode || !mode)
5593 return false;
5594
5595 timing_diff = high_mode->vtotal - mode->vtotal;
5596
5597 if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5598 high_mode->hdisplay != mode->hdisplay ||
5599 high_mode->vdisplay != mode->vdisplay ||
5600 high_mode->hsync_start != mode->hsync_start ||
5601 high_mode->hsync_end != mode->hsync_end ||
5602 high_mode->htotal != mode->htotal ||
5603 high_mode->hskew != mode->hskew ||
5604 high_mode->vscan != mode->vscan ||
5605 high_mode->vsync_start - mode->vsync_start != timing_diff ||
5606 high_mode->vsync_end - mode->vsync_end != timing_diff)
5607 return false;
5608 else
5609 return true;
5610}
5611
ea2be5c0 5612#if defined(CONFIG_DRM_AMD_DC_DCN)
998b7ad2 5613static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
5d945cbc
RS
5614 struct dc_sink *sink, struct dc_stream_state *stream,
5615 struct dsc_dec_dpcd_caps *dsc_caps)
998b7ad2
FZ
5616{
5617 stream->timing.flags.DSC = 0;
63ad5371 5618 dsc_caps->is_dsc_supported = false;
998b7ad2 5619
2665f63a 5620 if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
5d945cbc 5621 sink->sink_signal == SIGNAL_TYPE_EDP)) {
50b1f44e
FZ
5622 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
5623 sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
5624 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5625 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5626 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5627 dsc_caps);
998b7ad2
FZ
5628 }
5629}
5630
5d945cbc 5631
2665f63a
ML
5632static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
5633 struct dc_sink *sink, struct dc_stream_state *stream,
5634 struct dsc_dec_dpcd_caps *dsc_caps,
5635 uint32_t max_dsc_target_bpp_limit_override)
5636{
5637 const struct dc_link_settings *verified_link_cap = NULL;
5638 uint32_t link_bw_in_kbps;
5639 uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
5640 struct dc *dc = sink->ctx->dc;
5641 struct dc_dsc_bw_range bw_range = {0};
5642 struct dc_dsc_config dsc_cfg = {0};
5643
5644 verified_link_cap = dc_link_get_link_cap(stream->link);
5645 link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
5646 edp_min_bpp_x16 = 8 * 16;
5647 edp_max_bpp_x16 = 8 * 16;
5648
5649 if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
5650 edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
5651
5652 if (edp_max_bpp_x16 < edp_min_bpp_x16)
5653 edp_min_bpp_x16 = edp_max_bpp_x16;
5654
5655 if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
5656 dc->debug.dsc_min_slice_height_override,
5657 edp_min_bpp_x16, edp_max_bpp_x16,
5658 dsc_caps,
5659 &stream->timing,
5660 &bw_range)) {
5661
5662 if (bw_range.max_kbps < link_bw_in_kbps) {
5663 if (dc_dsc_compute_config(dc->res_pool->dscs[0],
5664 dsc_caps,
5665 dc->debug.dsc_min_slice_height_override,
5666 max_dsc_target_bpp_limit_override,
5667 0,
5668 &stream->timing,
5669 &dsc_cfg)) {
5670 stream->timing.dsc_cfg = dsc_cfg;
5671 stream->timing.flags.DSC = 1;
5672 stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
5673 }
5674 return;
5675 }
5676 }
5677
5678 if (dc_dsc_compute_config(dc->res_pool->dscs[0],
5679 dsc_caps,
5680 dc->debug.dsc_min_slice_height_override,
5681 max_dsc_target_bpp_limit_override,
5682 link_bw_in_kbps,
5683 &stream->timing,
5684 &dsc_cfg)) {
5685 stream->timing.dsc_cfg = dsc_cfg;
5686 stream->timing.flags.DSC = 1;
5687 }
5688}
5689
5d945cbc 5690
998b7ad2 5691static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
5d945cbc
RS
5692 struct dc_sink *sink, struct dc_stream_state *stream,
5693 struct dsc_dec_dpcd_caps *dsc_caps)
998b7ad2
FZ
5694{
5695 struct drm_connector *drm_connector = &aconnector->base;
5696 uint32_t link_bandwidth_kbps;
f1c1a982 5697 uint32_t max_dsc_target_bpp_limit_override = 0;
2665f63a 5698 struct dc *dc = sink->ctx->dc;
50b1f44e
FZ
5699 uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
5700 uint32_t dsc_max_supported_bw_in_kbps;
998b7ad2
FZ
5701
5702 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5703 dc_link_get_link_cap(aconnector->dc_link));
f1c1a982
RL
5704 if (stream->link && stream->link->local_sink)
5705 max_dsc_target_bpp_limit_override =
5706 stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
de7cc1b4 5707
998b7ad2
FZ
5708 /* Set DSC policy according to dsc_clock_en */
5709 dc_dsc_policy_set_enable_dsc_when_not_needed(
5710 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5711
2665f63a
ML
5712 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
5713 dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
5714
5715 apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
5716
5717 } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
50b1f44e
FZ
5718 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
5719 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
998b7ad2
FZ
5720 dsc_caps,
5721 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
f1c1a982 5722 max_dsc_target_bpp_limit_override,
998b7ad2
FZ
5723 link_bandwidth_kbps,
5724 &stream->timing,
5725 &stream->timing.dsc_cfg)) {
50b1f44e 5726 stream->timing.flags.DSC = 1;
5d945cbc 5727 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
50b1f44e
FZ
5728 }
5729 } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
5730 timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
5731 max_supported_bw_in_kbps = link_bandwidth_kbps;
5732 dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
5733
5734 if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
5735 max_supported_bw_in_kbps > 0 &&
5736 dsc_max_supported_bw_in_kbps > 0)
5737 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5738 dsc_caps,
5739 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5740 max_dsc_target_bpp_limit_override,
5741 dsc_max_supported_bw_in_kbps,
5742 &stream->timing,
5743 &stream->timing.dsc_cfg)) {
5744 stream->timing.flags.DSC = 1;
5745 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
5746 __func__, drm_connector->name);
5747 }
998b7ad2
FZ
5748 }
5749 }
5750
5751 /* Overwrite the stream flag if DSC is enabled through debugfs */
5752 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5753 stream->timing.flags.DSC = 1;
5754
5d945cbc
RS
5755 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5756 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
a85ba005 5757
5d945cbc
RS
5758 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5759 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
a85ba005 5760
5d945cbc
RS
5761 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5762 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
a85ba005 5763}
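
/*
 * Illustrative bandwidth comparison behind the policy above (approximate,
 * assumed numbers). A 4-lane HBR2 link carries about
 * 4 * 5.4 Gbps * 8/10 = 17.28 Gbps after 8b/10b coding:
 *
 *	4K60 RGB 8 bpc:  ~594 MHz * 24  ~= 14.3 Gbps -> fits uncompressed
 *	4K120 RGB 8 bpc: ~1188 MHz * 24 ~= 28.5 Gbps -> needs DSC
 *
 * dc_dsc_compute_config() then picks a DSC target bpp that brings the
 * stream under the available link_bandwidth_kbps.
 */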
5d945cbc 5764#endif /* CONFIG_DRM_AMD_DC_DCN */
a85ba005 5765
f11d9373 5766static struct dc_stream_state *
3ee6b26b
AD
5767create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5768 const struct drm_display_mode *drm_mode,
b333730d 5769 const struct dm_connector_state *dm_state,
cbd14ae7
SW
5770 const struct dc_stream_state *old_stream,
5771 int requested_bpc)
e7b07cee
HW
5772{
5773 struct drm_display_mode *preferred_mode = NULL;
391ef035 5774 struct drm_connector *drm_connector;
42ba01fc
NK
5775 const struct drm_connector_state *con_state =
5776 dm_state ? &dm_state->base : NULL;
0971c40e 5777 struct dc_stream_state *stream = NULL;
e7b07cee 5778 struct drm_display_mode mode = *drm_mode;
a85ba005
NC
5779 struct drm_display_mode saved_mode;
5780 struct drm_display_mode *freesync_mode = NULL;
e7b07cee 5781 bool native_mode_found = false;
b0781603
NK
5782 bool recalculate_timing = false;
5783 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
b333730d 5784 int mode_refresh;
58124bf8 5785 int preferred_refresh = 0;
defeb878 5786#if defined(CONFIG_DRM_AMD_DC_DCN)
df2f1015 5787 struct dsc_dec_dpcd_caps dsc_caps;
7c431455 5788#endif
5d945cbc 5789
aed15309 5790 struct dc_sink *sink = NULL;
a85ba005
NC
5791
5792 memset(&saved_mode, 0, sizeof(saved_mode));
5793
b830ebc9 5794 if (aconnector == NULL) {
e7b07cee 5795 DRM_ERROR("aconnector is NULL!\n");
64245fa7 5796 return stream;
e7b07cee
HW
5797 }
5798
e7b07cee 5799 drm_connector = &aconnector->base;
2e0ac3d6 5800
f4ac176e 5801 if (!aconnector->dc_sink) {
e3fa5c4c
JFZ
5802 sink = create_fake_sink(aconnector);
5803 if (!sink)
5804 return stream;
aed15309
ML
5805 } else {
5806 sink = aconnector->dc_sink;
dcd5fb82 5807 dc_sink_retain(sink);
f4ac176e 5808 }
2e0ac3d6 5809
aed15309 5810 stream = dc_create_stream_for_sink(sink);
4562236b 5811
b830ebc9 5812 if (stream == NULL) {
e7b07cee 5813 DRM_ERROR("Failed to create stream for sink!\n");
aed15309 5814 goto finish;
e7b07cee
HW
5815 }
5816
ceb3dbb4
JL
5817 stream->dm_stream_context = aconnector;
5818
4a36fcba
WL
5819 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5820 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5821
e7b07cee
HW
5822 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5823 /* Search for preferred mode */
5824 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5825 native_mode_found = true;
5826 break;
5827 }
5828 }
5829 if (!native_mode_found)
5830 preferred_mode = list_first_entry_or_null(
5831 &aconnector->base.modes,
5832 struct drm_display_mode,
5833 head);
5834
b333730d
BL
5835 mode_refresh = drm_mode_vrefresh(&mode);
5836
b830ebc9 5837 if (preferred_mode == NULL) {
1f6010a9
DF
5838 /*
5839 * This may not be an error, the use case is when we have no
e7b07cee
HW
5840 * usermode calls to reset and set mode upon hotplug. In this
5841 * case, we call set mode ourselves to restore the previous mode
5842 * and the mode list may not be filled in yet.
5843 */
f1ad2f5e 5844 DRM_DEBUG_DRIVER("No preferred mode found\n");
e7b07cee 5845 } else {
de05abe6 5846 recalculate_timing = is_freesync_video_mode(&mode, aconnector);
a85ba005
NC
5847 if (recalculate_timing) {
5848 freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
426c89aa
VS
5849 drm_mode_copy(&saved_mode, &mode);
5850 drm_mode_copy(&mode, freesync_mode);
a85ba005
NC
5851 } else {
5852 decide_crtc_timing_for_drm_display_mode(
5d945cbc 5853 &mode, preferred_mode, scale);
a85ba005 5854
b0781603
NK
5855 preferred_refresh = drm_mode_vrefresh(preferred_mode);
5856 }
e7b07cee
HW
5857 }
5858
a85ba005
NC
5859 if (recalculate_timing)
5860 drm_mode_set_crtcinfo(&saved_mode, 0);
fe8858bb 5861 else if (!dm_state)
f783577c
JFZ
5862 drm_mode_set_crtcinfo(&mode, 0);
5863
5d945cbc 5864 /*
b333730d
BL
5865 * If scaling is enabled and the refresh rate didn't change,
5866 * we copy the VIC and polarities from the old timings
5867 */
b0781603 5868 if (!scale || mode_refresh != preferred_refresh)
a85ba005
NC
5869 fill_stream_properties_from_drm_display_mode(
5870 stream, &mode, &aconnector->base, con_state, NULL,
5871 requested_bpc);
b333730d 5872 else
a85ba005
NC
5873 fill_stream_properties_from_drm_display_mode(
5874 stream, &mode, &aconnector->base, con_state, old_stream,
5875 requested_bpc);
b333730d 5876
defeb878 5877#if defined(CONFIG_DRM_AMD_DC_DCN)
998b7ad2
FZ
5878 /* SST DSC determination policy */
5879 update_dsc_caps(aconnector, sink, stream, &dsc_caps);
5880 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
5881 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
39a4eb85
WL
5882#endif
5883
e7b07cee
HW
5884 update_stream_scaling_settings(&mode, dm_state, stream);
5885
5886 fill_audio_info(
5887 &stream->audio_info,
5888 drm_connector,
aed15309 5889 sink);
e7b07cee 5890
ceb3dbb4 5891 update_stream_signal(stream, sink);
9182b4cb 5892
d832fc3b 5893 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
75f77aaf
WL
5894 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5895
8a488f5d
RL
5896 if (stream->link->psr_settings.psr_feature_enabled) {
5897 /*
5898 * Decide whether the stream supports VSC SDP colorimetry
5899 * before building the VSC infopacket.
5900 */
5901 stream->use_vsc_sdp_for_colorimetry = false;
5902 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5903 stream->use_vsc_sdp_for_colorimetry =
5904 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5905 } else {
5906 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5907 stream->use_vsc_sdp_for_colorimetry = true;
8c322309 5908 }
0c5a0bbb 5909 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
1a365683
RL
5910 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
5911
8c322309 5912 }
aed15309 5913finish:
dcd5fb82 5914 dc_sink_release(sink);
9e3efe3e 5915
e7b07cee
HW
5916 return stream;
5917}
5918
7578ecda 5919static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
e7b07cee
HW
5920{
5921 drm_crtc_cleanup(crtc);
5922 kfree(crtc);
5923}
5924
5925static void dm_crtc_destroy_state(struct drm_crtc *crtc,
3ee6b26b 5926 struct drm_crtc_state *state)
e7b07cee
HW
5927{
5928 struct dm_crtc_state *cur = to_dm_crtc_state(state);
5929
5930 /* TODO: Destroy dc_stream objects once the stream object is flattened */
5931 if (cur->stream)
5932 dc_stream_release(cur->stream);
5933
5934
5935 __drm_atomic_helper_crtc_destroy_state(state);
5936
5937
5938 kfree(state);
5939}
5940
5941static void dm_crtc_reset_state(struct drm_crtc *crtc)
5942{
5943 struct dm_crtc_state *state;
5944
5945 if (crtc->state)
5946 dm_crtc_destroy_state(crtc, crtc->state);
5947
5948 state = kzalloc(sizeof(*state), GFP_KERNEL);
5949 if (WARN_ON(!state))
5950 return;
5951
1f8a52ec 5952 __drm_atomic_helper_crtc_reset(crtc, &state->base);
e7b07cee
HW
5953}
5954
5955static struct drm_crtc_state *
5956dm_crtc_duplicate_state(struct drm_crtc *crtc)
5957{
5958 struct dm_crtc_state *state, *cur;
5959
5960 cur = to_dm_crtc_state(crtc->state);
5961
5962 if (WARN_ON(!crtc->state))
5963 return NULL;
5964
2004f45e 5965 state = kzalloc(sizeof(*state), GFP_KERNEL);
2a55f096
ES
5966 if (!state)
5967 return NULL;
e7b07cee
HW
5968
5969 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5970
5971 if (cur->stream) {
5972 state->stream = cur->stream;
5973 dc_stream_retain(state->stream);
5974 }
5975
d6ef9b41 5976 state->active_planes = cur->active_planes;
98e6436d 5977 state->vrr_infopacket = cur->vrr_infopacket;
c1ee92f9 5978 state->abm_level = cur->abm_level;
bb47de73
NK
5979 state->vrr_supported = cur->vrr_supported;
5980 state->freesync_config = cur->freesync_config;
cf020d49
NK
5981 state->cm_has_degamma = cur->cm_has_degamma;
5982 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
7cc191ee 5983 state->mpo_requested = cur->mpo_requested;
e7b07cee
HW
5984 /* TODO: Duplicate dc_stream once the stream object is flattened */
5985
5986 return &state->base;
5987}
5988
c0c87382 5989#ifdef CONFIG_DEBUG_FS
e69231c4 5990static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
86bc2219
WL
5991{
5992 crtc_debugfs_init(crtc);
5993
5994 return 0;
5995}
c0c87382 5996#endif
86bc2219 5997
d2574c33
MK
5998static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5999{
6000 enum dc_irq_source irq_source;
6001 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
1348969a 6002 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
d2574c33
MK
6003 int rc;
6004
6005 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6006
6007 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6008
4711c033
LT
6009 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6010 acrtc->crtc_id, enable ? "en" : "dis", rc);
d2574c33
MK
6011 return rc;
6012}
589d2739
HW
6013
6014static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6015{
6016 enum dc_irq_source irq_source;
6017 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
1348969a 6018 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
d2574c33 6019 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
71338cb4 6020 struct amdgpu_display_manager *dm = &adev->dm;
09a5df6c 6021 struct vblank_control_work *work;
d2574c33
MK
6022 int rc = 0;
6023
6024 if (enable) {
6025 /* vblank irq on -> Only need vupdate irq in vrr mode */
6026 if (amdgpu_dm_vrr_active(acrtc_state))
6027 rc = dm_set_vupdate_irq(crtc, true);
6028 } else {
6029 /* vblank irq off -> vupdate irq off */
6030 rc = dm_set_vupdate_irq(crtc, false);
6031 }
6032
6033 if (rc)
6034 return rc;
589d2739
HW
6035
6036 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
71338cb4
BL
6037
6038 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6039 return -EBUSY;
6040
98ab5f35
BL
6041 if (amdgpu_in_reset(adev))
6042 return 0;
6043
06dd1888
NK
6044 if (dm->vblank_control_workqueue) {
6045 work = kzalloc(sizeof(*work), GFP_ATOMIC);
6046 if (!work)
6047 return -ENOMEM;
09a5df6c 6048
06dd1888
NK
6049 INIT_WORK(&work->work, vblank_control_worker);
6050 work->dm = dm;
6051 work->acrtc = acrtc;
6052 work->enable = enable;
09a5df6c 6053
06dd1888
NK
6054 if (acrtc_state->stream) {
6055 dc_stream_retain(acrtc_state->stream);
6056 work->stream = acrtc_state->stream;
6057 }
58aa1c50 6058
06dd1888
NK
6059 queue_work(dm->vblank_control_workqueue, &work->work);
6060 }
71338cb4 6061
71338cb4 6062 return 0;
589d2739
HW
6063}
6064
6065static int dm_enable_vblank(struct drm_crtc *crtc)
6066{
6067 return dm_set_vblank(crtc, true);
6068}
6069
6070static void dm_disable_vblank(struct drm_crtc *crtc)
6071{
6072 dm_set_vblank(crtc, false);
6073}
6074
faf26f2b 6075/* Implemented only the options currently available for the driver */
e7b07cee
HW
6076static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6077 .reset = dm_crtc_reset_state,
6078 .destroy = amdgpu_dm_crtc_destroy,
e7b07cee
HW
6079 .set_config = drm_atomic_helper_set_config,
6080 .page_flip = drm_atomic_helper_page_flip,
6081 .atomic_duplicate_state = dm_crtc_duplicate_state,
6082 .atomic_destroy_state = dm_crtc_destroy_state,
31aec354 6083 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
3b3b8448 6084 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
8fb843d1 6085 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
e3eff4b5 6086 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
589d2739
HW
6087 .enable_vblank = dm_enable_vblank,
6088 .disable_vblank = dm_disable_vblank,
e3eff4b5 6089 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
c0c87382 6090#if defined(CONFIG_DEBUG_FS)
86bc2219 6091 .late_register = amdgpu_dm_crtc_late_register,
c0c87382 6092#endif
e7b07cee
HW
6093};
6094
6095static enum drm_connector_status
6096amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6097{
6098 bool connected;
c84dec2f 6099 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 6100
1f6010a9
DF
6101 /*
6102 * Notes:
e7b07cee
HW
6103 * 1. This interface is NOT called in context of HPD irq.
6104 * 2. This interface *is called* in context of user-mode ioctl. Which
1f6010a9
DF
6105 * makes it a bad place for *any* MST-related activity.
6106 */
e7b07cee 6107
8580d60b
HW
6108 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6109 !aconnector->fake_enable)
e7b07cee
HW
6110 connected = (aconnector->dc_sink != NULL);
6111 else
5d945cbc
RS
6112 connected = (aconnector->base.force == DRM_FORCE_ON ||
6113 aconnector->base.force == DRM_FORCE_ON_DIGITAL);
e7b07cee 6114
0f877894
OV
6115 update_subconnector_property(aconnector);
6116
e7b07cee
HW
6117 return (connected ? connector_status_connected :
6118 connector_status_disconnected);
6119}
6120
3ee6b26b
AD
6121int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6122 struct drm_connector_state *connector_state,
6123 struct drm_property *property,
6124 uint64_t val)
e7b07cee
HW
6125{
6126 struct drm_device *dev = connector->dev;
1348969a 6127 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
6128 struct dm_connector_state *dm_old_state =
6129 to_dm_connector_state(connector->state);
6130 struct dm_connector_state *dm_new_state =
6131 to_dm_connector_state(connector_state);
6132
6133 int ret = -EINVAL;
6134
6135 if (property == dev->mode_config.scaling_mode_property) {
6136 enum amdgpu_rmx_type rmx_type;
6137
6138 switch (val) {
6139 case DRM_MODE_SCALE_CENTER:
6140 rmx_type = RMX_CENTER;
6141 break;
6142 case DRM_MODE_SCALE_ASPECT:
6143 rmx_type = RMX_ASPECT;
6144 break;
6145 case DRM_MODE_SCALE_FULLSCREEN:
6146 rmx_type = RMX_FULL;
6147 break;
6148 case DRM_MODE_SCALE_NONE:
6149 default:
6150 rmx_type = RMX_OFF;
6151 break;
6152 }
6153
6154 if (dm_old_state->scaling == rmx_type)
6155 return 0;
6156
6157 dm_new_state->scaling = rmx_type;
6158 ret = 0;
6159 } else if (property == adev->mode_info.underscan_hborder_property) {
6160 dm_new_state->underscan_hborder = val;
6161 ret = 0;
6162 } else if (property == adev->mode_info.underscan_vborder_property) {
6163 dm_new_state->underscan_vborder = val;
6164 ret = 0;
6165 } else if (property == adev->mode_info.underscan_property) {
6166 dm_new_state->underscan_enable = val;
6167 ret = 0;
c1ee92f9
DF
6168 } else if (property == adev->mode_info.abm_level_property) {
6169 dm_new_state->abm_level = val;
6170 ret = 0;
e7b07cee
HW
6171 }
6172
6173 return ret;
6174}
6175
3ee6b26b
AD
6176int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6177 const struct drm_connector_state *state,
6178 struct drm_property *property,
6179 uint64_t *val)
e7b07cee
HW
6180{
6181 struct drm_device *dev = connector->dev;
1348969a 6182 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
6183 struct dm_connector_state *dm_state =
6184 to_dm_connector_state(state);
6185 int ret = -EINVAL;
6186
6187 if (property == dev->mode_config.scaling_mode_property) {
6188 switch (dm_state->scaling) {
6189 case RMX_CENTER:
6190 *val = DRM_MODE_SCALE_CENTER;
6191 break;
6192 case RMX_ASPECT:
6193 *val = DRM_MODE_SCALE_ASPECT;
6194 break;
6195 case RMX_FULL:
6196 *val = DRM_MODE_SCALE_FULLSCREEN;
6197 break;
6198 case RMX_OFF:
6199 default:
6200 *val = DRM_MODE_SCALE_NONE;
6201 break;
6202 }
6203 ret = 0;
6204 } else if (property == adev->mode_info.underscan_hborder_property) {
6205 *val = dm_state->underscan_hborder;
6206 ret = 0;
6207 } else if (property == adev->mode_info.underscan_vborder_property) {
6208 *val = dm_state->underscan_vborder;
6209 ret = 0;
6210 } else if (property == adev->mode_info.underscan_property) {
6211 *val = dm_state->underscan_enable;
6212 ret = 0;
c1ee92f9
DF
6213 } else if (property == adev->mode_info.abm_level_property) {
6214 *val = dm_state->abm_level;
6215 ret = 0;
e7b07cee 6216 }
c1ee92f9 6217
e7b07cee
HW
6218 return ret;
6219}
6220
526c654a
ED
6221static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6222{
6223 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6224
6225 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6226}
6227
7578ecda 6228static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
e7b07cee 6229{
c84dec2f 6230 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 6231 const struct dc_link *link = aconnector->dc_link;
1348969a 6232 struct amdgpu_device *adev = drm_to_adev(connector->dev);
e7b07cee 6233 struct amdgpu_display_manager *dm = &adev->dm;
7fd13bae 6234 int i;
ada8ce15 6235
5dff80bd 6236 /*
5d945cbc 6237 * Call only if mst_mgr was initialized before since it's not done
5dff80bd
AG
6238 * for all connector types.
6239 */
6240 if (aconnector->mst_mgr.dev)
6241 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6242
5d945cbc
RS
6243#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6244 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
7fd13bae
AD
6245 for (i = 0; i < dm->num_of_edps; i++) {
6246 if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6247 backlight_device_unregister(dm->backlight_dev[i]);
6248 dm->backlight_dev[i] = NULL;
6249 }
e7b07cee 6250 }
5d945cbc 6251#endif
dcd5fb82
MF
6252
6253 if (aconnector->dc_em_sink)
6254 dc_sink_release(aconnector->dc_em_sink);
6255 aconnector->dc_em_sink = NULL;
6256 if (aconnector->dc_sink)
6257 dc_sink_release(aconnector->dc_sink);
6258 aconnector->dc_sink = NULL;
6259
e86e8947 6260 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
e7b07cee
HW
6261 drm_connector_unregister(connector);
6262 drm_connector_cleanup(connector);
526c654a
ED
6263 if (aconnector->i2c) {
6264 i2c_del_adapter(&aconnector->i2c->base);
6265 kfree(aconnector->i2c);
6266 }
7daec99f 6267 kfree(aconnector->dm_dp_aux.aux.name);
526c654a 6268
e7b07cee
HW
6269 kfree(connector);
6270}
6271
6272void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6273{
6274 struct dm_connector_state *state =
6275 to_dm_connector_state(connector->state);
6276
df099b9b
LSL
6277 if (connector->state)
6278 __drm_atomic_helper_connector_destroy_state(connector->state);
6279
e7b07cee
HW
6280 kfree(state);
6281
6282 state = kzalloc(sizeof(*state), GFP_KERNEL);
6283
6284 if (state) {
6285 state->scaling = RMX_OFF;
6286 state->underscan_enable = false;
6287 state->underscan_hborder = 0;
6288 state->underscan_vborder = 0;
01933ba4 6289 state->base.max_requested_bpc = 8;
3261e013
ML
6290 state->vcpi_slots = 0;
6291 state->pbn = 0;
5d945cbc 6292
c3e50f89
NK
6293 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6294 state->abm_level = amdgpu_dm_abm_level;
6295
df099b9b 6296 __drm_atomic_helper_connector_reset(connector, &state->base);
e7b07cee
HW
6297 }
6298}
6299
3ee6b26b
AD
6300struct drm_connector_state *
6301amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
e7b07cee
HW
6302{
6303 struct dm_connector_state *state =
6304 to_dm_connector_state(connector->state);
6305
6306 struct dm_connector_state *new_state =
6307 kmemdup(state, sizeof(*state), GFP_KERNEL);
6308
98e6436d
AK
6309 if (!new_state)
6310 return NULL;
e7b07cee 6311
98e6436d
AK
6312 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6313
6314 new_state->freesync_capable = state->freesync_capable;
c1ee92f9 6315 new_state->abm_level = state->abm_level;
922454c2
NK
6316 new_state->scaling = state->scaling;
6317 new_state->underscan_enable = state->underscan_enable;
6318 new_state->underscan_hborder = state->underscan_hborder;
6319 new_state->underscan_vborder = state->underscan_vborder;
3261e013
ML
6320 new_state->vcpi_slots = state->vcpi_slots;
6321 new_state->pbn = state->pbn;
98e6436d 6322 return &new_state->base;
e7b07cee
HW
6323}
6324
14f04fa4
AD
6325static int
6326amdgpu_dm_connector_late_register(struct drm_connector *connector)
6327{
6328 struct amdgpu_dm_connector *amdgpu_dm_connector =
6329 to_amdgpu_dm_connector(connector);
00a8037e 6330 int r;
14f04fa4 6331
00a8037e
AD
6332 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6333 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6334 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6335 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6336 if (r)
6337 return r;
6338 }
6339
6340#if defined(CONFIG_DEBUG_FS)
14f04fa4
AD
6341 connector_debugfs_init(amdgpu_dm_connector);
6342#endif
6343
6344 return 0;
6345}
6346
e7b07cee
HW
6347static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6348 .reset = amdgpu_dm_connector_funcs_reset,
6349 .detect = amdgpu_dm_connector_detect,
6350 .fill_modes = drm_helper_probe_single_connector_modes,
6351 .destroy = amdgpu_dm_connector_destroy,
6352 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6353 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6354 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
526c654a 6355 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
14f04fa4 6356 .late_register = amdgpu_dm_connector_late_register,
526c654a 6357 .early_unregister = amdgpu_dm_connector_unregister
e7b07cee
HW
6358};
6359
e7b07cee
HW
6360static int get_modes(struct drm_connector *connector)
6361{
6362 return amdgpu_dm_connector_get_modes(connector);
6363}
6364
c84dec2f 6365static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
6366{
6367 struct dc_sink_init_data init_params = {
6368 .link = aconnector->dc_link,
6369 .sink_signal = SIGNAL_TYPE_VIRTUAL
6370 };
70e8ffc5 6371 struct edid *edid;
e7b07cee 6372
a89ff457 6373 if (!aconnector->base.edid_blob_ptr) {
e7b07cee
HW
6374 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6375 aconnector->base.name);
6376
6377 aconnector->base.force = DRM_FORCE_OFF;
6378 aconnector->base.override_edid = false;
6379 return;
6380 }
6381
70e8ffc5
HW
6382 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6383
e7b07cee
HW
6384 aconnector->edid = edid;
6385
6386 aconnector->dc_em_sink = dc_link_add_remote_sink(
6387 aconnector->dc_link,
6388 (uint8_t *)edid,
6389 (edid->extensions + 1) * EDID_LENGTH,
6390 &init_params);
6391
dcd5fb82 6392 if (aconnector->base.force == DRM_FORCE_ON) {
e7b07cee
HW
6393 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6394 aconnector->dc_link->local_sink :
6395 aconnector->dc_em_sink;
dcd5fb82
MF
6396 dc_sink_retain(aconnector->dc_sink);
6397 }
e7b07cee
HW
6398}
6399
c84dec2f 6400static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
6401{
6402 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6403
1f6010a9
DF
6404 /*
6405 * In the case of a headless boot with force-on for a DP managed connector,
e7b07cee
HW
6406 * those settings have to be != 0 to get an initial modeset
6407 */
6408 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6409 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6410 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6411 }
6412
6413
6414 aconnector->base.override_edid = true;
6415 create_eml_sink(aconnector);
6416}
6417
17ce8a69 6418struct dc_stream_state *
cbd14ae7
SW
6419create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6420 const struct drm_display_mode *drm_mode,
6421 const struct dm_connector_state *dm_state,
6422 const struct dc_stream_state *old_stream)
6423{
6424 struct drm_connector *connector = &aconnector->base;
1348969a 6425 struct amdgpu_device *adev = drm_to_adev(connector->dev);
cbd14ae7 6426 struct dc_stream_state *stream;
4b7da34b
SW
6427 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6428 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
cbd14ae7
SW
6429 enum dc_status dc_result = DC_OK;
6430
6431 do {
6432 stream = create_stream_for_sink(aconnector, drm_mode,
6433 dm_state, old_stream,
6434 requested_bpc);
6435 if (stream == NULL) {
6436 DRM_ERROR("Failed to create stream for sink!\n");
6437 break;
6438 }
6439
e9a7d236
RS
6440 dc_result = dc_validate_stream(adev->dm.dc, stream);
6441 if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
f04d275d 6442 dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);
6443
cbd14ae7 6444 if (dc_result != DC_OK) {
74a16675 6445 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
cbd14ae7
SW
6446 drm_mode->hdisplay,
6447 drm_mode->vdisplay,
6448 drm_mode->clock,
74a16675
RS
6449 dc_result,
6450 dc_status_to_str(dc_result));
cbd14ae7
SW
6451
6452 dc_stream_release(stream);
6453 stream = NULL;
6454 requested_bpc -= 2; /* lower bpc to retry validation */
6455 }
6456
6457 } while (stream == NULL && requested_bpc >= 6);
6458
68eb3ae3
WS
6459 if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6460 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6461
6462 aconnector->force_yuv420_output = true;
6463 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6464 dm_state, old_stream);
6465 aconnector->force_yuv420_output = false;
6466 }
6467
cbd14ae7
SW
6468 return stream;
6469}
6470
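/*
 * Illustrative sketch (not part of the driver): the loop above is a
 * simple "bpc ladder" - it retries validation at max_requested_bpc,
 * then steps down by 2 until 6 bpc, and only on an encoder validation
 * failure re-runs the whole ladder with YCbCr420 forced. A minimal,
 * self-contained model of that control flow, with try_validate() as a
 * hypothetical stand-in for dc_validate_stream():
 */
#if 0 /* example only */
static bool try_validate(int bpc, bool force_yuv420);

static bool validate_with_fallback(int max_bpc)
{
	int bpc;

	for (bpc = max_bpc; bpc >= 6; bpc -= 2)	/* e.g. 10 -> 8 -> 6 */
		if (try_validate(bpc, false))
			return true;

	/* Last resort: the same ladder again with YCbCr420 forced. */
	for (bpc = max_bpc; bpc >= 6; bpc -= 2)
		if (try_validate(bpc, true))
			return true;

	return false;
}
#endif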
ba9ca088 6471enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
3ee6b26b 6472 struct drm_display_mode *mode)
e7b07cee
HW
6473{
6474 int result = MODE_ERROR;
6475 struct dc_sink *dc_sink;
e7b07cee 6476 /* TODO: Unhardcode stream count */
0971c40e 6477 struct dc_stream_state *stream;
c84dec2f 6478 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
6479
6480 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6481 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6482 return result;
6483
1f6010a9
DF
6484 /*
6485 * Only run this the first time mode_valid is called to initialize
e7b07cee
HW
6486 * EDID mgmt
6487 */
6488 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6489 !aconnector->dc_em_sink)
6490 handle_edid_mgmt(aconnector);
6491
c84dec2f 6492 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
e7b07cee 6493
ad975f44
VL
6494 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6495 aconnector->base.force != DRM_FORCE_ON) {
e7b07cee
HW
6496 DRM_ERROR("dc_sink is NULL!\n");
6497 goto fail;
6498 }
6499
cbd14ae7
SW
6500 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6501 if (stream) {
6502 dc_stream_release(stream);
e7b07cee 6503 result = MODE_OK;
cbd14ae7 6504 }
e7b07cee
HW
6505
6506fail:
6507 /* TODO: error handling */
6508 return result;
6509}
6510
88694af9
NK
6511static int fill_hdr_info_packet(const struct drm_connector_state *state,
6512 struct dc_info_packet *out)
6513{
6514 struct hdmi_drm_infoframe frame;
6515 unsigned char buf[30]; /* 26 + 4 */
6516 ssize_t len;
6517 int ret, i;
6518
6519 memset(out, 0, sizeof(*out));
6520
6521 if (!state->hdr_output_metadata)
6522 return 0;
6523
6524 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6525 if (ret)
6526 return ret;
6527
6528 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6529 if (len < 0)
6530 return (int)len;
6531
6532 /* Static metadata is a fixed 26 bytes + 4 byte header. */
6533 if (len != 30)
6534 return -EINVAL;
6535
6536 /* Prepare the infopacket for DC. */
6537 switch (state->connector->connector_type) {
6538 case DRM_MODE_CONNECTOR_HDMIA:
6539 out->hb0 = 0x87; /* type */
6540 out->hb1 = 0x01; /* version */
6541 out->hb2 = 0x1A; /* length */
6542 out->sb[0] = buf[3]; /* checksum */
6543 i = 1;
6544 break;
6545
6546 case DRM_MODE_CONNECTOR_DisplayPort:
6547 case DRM_MODE_CONNECTOR_eDP:
6548 out->hb0 = 0x00; /* sdp id, zero */
6549 out->hb1 = 0x87; /* type */
6550 out->hb2 = 0x1D; /* payload len - 1 */
6551 out->hb3 = (0x13 << 2); /* sdp version */
6552 out->sb[0] = 0x01; /* version */
6553 out->sb[1] = 0x1A; /* length */
6554 i = 2;
6555 break;
6556
6557 default:
6558 return -EINVAL;
6559 }
6560
6561 memcpy(&out->sb[i], &buf[4], 26);
6562 out->valid = true;
6563
6564 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6565 sizeof(out->sb), false);
6566
6567 return 0;
6568}
6569
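/*
 * Illustrative note (not part of the driver): for HDMI, the packet
 * assembled above ends up laid out as
 *
 *   hb0..hb2  = 0x87, 0x01, 0x1A   (infoframe type, version, length 26)
 *   sb[0]     = checksum byte taken from the packed infoframe
 *   sb[1..26] = the 26 bytes of static HDR metadata
 *
 * The DP/eDP case carries the same 26 payload bytes but wraps them in
 * an SDP header instead, which is why only the header bytes and the
 * copy offset `i` differ between the two switch cases.
 */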
88694af9
NK
6570static int
6571amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
51e857af 6572 struct drm_atomic_state *state)
88694af9 6573{
51e857af
SP
6574 struct drm_connector_state *new_con_state =
6575 drm_atomic_get_new_connector_state(state, conn);
88694af9
NK
6576 struct drm_connector_state *old_con_state =
6577 drm_atomic_get_old_connector_state(state, conn);
6578 struct drm_crtc *crtc = new_con_state->crtc;
6579 struct drm_crtc_state *new_crtc_state;
6580 int ret;
6581
e8a98235
RS
6582 trace_amdgpu_dm_connector_atomic_check(new_con_state);
6583
88694af9
NK
6584 if (!crtc)
6585 return 0;
6586
72921cdf 6587 if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
88694af9
NK
6588 struct dc_info_packet hdr_infopacket;
6589
6590 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6591 if (ret)
6592 return ret;
6593
6594 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6595 if (IS_ERR(new_crtc_state))
6596 return PTR_ERR(new_crtc_state);
6597
6598 /*
6599 * DC considers the stream backends changed if the
6600 * static metadata changes. Forcing the modeset also
6601 * gives a simple way for userspace to switch from
b232d4ed
NK
6602 * 8bpc to 10bpc when setting the metadata to enter
6603 * or exit HDR.
6604 *
6605 * Changing the static metadata after it's been
6606 * set is permissible, however. So only force a
6607 * modeset if we're entering or exiting HDR.
88694af9 6608 */
b232d4ed
NK
6609 new_crtc_state->mode_changed =
6610 !old_con_state->hdr_output_metadata ||
6611 !new_con_state->hdr_output_metadata;
88694af9
NK
6612 }
6613
6614 return 0;
6615}
6616
e7b07cee
HW
6617static const struct drm_connector_helper_funcs
6618amdgpu_dm_connector_helper_funcs = {
6619 /*
1f6010a9 6620 * If hotplugging a second, bigger display in FB console mode, higher-resolution
b830ebc9 6621 * modes will be filtered out by drm_mode_validate_size(), and those modes
1f6010a9 6622 * are missing after the user starts lightdm. So we need to renew the modes list
b830ebc9
HW
6623 * in the get_modes callback, not just return the modes count.
6624 */
e7b07cee
HW
6625 .get_modes = get_modes,
6626 .mode_valid = amdgpu_dm_connector_mode_valid,
88694af9 6627 .atomic_check = amdgpu_dm_connector_atomic_check,
e7b07cee
HW
6628};
6629
6630static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6631{
6632}
6633
d6ef9b41 6634static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
c14a005c
NK
6635{
6636 struct drm_atomic_state *state = new_crtc_state->state;
6637 struct drm_plane *plane;
6638 int num_active = 0;
6639
6640 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6641 struct drm_plane_state *new_plane_state;
6642
6643 /* Cursor planes are "fake". */
6644 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6645 continue;
6646
6647 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6648
6649 if (!new_plane_state) {
6650 /*
6651 * The plane is enabled on the CRTC and hasn't changed
6652 * state. This means that it previously passed
6653 * validation and is therefore enabled.
6654 */
6655 num_active += 1;
6656 continue;
6657 }
6658
6659 /* We need a framebuffer to be considered enabled. */
6660 num_active += (new_plane_state->fb != NULL);
6661 }
6662
d6ef9b41
NK
6663 return num_active;
6664}
6665
8fe684e9
NK
6666static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6667 struct drm_crtc_state *new_crtc_state)
d6ef9b41
NK
6668{
6669 struct dm_crtc_state *dm_new_crtc_state =
6670 to_dm_crtc_state(new_crtc_state);
6671
6672 dm_new_crtc_state->active_planes = 0;
d6ef9b41
NK
6673
6674 if (!dm_new_crtc_state->stream)
6675 return;
6676
6677 dm_new_crtc_state->active_planes =
6678 count_crtc_active_planes(new_crtc_state);
c14a005c
NK
6679}
6680
3ee6b26b 6681static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5d945cbc 6682 struct drm_atomic_state *state)
e7b07cee 6683{
29b77ad7 6684 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
5d945cbc 6685 crtc);
1348969a 6686 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
e7b07cee 6687 struct dc *dc = adev->dm.dc;
29b77ad7 6688 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
e7b07cee
HW
6689 int ret = -EINVAL;
6690
5b8c5969 6691 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
e8a98235 6692
29b77ad7 6693 dm_update_crtc_active_planes(crtc, crtc_state);
d6ef9b41 6694
bcd74374 6695 if (WARN_ON(unlikely(!dm_crtc_state->stream &&
5d945cbc 6696 modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
e7b07cee
HW
6697 return ret;
6698 }
6699
bc92c065 6700 /*
b836a274
MD
6701 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6702 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6703 * planes are disabled, which is not supported by the hardware. And there is legacy
6704 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
bc92c065 6705 */
29b77ad7 6706 if (crtc_state->enable &&
5d945cbc 6707 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
ea9522f5 6708 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
c14a005c 6709 return -EINVAL;
ea9522f5 6710 }
c14a005c 6711
b836a274
MD
6712 /* In some use cases, like reset, no stream is attached */
6713 if (!dm_crtc_state->stream)
6714 return 0;
6715
62c933f9 6716 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
e7b07cee
HW
6717 return 0;
6718
ea9522f5 6719 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
e7b07cee
HW
6720 return ret;
6721}
6722
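/*
 * Illustrative sketch (not part of the driver): the primary-plane rule
 * enforced above means a commit shaped like this (hypothetical helper
 * names) fails the atomic check with -EINVAL, because the CRTC stays
 * enabled while its primary plane has no framebuffer:
 *
 *   enable_crtc(crtc);                 // crtc_state->enable = true
 *   set_plane_fb(crtc->primary, NULL); // primary leaves plane_mask
 *   commit();                          // rejected above
 *
 * Userspace wanting a blank output should disable the CRTC rather than
 * just the primary plane.
 */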
3ee6b26b
AD
6723static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6724 const struct drm_display_mode *mode,
6725 struct drm_display_mode *adjusted_mode)
e7b07cee
HW
6726{
6727 return true;
6728}
6729
6730static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6731 .disable = dm_crtc_helper_disable,
6732 .atomic_check = dm_crtc_helper_atomic_check,
ea702333
TZ
6733 .mode_fixup = dm_crtc_helper_mode_fixup,
6734 .get_scanout_position = amdgpu_crtc_get_scanout_position,
e7b07cee
HW
6735};
6736
6737static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6738{
6739
6740}
6741
f04d275d 6742int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
3261e013
ML
6743{
6744 switch (display_color_depth) {
5d945cbc
RS
6745 case COLOR_DEPTH_666:
6746 return 6;
6747 case COLOR_DEPTH_888:
6748 return 8;
6749 case COLOR_DEPTH_101010:
6750 return 10;
6751 case COLOR_DEPTH_121212:
6752 return 12;
6753 case COLOR_DEPTH_141414:
6754 return 14;
6755 case COLOR_DEPTH_161616:
6756 return 16;
6757 default:
6758 break;
6759 }
3261e013
ML
6760 return 0;
6761}
6762
3ee6b26b
AD
6763static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6764 struct drm_crtc_state *crtc_state,
6765 struct drm_connector_state *conn_state)
e7b07cee 6766{
3261e013
ML
6767 struct drm_atomic_state *state = crtc_state->state;
6768 struct drm_connector *connector = conn_state->connector;
6769 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6770 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6771 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6772 struct drm_dp_mst_topology_mgr *mst_mgr;
6773 struct drm_dp_mst_port *mst_port;
6774 enum dc_color_depth color_depth;
6775 int clock, bpp = 0;
1bc22f20 6776 bool is_y420 = false;
3261e013
ML
6777
6778 if (!aconnector->port || !aconnector->dc_sink)
6779 return 0;
6780
6781 mst_port = aconnector->port;
6782 mst_mgr = &aconnector->mst_port->mst_mgr;
6783
6784 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6785 return 0;
6786
6787 if (!state->duplicated) {
cbd14ae7 6788 int max_bpc = conn_state->max_requested_bpc;
1bc22f20 6789 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5d945cbc 6790 aconnector->force_yuv420_output;
cbd14ae7
SW
6791 color_depth = convert_color_depth_from_display_info(connector,
6792 is_y420,
6793 max_bpc);
3261e013
ML
6794 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6795 clock = adjusted_mode->clock;
dc48529f 6796 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
3261e013
ML
6797 }
6798 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6799 mst_mgr,
6800 mst_port,
1c6c1cb5 6801 dm_new_connector_state->pbn,
03ca9600 6802 dm_mst_get_pbn_divider(aconnector->dc_link));
3261e013
ML
6803 if (dm_new_connector_state->vcpi_slots < 0) {
6804 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6805 return dm_new_connector_state->vcpi_slots;
6806 }
e7b07cee
HW
6807 return 0;
6808}
6809
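/*
 * Worked example (not part of the driver): for a 3840x2160@60 stream
 * with a ~533250 kHz pixel clock at 8 bpc RGB (bpp = 8 * 3 = 24), the
 * drm_dp_calc_pbn_mode() call above comes out to roughly
 *
 *   data rate = 533250 kHz * 24 bits / 8 = 1599750 kB/s
 *   PBN      ~= 1599.75 MB/s * 64/54 * 1.006 ~= 1908
 *
 * where 1 PBN = 54/64 MB/s and 1.006 is the spec's 0.6% margin. That
 * PBN value is what gets stored in dm_new_connector_state->pbn and
 * later carved into MST time slots.
 */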
6810const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6811 .disable = dm_encoder_helper_disable,
6812 .atomic_check = dm_encoder_helper_atomic_check
6813};
6814
d9fe1a4c 6815#if defined(CONFIG_DRM_AMD_DC_DCN)
29b9ba74 6816static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6513104b
HW
6817 struct dc_state *dc_state,
6818 struct dsc_mst_fairness_vars *vars)
29b9ba74
ML
6819{
6820 struct dc_stream_state *stream = NULL;
6821 struct drm_connector *connector;
5760dcb9 6822 struct drm_connector_state *new_con_state;
29b9ba74
ML
6823 struct amdgpu_dm_connector *aconnector;
6824 struct dm_connector_state *dm_conn_state;
a550bb16
HW
6825 int i, j;
6826 int vcpi, pbn_div, pbn, slot_num = 0;
29b9ba74 6827
5760dcb9 6828 for_each_new_connector_in_state(state, connector, new_con_state, i) {
29b9ba74
ML
6829
6830 aconnector = to_amdgpu_dm_connector(connector);
6831
6832 if (!aconnector->port)
6833 continue;
6834
6835 if (!new_con_state || !new_con_state->crtc)
6836 continue;
6837
6838 dm_conn_state = to_dm_connector_state(new_con_state);
6839
6840 for (j = 0; j < dc_state->stream_count; j++) {
6841 stream = dc_state->streams[j];
6842 if (!stream)
6843 continue;
6844
5d945cbc 6845 if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
29b9ba74
ML
6846 break;
6847
6848 stream = NULL;
6849 }
6850
6851 if (!stream)
6852 continue;
6853
29b9ba74 6854 pbn_div = dm_mst_get_pbn_divider(stream->link);
6513104b
HW
6855 /* pbn is calculated by compute_mst_dsc_configs_for_state */
6856 for (j = 0; j < dc_state->stream_count; j++) {
6857 if (vars[j].aconnector == aconnector) {
6858 pbn = vars[j].pbn;
6859 break;
6860 }
6861 }
6862
a550bb16
HW
6863 if (j == dc_state->stream_count)
6864 continue;
6865
6866 slot_num = DIV_ROUND_UP(pbn, pbn_div);
6867
6868 if (stream->timing.flags.DSC != 1) {
6869 dm_conn_state->pbn = pbn;
6870 dm_conn_state->vcpi_slots = slot_num;
6871
6872 drm_dp_mst_atomic_enable_dsc(state,
6873 aconnector->port,
6874 dm_conn_state->pbn,
6875 0,
6876 false);
6877 continue;
6878 }
6879
29b9ba74
ML
6880 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6881 aconnector->port,
6882 pbn, pbn_div,
6883 true);
6884 if (vcpi < 0)
6885 return vcpi;
6886
6887 dm_conn_state->pbn = pbn;
6888 dm_conn_state->vcpi_slots = vcpi;
6889 }
6890 return 0;
6891}
d9fe1a4c 6892#endif
29b9ba74 6893
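/*
 * Worked example (not part of the driver): continuing the ~1908 PBN
 * stream from the encoder check above on an HBR2 x4 link, the divider
 * from dm_mst_get_pbn_divider() is roughly
 *
 *   17.28 Gbps payload = 17280000 kbps / (8 * 1000 * 54) = 40 PBN/slot
 *
 * so slot_num = DIV_ROUND_UP(1908, 40) = 48 of the 63 usable MST time
 * slots - the stream fits, with room left for a second, smaller one.
 */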
7578ecda
AD
6894static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6895 struct drm_plane *plane,
6896 uint32_t crtc_index)
e7b07cee
HW
6897{
6898 struct amdgpu_crtc *acrtc = NULL;
f180b4bc 6899 struct drm_plane *cursor_plane;
e7b07cee
HW
6900
6901 int res = -ENOMEM;
6902
6903 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6904 if (!cursor_plane)
6905 goto fail;
6906
f180b4bc 6907 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
cc1fec57 6908 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
e7b07cee
HW
6909
6910 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6911 if (!acrtc)
6912 goto fail;
6913
6914 res = drm_crtc_init_with_planes(
6915 dm->ddev,
6916 &acrtc->base,
6917 plane,
f180b4bc 6918 cursor_plane,
e7b07cee
HW
6919 &amdgpu_dm_crtc_funcs, NULL);
6920
6921 if (res)
6922 goto fail;
6923
6924 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6925
96719c54
HW
6926 /* Create (reset) the plane state */
6927 if (acrtc->base.funcs->reset)
6928 acrtc->base.funcs->reset(&acrtc->base);
6929
e7b07cee
HW
6930 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6931 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6932
6933 acrtc->crtc_id = crtc_index;
6934 acrtc->base.enabled = false;
c37e2d29 6935 acrtc->otg_inst = -1;
e7b07cee
HW
6936
6937 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
236d0e4f
LSL
6938 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6939 true, MAX_COLOR_LUT_ENTRIES);
086247a4 6940 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
e2881d6d 6941
e7b07cee
HW
6942 return 0;
6943
6944fail:
b830ebc9
HW
6945 kfree(acrtc);
6946 kfree(cursor_plane);
e7b07cee
HW
6947 return res;
6948}
6949
6950
6951static int to_drm_connector_type(enum signal_type st)
6952{
6953 switch (st) {
6954 case SIGNAL_TYPE_HDMI_TYPE_A:
6955 return DRM_MODE_CONNECTOR_HDMIA;
6956 case SIGNAL_TYPE_EDP:
6957 return DRM_MODE_CONNECTOR_eDP;
11c3ee48
AD
6958 case SIGNAL_TYPE_LVDS:
6959 return DRM_MODE_CONNECTOR_LVDS;
e7b07cee
HW
6960 case SIGNAL_TYPE_RGB:
6961 return DRM_MODE_CONNECTOR_VGA;
6962 case SIGNAL_TYPE_DISPLAY_PORT:
6963 case SIGNAL_TYPE_DISPLAY_PORT_MST:
6964 return DRM_MODE_CONNECTOR_DisplayPort;
6965 case SIGNAL_TYPE_DVI_DUAL_LINK:
6966 case SIGNAL_TYPE_DVI_SINGLE_LINK:
6967 return DRM_MODE_CONNECTOR_DVID;
6968 case SIGNAL_TYPE_VIRTUAL:
6969 return DRM_MODE_CONNECTOR_VIRTUAL;
6970
6971 default:
6972 return DRM_MODE_CONNECTOR_Unknown;
6973 }
6974}
6975
2b4c1c05
DV
6976static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6977{
62afb4ad
JRS
6978 struct drm_encoder *encoder;
6979
6980 /* There is only one encoder per connector */
6981 drm_connector_for_each_possible_encoder(connector, encoder)
6982 return encoder;
6983
6984 return NULL;
2b4c1c05
DV
6985}
6986
e7b07cee
HW
6987static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6988{
e7b07cee
HW
6989 struct drm_encoder *encoder;
6990 struct amdgpu_encoder *amdgpu_encoder;
6991
2b4c1c05 6992 encoder = amdgpu_dm_connector_to_encoder(connector);
e7b07cee
HW
6993
6994 if (encoder == NULL)
6995 return;
6996
6997 amdgpu_encoder = to_amdgpu_encoder(encoder);
6998
6999 amdgpu_encoder->native_mode.clock = 0;
7000
7001 if (!list_empty(&connector->probed_modes)) {
7002 struct drm_display_mode *preferred_mode = NULL;
b830ebc9 7003
e7b07cee 7004 list_for_each_entry(preferred_mode,
b830ebc9
HW
7005 &connector->probed_modes,
7006 head) {
7007 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7008 amdgpu_encoder->native_mode = *preferred_mode;
7009
e7b07cee
HW
7010 break;
7011 }
7012
7013 }
7014}
7015
3ee6b26b
AD
7016static struct drm_display_mode *
7017amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7018 char *name,
7019 int hdisplay, int vdisplay)
e7b07cee
HW
7020{
7021 struct drm_device *dev = encoder->dev;
7022 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7023 struct drm_display_mode *mode = NULL;
7024 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7025
7026 mode = drm_mode_duplicate(dev, native_mode);
7027
b830ebc9 7028 if (mode == NULL)
e7b07cee
HW
7029 return NULL;
7030
7031 mode->hdisplay = hdisplay;
7032 mode->vdisplay = vdisplay;
7033 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
090afc1e 7034 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
e7b07cee
HW
7035
7036 return mode;
7037
7038}
7039
7040static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
3ee6b26b 7041 struct drm_connector *connector)
e7b07cee
HW
7042{
7043 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7044 struct drm_display_mode *mode = NULL;
7045 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
c84dec2f
HW
7046 struct amdgpu_dm_connector *amdgpu_dm_connector =
7047 to_amdgpu_dm_connector(connector);
e7b07cee
HW
7048 int i;
7049 int n;
7050 struct mode_size {
7051 char name[DRM_DISPLAY_MODE_LEN];
7052 int w;
7053 int h;
b830ebc9 7054 } common_modes[] = {
e7b07cee
HW
7055 { "640x480", 640, 480},
7056 { "800x600", 800, 600},
7057 { "1024x768", 1024, 768},
7058 { "1280x720", 1280, 720},
7059 { "1280x800", 1280, 800},
7060 {"1280x1024", 1280, 1024},
7061 { "1440x900", 1440, 900},
7062 {"1680x1050", 1680, 1050},
7063 {"1600x1200", 1600, 1200},
7064 {"1920x1080", 1920, 1080},
7065 {"1920x1200", 1920, 1200}
7066 };
7067
b830ebc9 7068 n = ARRAY_SIZE(common_modes);
e7b07cee
HW
7069
7070 for (i = 0; i < n; i++) {
7071 struct drm_display_mode *curmode = NULL;
7072 bool mode_existed = false;
7073
7074 if (common_modes[i].w > native_mode->hdisplay ||
b830ebc9
HW
7075 common_modes[i].h > native_mode->vdisplay ||
7076 (common_modes[i].w == native_mode->hdisplay &&
7077 common_modes[i].h == native_mode->vdisplay))
7078 continue;
e7b07cee
HW
7079
7080 list_for_each_entry(curmode, &connector->probed_modes, head) {
7081 if (common_modes[i].w == curmode->hdisplay &&
b830ebc9 7082 common_modes[i].h == curmode->vdisplay) {
e7b07cee
HW
7083 mode_existed = true;
7084 break;
7085 }
7086 }
7087
7088 if (mode_existed)
7089 continue;
7090
7091 mode = amdgpu_dm_create_common_mode(encoder,
7092 common_modes[i].name, common_modes[i].w,
7093 common_modes[i].h);
588a7017
ZQ
7094 if (!mode)
7095 continue;
7096
e7b07cee 7097 drm_mode_probed_add(connector, mode);
c84dec2f 7098 amdgpu_dm_connector->num_modes++;
e7b07cee
HW
7099 }
7100}
7101
d77de788
SS
7102static void amdgpu_set_panel_orientation(struct drm_connector *connector)
7103{
7104 struct drm_encoder *encoder;
7105 struct amdgpu_encoder *amdgpu_encoder;
7106 const struct drm_display_mode *native_mode;
7107
7108 if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
7109 connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
7110 return;
7111
7112 encoder = amdgpu_dm_connector_to_encoder(connector);
7113 if (!encoder)
7114 return;
7115
7116 amdgpu_encoder = to_amdgpu_encoder(encoder);
7117
7118 native_mode = &amdgpu_encoder->native_mode;
7119 if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
7120 return;
7121
7122 drm_connector_set_panel_orientation_with_quirk(connector,
7123 DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
7124 native_mode->hdisplay,
7125 native_mode->vdisplay);
7126}
7127
3ee6b26b
AD
7128static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7129 struct edid *edid)
e7b07cee 7130{
c84dec2f
HW
7131 struct amdgpu_dm_connector *amdgpu_dm_connector =
7132 to_amdgpu_dm_connector(connector);
e7b07cee
HW
7133
7134 if (edid) {
7135 /* empty probed_modes */
7136 INIT_LIST_HEAD(&connector->probed_modes);
c84dec2f 7137 amdgpu_dm_connector->num_modes =
e7b07cee
HW
7138 drm_add_edid_modes(connector, edid);
7139
f1e5e913
YMM
7140 /* Sort the probed modes before calling
7141 * amdgpu_dm_get_native_mode(), since an EDID can have
7142 * more than one preferred mode. Modes that appear
7143 * later in the probed mode list could be of a higher,
7144 * preferred resolution. For example, a 3840x2160
7145 * mode in the base EDID preferred timing and a 4096x2160
7146 * preferred resolution in a DID extension block later.
7147 */
7148 drm_mode_sort(&connector->probed_modes);
e7b07cee 7149 amdgpu_dm_get_native_mode(connector);
f9b4f20c
SW
7150
7151 /* Freesync capabilities are reset by calling
7152 * drm_add_edid_modes() and need to be
7153 * restored here.
7154 */
7155 amdgpu_dm_update_freesync_caps(connector, edid);
d77de788
SS
7156
7157 amdgpu_set_panel_orientation(connector);
a8d8d3dc 7158 } else {
c84dec2f 7159 amdgpu_dm_connector->num_modes = 0;
a8d8d3dc 7160 }
e7b07cee
HW
7161}
7162
a85ba005
NC
7163static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7164 struct drm_display_mode *mode)
7165{
7166 struct drm_display_mode *m;
7167
7168 list_for_each_entry(m, &aconnector->base.probed_modes, head) {
7169 if (drm_mode_equal(m, mode))
7170 return true;
7171 }
7172
7173 return false;
7174}
7175
7176static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7177{
7178 const struct drm_display_mode *m;
7179 struct drm_display_mode *new_mode;
7180 uint i;
7181 uint32_t new_modes_count = 0;
7182
7183 /* Standard FPS values
7184 *
12cdff6b
SC
7185 * 23.976 - TV/NTSC
7186 * 24 - Cinema
7187 * 25 - TV/PAL
7188 * 29.97 - TV/NTSC
7189 * 30 - TV/NTSC
7190 * 48 - Cinema HFR
7191 * 50 - TV/PAL
7192 * 60 - Commonly used
7193 * 48,72,96,120 - Multiples of 24
a85ba005 7194 */
9ce5ed6e
CIK
7195 static const uint32_t common_rates[] = {
7196 23976, 24000, 25000, 29970, 30000,
12cdff6b 7197 48000, 50000, 60000, 72000, 96000, 120000
9ce5ed6e 7198 };
a85ba005
NC
7199
7200 /*
7201 * Find mode with highest refresh rate with the same resolution
7202 * as the preferred mode. Some monitors report a preferred mode
7203 * with lower resolution than the highest refresh rate supported.
7204 */
7205
7206 m = get_highest_refresh_rate_mode(aconnector, true);
7207 if (!m)
7208 return 0;
7209
7210 for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7211 uint64_t target_vtotal, target_vtotal_diff;
7212 uint64_t num, den;
7213
7214 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7215 continue;
7216
7217 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7218 common_rates[i] > aconnector->max_vfreq * 1000)
7219 continue;
7220
7221 num = (unsigned long long)m->clock * 1000 * 1000;
7222 den = common_rates[i] * (unsigned long long)m->htotal;
7223 target_vtotal = div_u64(num, den);
7224 target_vtotal_diff = target_vtotal - m->vtotal;
7225
7226 /* Check for illegal modes */
7227 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7228 m->vsync_end + target_vtotal_diff < m->vsync_start ||
7229 m->vtotal + target_vtotal_diff < m->vsync_end)
7230 continue;
7231
7232 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7233 if (!new_mode)
7234 goto out;
7235
7236 new_mode->vtotal += (u16)target_vtotal_diff;
7237 new_mode->vsync_start += (u16)target_vtotal_diff;
7238 new_mode->vsync_end += (u16)target_vtotal_diff;
7239 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7240 new_mode->type |= DRM_MODE_TYPE_DRIVER;
7241
7242 if (!is_duplicate_mode(aconnector, new_mode)) {
7243 drm_mode_probed_add(&aconnector->base, new_mode);
7244 new_modes_count += 1;
7245 } else
7246 drm_mode_destroy(aconnector->base.dev, new_mode);
7247 }
7248 out:
7249 return new_modes_count;
7250}
7251
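/*
 * Worked example (not part of the driver): take a 1920x1080@60 base
 * mode with clock = 148500 kHz, htotal = 2200, vtotal = 1125, and the
 * 30 Hz entry from common_rates[]:
 *
 *   target_vtotal = 148500 * 1000 * 1000 / (30000 * 2200) = 2250
 *   target_vtotal_diff = 2250 - 1125 = 1125
 *
 * Stretching vtotal/vsync by 1125 lines leaves the pixel clock and
 * horizontal timing untouched, and 148.5 MHz / (2200 * 2250) = 30 Hz
 * exactly: the synthesized mode is just the base mode with a longer
 * vertical front porch, which is how the fixed lower-rate freesync
 * video modes are derived.
 */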
7252static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7253 struct edid *edid)
7254{
7255 struct amdgpu_dm_connector *amdgpu_dm_connector =
7256 to_amdgpu_dm_connector(connector);
7257
de05abe6 7258 if (!edid)
a85ba005 7259 return;
fe8858bb 7260
a85ba005
NC
7261 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7262 amdgpu_dm_connector->num_modes +=
7263 add_fs_modes(amdgpu_dm_connector);
7264}
7265
7578ecda 7266static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
e7b07cee 7267{
c84dec2f
HW
7268 struct amdgpu_dm_connector *amdgpu_dm_connector =
7269 to_amdgpu_dm_connector(connector);
e7b07cee 7270 struct drm_encoder *encoder;
c84dec2f 7271 struct edid *edid = amdgpu_dm_connector->edid;
e7b07cee 7272
2b4c1c05 7273 encoder = amdgpu_dm_connector_to_encoder(connector);
3e332d3a 7274
5c0e6840 7275 if (!drm_edid_is_valid(edid)) {
1b369d3c
ML
7276 amdgpu_dm_connector->num_modes =
7277 drm_add_modes_noedid(connector, 640, 480);
85ee15d6
ML
7278 } else {
7279 amdgpu_dm_connector_ddc_get_modes(connector, edid);
7280 amdgpu_dm_connector_add_common_modes(encoder, connector);
a85ba005 7281 amdgpu_dm_connector_add_freesync_modes(connector, edid);
85ee15d6 7282 }
3e332d3a 7283 amdgpu_dm_fbc_init(connector);
5099114b 7284
c84dec2f 7285 return amdgpu_dm_connector->num_modes;
e7b07cee
HW
7286}
7287
3ee6b26b
AD
7288void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7289 struct amdgpu_dm_connector *aconnector,
7290 int connector_type,
7291 struct dc_link *link,
7292 int link_index)
e7b07cee 7293{
1348969a 7294 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
e7b07cee 7295
f04bee34
NK
7296 /*
7297 * Some of the properties below require access to state, like bpc.
7298 * Allocate some default initial connector state with our reset helper.
7299 */
7300 if (aconnector->base.funcs->reset)
7301 aconnector->base.funcs->reset(&aconnector->base);
7302
e7b07cee
HW
7303 aconnector->connector_id = link_index;
7304 aconnector->dc_link = link;
7305 aconnector->base.interlace_allowed = false;
7306 aconnector->base.doublescan_allowed = false;
7307 aconnector->base.stereo_allowed = false;
7308 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7309 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6ce8f316 7310 aconnector->audio_inst = -1;
e7b07cee
HW
7311 mutex_init(&aconnector->hpd_lock);
7312
1f6010a9
DF
7313 /*
7314 * Configure HPD hot-plug support: connector->polled defaults to 0,
b830ebc9
HW
7315 * which means HPD hot plug is not supported.
7316 */
e7b07cee
HW
7317 switch (connector_type) {
7318 case DRM_MODE_CONNECTOR_HDMIA:
7319 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
e7baae1c 7320 aconnector->base.ycbcr_420_allowed =
9ea59d5a 7321 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
e7b07cee
HW
7322 break;
7323 case DRM_MODE_CONNECTOR_DisplayPort:
7324 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
d715c9a2 7325 link->link_enc = link_enc_cfg_get_link_enc(link);
7b201d53 7326 ASSERT(link->link_enc);
f6e03f80
JS
7327 if (link->link_enc)
7328 aconnector->base.ycbcr_420_allowed =
9ea59d5a 7329 link->link_enc->features.dp_ycbcr420_supported ? true : false;
e7b07cee
HW
7330 break;
7331 case DRM_MODE_CONNECTOR_DVID:
7332 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7333 break;
7334 default:
7335 break;
7336 }
7337
7338 drm_object_attach_property(&aconnector->base.base,
7339 dm->ddev->mode_config.scaling_mode_property,
7340 DRM_MODE_SCALE_NONE);
7341
7342 drm_object_attach_property(&aconnector->base.base,
7343 adev->mode_info.underscan_property,
7344 UNDERSCAN_OFF);
7345 drm_object_attach_property(&aconnector->base.base,
7346 adev->mode_info.underscan_hborder_property,
7347 0);
7348 drm_object_attach_property(&aconnector->base.base,
7349 adev->mode_info.underscan_vborder_property,
7350 0);
1825fd34 7351
8c61b31e
JFZ
7352 if (!aconnector->mst_port)
7353 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
1825fd34 7354
4a8ca46b
RL
7355 /* This defaults to the max in the range, but we want 8 bpc for non-eDP. */
7356 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7357 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
e7b07cee 7358
c1ee92f9 7359 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
5cb32419 7360 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
c1ee92f9
DF
7361 drm_object_attach_property(&aconnector->base.base,
7362 adev->mode_info.abm_level_property, 0);
7363 }
bb47de73
NK
7364
7365 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7fad8da1
NK
7366 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7367 connector_type == DRM_MODE_CONNECTOR_eDP) {
e057b52c 7368 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
88694af9 7369
8c61b31e
JFZ
7370 if (!aconnector->mst_port)
7371 drm_connector_attach_vrr_capable_property(&aconnector->base);
7372
0c8620d6 7373#ifdef CONFIG_DRM_AMD_DC_HDCP
e22bb562 7374 if (adev->dm.hdcp_workqueue)
53e108aa 7375 drm_connector_attach_content_protection_property(&aconnector->base, true);
0c8620d6 7376#endif
bb47de73 7377 }
e7b07cee
HW
7378}
7379
7578ecda
AD
7380static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7381 struct i2c_msg *msgs, int num)
e7b07cee
HW
7382{
7383 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7384 struct ddc_service *ddc_service = i2c->ddc_service;
7385 struct i2c_command cmd;
7386 int i;
7387 int result = -EIO;
7388
b830ebc9 7389 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
e7b07cee
HW
7390
7391 if (!cmd.payloads)
7392 return result;
7393
7394 cmd.number_of_payloads = num;
7395 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7396 cmd.speed = 100;
7397
7398 for (i = 0; i < num; i++) {
7399 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7400 cmd.payloads[i].address = msgs[i].addr;
7401 cmd.payloads[i].length = msgs[i].len;
7402 cmd.payloads[i].data = msgs[i].buf;
7403 }
7404
c85e6e54
DF
7405 if (dc_submit_i2c(
7406 ddc_service->ctx->dc,
22676bc5 7407 ddc_service->link->link_index,
e7b07cee
HW
7408 &cmd))
7409 result = num;
7410
7411 kfree(cmd.payloads);
7412 return result;
7413}
7414
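/*
 * Illustrative sketch (not part of the driver): since the adapter
 * implements master_xfer, ordinary i2c clients work against it. A
 * hypothetical two-message EDID read at the usual DDC address 0x50
 * would be translated into two dc i2c payloads by the function above:
 */
#if 0 /* example only */
static int read_edid_block0(struct i2c_adapter *adap, u8 *buf)
{
	u8 offset = 0;
	struct i2c_msg msgs[] = {
		{ .addr = 0x50, .flags = 0,        .len = 1,   .buf = &offset },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = buf },
	};

	/* i2c_transfer() returns the number of messages on success. */
	return i2c_transfer(adap, msgs, ARRAY_SIZE(msgs)) == 2 ? 0 : -EIO;
}
#endif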
7578ecda 7415static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
e7b07cee
HW
7416{
7417 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7418}
7419
7420static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7421 .master_xfer = amdgpu_dm_i2c_xfer,
7422 .functionality = amdgpu_dm_i2c_func,
7423};
7424
3ee6b26b
AD
7425static struct amdgpu_i2c_adapter *
7426create_i2c(struct ddc_service *ddc_service,
7427 int link_index,
7428 int *res)
e7b07cee
HW
7429{
7430 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7431 struct amdgpu_i2c_adapter *i2c;
7432
b830ebc9 7433 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
2a55f096
ES
7434 if (!i2c)
7435 return NULL;
e7b07cee
HW
7436 i2c->base.owner = THIS_MODULE;
7437 i2c->base.class = I2C_CLASS_DDC;
7438 i2c->base.dev.parent = &adev->pdev->dev;
7439 i2c->base.algo = &amdgpu_dm_i2c_algo;
b830ebc9 7440 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
e7b07cee
HW
7441 i2c_set_adapdata(&i2c->base, i2c);
7442 i2c->ddc_service = ddc_service;
7443
7444 return i2c;
7445}
7446
89fc8d4e 7447
1f6010a9
DF
7448/*
7449 * Note: this function assumes that dc_link_detect() was called for the
b830ebc9
HW
7450 * dc_link which will be represented by this aconnector.
7451 */
7578ecda
AD
7452static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7453 struct amdgpu_dm_connector *aconnector,
7454 uint32_t link_index,
7455 struct amdgpu_encoder *aencoder)
e7b07cee
HW
7456{
7457 int res = 0;
7458 int connector_type;
7459 struct dc *dc = dm->dc;
7460 struct dc_link *link = dc_get_link_at_index(dc, link_index);
7461 struct amdgpu_i2c_adapter *i2c;
9a227d26
TSD
7462
7463 link->priv = aconnector;
e7b07cee 7464
f1ad2f5e 7465 DRM_DEBUG_DRIVER("%s()\n", __func__);
e7b07cee
HW
7466
7467 i2c = create_i2c(link->ddc, link->link_index, &res);
2a55f096
ES
7468 if (!i2c) {
7469 DRM_ERROR("Failed to create i2c adapter data\n");
7470 return -ENOMEM;
7471 }
7472
e7b07cee
HW
7473 aconnector->i2c = i2c;
7474 res = i2c_add_adapter(&i2c->base);
7475
7476 if (res) {
7477 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7478 goto out_free;
7479 }
7480
7481 connector_type = to_drm_connector_type(link->connector_signal);
7482
17165de2 7483 res = drm_connector_init_with_ddc(
e7b07cee
HW
7484 dm->ddev,
7485 &aconnector->base,
7486 &amdgpu_dm_connector_funcs,
17165de2
AP
7487 connector_type,
7488 &i2c->base);
e7b07cee
HW
7489
7490 if (res) {
7491 DRM_ERROR("connector_init failed\n");
7492 aconnector->connector_id = -1;
7493 goto out_free;
7494 }
7495
7496 drm_connector_helper_add(
7497 &aconnector->base,
7498 &amdgpu_dm_connector_helper_funcs);
7499
7500 amdgpu_dm_connector_init_helper(
7501 dm,
7502 aconnector,
7503 connector_type,
7504 link,
7505 link_index);
7506
cde4c44d 7507 drm_connector_attach_encoder(
e7b07cee
HW
7508 &aconnector->base, &aencoder->base);
7509
e7b07cee
HW
7510 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7511 || connector_type == DRM_MODE_CONNECTOR_eDP)
7daec99f 7512 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
e7b07cee 7513
e7b07cee
HW
7514out_free:
7515 if (res) {
7516 kfree(i2c);
7517 aconnector->i2c = NULL;
7518 }
7519 return res;
7520}
7521
7522int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7523{
7524 switch (adev->mode_info.num_crtc) {
7525 case 1:
7526 return 0x1;
7527 case 2:
7528 return 0x3;
7529 case 3:
7530 return 0x7;
7531 case 4:
7532 return 0xf;
7533 case 5:
7534 return 0x1f;
7535 case 6:
7536 default:
7537 return 0x3f;
7538 }
7539}
7540
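/*
 * Illustrative note (not part of the driver): for 1-6 CRTCs the switch
 * above is equivalent to GENMASK(num_crtc - 1, 0) - one bit per CRTC,
 * e.g. 4 CRTCs -> 0xf. The switch form additionally clamps any larger
 * count to 0x3f (6 bits).
 */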
7578ecda
AD
7541static int amdgpu_dm_encoder_init(struct drm_device *dev,
7542 struct amdgpu_encoder *aencoder,
7543 uint32_t link_index)
e7b07cee 7544{
1348969a 7545 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
7546
7547 int res = drm_encoder_init(dev,
7548 &aencoder->base,
7549 &amdgpu_dm_encoder_funcs,
7550 DRM_MODE_ENCODER_TMDS,
7551 NULL);
7552
7553 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7554
7555 if (!res)
7556 aencoder->encoder_id = link_index;
7557 else
7558 aencoder->encoder_id = -1;
7559
7560 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7561
7562 return res;
7563}
7564
3ee6b26b
AD
7565static void manage_dm_interrupts(struct amdgpu_device *adev,
7566 struct amdgpu_crtc *acrtc,
7567 bool enable)
e7b07cee
HW
7568{
7569 /*
8fe684e9
NK
7570 * We have no guarantee that the frontend index maps to the same
7571 * backend index - some even map to more than one.
7572 *
7573 * TODO: Use a different interrupt or check DC itself for the mapping.
e7b07cee
HW
7574 */
7575 int irq_type =
734dd01d 7576 amdgpu_display_crtc_idx_to_irq_type(
e7b07cee
HW
7577 adev,
7578 acrtc->crtc_id);
7579
7580 if (enable) {
7581 drm_crtc_vblank_on(&acrtc->base);
7582 amdgpu_irq_get(
7583 adev,
7584 &adev->pageflip_irq,
7585 irq_type);
86bc2219
WL
7586#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7587 amdgpu_irq_get(
7588 adev,
7589 &adev->vline0_irq,
7590 irq_type);
7591#endif
e7b07cee 7592 } else {
86bc2219
WL
7593#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7594 amdgpu_irq_put(
7595 adev,
7596 &adev->vline0_irq,
7597 irq_type);
7598#endif
e7b07cee
HW
7599 amdgpu_irq_put(
7600 adev,
7601 &adev->pageflip_irq,
7602 irq_type);
7603 drm_crtc_vblank_off(&acrtc->base);
7604 }
7605}
7606
8fe684e9
NK
7607static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7608 struct amdgpu_crtc *acrtc)
7609{
7610 int irq_type =
7611 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7612
7613 /**
7614 * This reads the current state for the IRQ and forcibly reapplies
7615 * the setting to hardware.
7616 */
7617 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7618}
7619
3ee6b26b
AD
7620static bool
7621is_scaling_state_different(const struct dm_connector_state *dm_state,
7622 const struct dm_connector_state *old_dm_state)
e7b07cee
HW
7623{
7624 if (dm_state->scaling != old_dm_state->scaling)
7625 return true;
7626 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7627 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7628 return true;
7629 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7630 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7631 return true;
b830ebc9
HW
7632 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7633 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7634 return true;
e7b07cee
HW
7635 return false;
7636}
7637
0c8620d6
BL
7638#ifdef CONFIG_DRM_AMD_DC_HDCP
7639static bool is_content_protection_different(struct drm_connector_state *state,
7640 const struct drm_connector_state *old_state,
7641 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7642{
7643 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
97f6c917 7644 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
0c8620d6 7645
31c0ed90 7646 /* Handle: Type0/1 change */
53e108aa
BL
7647 if (old_state->hdcp_content_type != state->hdcp_content_type &&
7648 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7649 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7650 return true;
7651 }
7652
31c0ed90
BL
7653 /* CP is being re-enabled, ignore this
7654 *
7655 * Handles: ENABLED -> DESIRED
7656 */
0c8620d6
BL
7657 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7658 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7659 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7660 return false;
7661 }
7662
31c0ed90
BL
7663 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7664 *
7665 * Handles: UNDESIRED -> ENABLED
7666 */
0c8620d6
BL
7667 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7668 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7669 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7670
0d9a947b
QZ
7671 /* Stream removed and re-enabled
7672 *
7673 * Can sometimes overlap with the HPD case,
7674 * thus set update_hdcp to false to avoid
7675 * setting HDCP multiple times.
7676 *
7677 * Handles: DESIRED -> DESIRED (Special case)
7678 */
7679 if (!(old_state->crtc && old_state->crtc->enabled) &&
7680 state->crtc && state->crtc->enabled &&
7681 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7682 dm_con_state->update_hdcp = false;
7683 return true;
7684 }
7685
7686 /* Hot-plug, headless S3, DPMS
7687 *
7688 * Only start HDCP if the display is connected/enabled.
7689 * update_hdcp flag will be set to false until the next
7690 * HPD comes in.
31c0ed90
BL
7691 *
7692 * Handles: DESIRED -> DESIRED (Special case)
0c8620d6 7693 */
97f6c917
BL
7694 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7695 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7696 dm_con_state->update_hdcp = false;
0c8620d6 7697 return true;
97f6c917 7698 }
0c8620d6 7699
31c0ed90
BL
7700 /*
7701 * Handles: UNDESIRED -> UNDESIRED
7702 * DESIRED -> DESIRED
7703 * ENABLED -> ENABLED
7704 */
0c8620d6
BL
7705 if (old_state->content_protection == state->content_protection)
7706 return false;
7707
31c0ed90
BL
7708 /*
7709 * Handles: UNDESIRED -> DESIRED
7710 * DESIRED -> UNDESIRED
7711 * ENABLED -> UNDESIRED
7712 */
97f6c917 7713 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
0c8620d6
BL
7714 return true;
7715
31c0ed90
BL
7716 /*
7717 * Handles: DESIRED -> ENABLED
7718 */
0c8620d6
BL
7719 return false;
7720}
7721
0c8620d6 7722#endif
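/*
 * At a glance, the transitions resolved by the function above:
 *
 *   content type changed (not UNDESIRED)   -> true,  demoted to DESIRED
 *   ENABLED -> DESIRED                     -> false, restored to ENABLED
 *   UNDESIRED -> ENABLED (S3 resume)       -> demoted to DESIRED first
 *   DESIRED -> DESIRED (stream re-enable
 *   or hot-plug with a live sink)          -> true,  update_hdcp cleared
 *   no change at all                       -> false
 *   any other change away from ENABLED     -> true
 *   DESIRED -> ENABLED                     -> false (driver completes it)
 */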
3ee6b26b
AD
7723static void remove_stream(struct amdgpu_device *adev,
7724 struct amdgpu_crtc *acrtc,
7725 struct dc_stream_state *stream)
e7b07cee
HW
7726{
7727 /* this is the update mode case */
e7b07cee
HW
7728
7729 acrtc->otg_inst = -1;
7730 acrtc->enabled = false;
7731}
7732
e7b07cee
HW
7733static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7734{
7736 assert_spin_locked(&acrtc->base.dev->event_lock);
7737 WARN_ON(acrtc->event);
7738
7739 acrtc->event = acrtc->base.state->event;
7740
7741 /* Set the flip status */
7742 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7743
7744 /* Mark this event as consumed */
7745 acrtc->base.state->event = NULL;
7746
cb2318b7
VL
7747 DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7748 acrtc->crtc_id);
e7b07cee
HW
7749}
7750
bb47de73
NK
7751static void update_freesync_state_on_stream(
7752 struct amdgpu_display_manager *dm,
7753 struct dm_crtc_state *new_crtc_state,
180db303
NK
7754 struct dc_stream_state *new_stream,
7755 struct dc_plane_state *surface,
7756 u32 flip_timestamp_in_us)
bb47de73 7757{
09aef2c4 7758 struct mod_vrr_params vrr_params;
bb47de73 7759 struct dc_info_packet vrr_infopacket = {0};
09aef2c4 7760 struct amdgpu_device *adev = dm->adev;
585d450c 7761 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
09aef2c4 7762 unsigned long flags;
4cda3243 7763 bool pack_sdp_v1_3 = false;
bb47de73
NK
7764
7765 if (!new_stream)
7766 return;
7767
7768 /*
7769 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7770 * For now it's sufficient to just guard against these conditions.
7771 */
7772
7773 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7774 return;
7775
4a580877 7776 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
585d450c 7777 vrr_params = acrtc->dm_irq_params.vrr_params;
09aef2c4 7778
180db303
NK
7779 if (surface) {
7780 mod_freesync_handle_preflip(
7781 dm->freesync_module,
7782 surface,
7783 new_stream,
7784 flip_timestamp_in_us,
7785 &vrr_params);
09aef2c4
MK
7786
7787 if (adev->family < AMDGPU_FAMILY_AI &&
7788 amdgpu_dm_vrr_active(new_crtc_state)) {
7789 mod_freesync_handle_v_update(dm->freesync_module,
7790 new_stream, &vrr_params);
e63e2491
EB
7791
7792 /* Need to call this before the frame ends. */
7793 dc_stream_adjust_vmin_vmax(dm->dc,
7794 new_crtc_state->stream,
7795 &vrr_params.adjust);
09aef2c4 7796 }
180db303 7797 }
bb47de73
NK
7798
7799 mod_freesync_build_vrr_infopacket(
7800 dm->freesync_module,
7801 new_stream,
180db303 7802 &vrr_params,
ecd0136b
HT
7803 PACKET_TYPE_VRR,
7804 TRANSFER_FUNC_UNKNOWN,
4cda3243
MT
7805 &vrr_infopacket,
7806 pack_sdp_v1_3);
bb47de73 7807
8a48b44c 7808 new_crtc_state->freesync_timing_changed |=
585d450c 7809 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
180db303
NK
7810 &vrr_params.adjust,
7811 sizeof(vrr_params.adjust)) != 0);
bb47de73 7812
8a48b44c 7813 new_crtc_state->freesync_vrr_info_changed |=
bb47de73
NK
7814 (memcmp(&new_crtc_state->vrr_infopacket,
7815 &vrr_infopacket,
7816 sizeof(vrr_infopacket)) != 0);
7817
585d450c 7818 acrtc->dm_irq_params.vrr_params = vrr_params;
bb47de73
NK
7819 new_crtc_state->vrr_infopacket = vrr_infopacket;
7820
585d450c 7821 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
bb47de73
NK
7822 new_stream->vrr_infopacket = vrr_infopacket;
7823
7824 if (new_crtc_state->freesync_vrr_info_changed)
7825 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
7826 new_crtc_state->base.crtc->base.id,
7827 (int)new_crtc_state->base.vrr_enabled,
180db303 7828 (int)vrr_params.state);
09aef2c4 7829
4a580877 7830 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
bb47de73
NK
7831}
7832
585d450c 7833static void update_stream_irq_parameters(
e854194c
MK
7834 struct amdgpu_display_manager *dm,
7835 struct dm_crtc_state *new_crtc_state)
7836{
7837 struct dc_stream_state *new_stream = new_crtc_state->stream;
09aef2c4 7838 struct mod_vrr_params vrr_params;
e854194c 7839 struct mod_freesync_config config = new_crtc_state->freesync_config;
09aef2c4 7840 struct amdgpu_device *adev = dm->adev;
585d450c 7841 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
09aef2c4 7842 unsigned long flags;
e854194c
MK
7843
7844 if (!new_stream)
7845 return;
7846
7847 /*
7848 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7849 * For now it's sufficient to just guard against these conditions.
7850 */
7851 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7852 return;
7853
4a580877 7854 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
585d450c 7855 vrr_params = acrtc->dm_irq_params.vrr_params;
09aef2c4 7856
e854194c
MK
7857 if (new_crtc_state->vrr_supported &&
7858 config.min_refresh_in_uhz &&
7859 config.max_refresh_in_uhz) {
a85ba005
NC
7860 /*
7861 * if freesync compatible mode was set, config.state will be set
7862 * in atomic check
7863 */
7864 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
7865 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
7866 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
7867 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
7868 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
7869 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
7870 vrr_params.state = VRR_STATE_ACTIVE_FIXED;
7871 } else {
7872 config.state = new_crtc_state->base.vrr_enabled ?
7873 VRR_STATE_ACTIVE_VARIABLE :
7874 VRR_STATE_INACTIVE;
7875 }
e854194c
MK
7876 } else {
7877 config.state = VRR_STATE_UNSUPPORTED;
7878 }
7879
7880 mod_freesync_build_vrr_params(dm->freesync_module,
7881 new_stream,
7882 &config, &vrr_params);
7883
7884 new_crtc_state->freesync_timing_changed |=
585d450c
AP
7885 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7886 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
e854194c 7887
585d450c
AP
7888 new_crtc_state->freesync_config = config;
7889 /* Copy state for access from DM IRQ handler */
7890 acrtc->dm_irq_params.freesync_config = config;
7891 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7892 acrtc->dm_irq_params.vrr_params = vrr_params;
4a580877 7893 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
e854194c
MK
7894}
7895
66b0c973
MK
7896static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7897 struct dm_crtc_state *new_state)
7898{
7899 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7900 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7901
7902 if (!old_vrr_active && new_vrr_active) {
7903 /* Transition VRR inactive -> active:
7904 * While VRR is active, we must not disable vblank irq, as a
7905 * reenable after disable would compute bogus vblank/pflip
7906 * timestamps if the reenable happens inside the display front porch.
d2574c33
MK
7907 *
7908 * We also need vupdate irq for the actual core vblank handling
7909 * at end of vblank.
66b0c973 7910 */
d2574c33 7911 dm_set_vupdate_irq(new_state->base.crtc, true);
66b0c973
MK
7912 drm_crtc_vblank_get(new_state->base.crtc);
7913 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7914 __func__, new_state->base.crtc->base.id);
7915 } else if (old_vrr_active && !new_vrr_active) {
7916 /* Transition VRR active -> inactive:
7917 * Allow vblank irq disable again for fixed refresh rate.
7918 */
d2574c33 7919 dm_set_vupdate_irq(new_state->base.crtc, false);
66b0c973
MK
7920 drm_crtc_vblank_put(new_state->base.crtc);
7921 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7922 __func__, new_state->base.crtc->base.id);
7923 }
7924}
7925
8ad27806
NK
7926static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7927{
7928 struct drm_plane *plane;
5760dcb9 7929 struct drm_plane_state *old_plane_state;
8ad27806
NK
7930 int i;
7931
7932 /*
7933 * TODO: Make this per-stream so we don't issue redundant updates for
7934 * commits with multiple streams.
7935 */
5760dcb9 7936 for_each_old_plane_in_state(state, plane, old_plane_state, i)
8ad27806
NK
7937 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7938 handle_cursor_update(plane, old_plane_state);
7939}
7940
3be5262e 7941static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
eb3dc897 7942 struct dc_state *dc_state,
3ee6b26b
AD
7943 struct drm_device *dev,
7944 struct amdgpu_display_manager *dm,
7945 struct drm_crtc *pcrtc,
420cd472 7946 bool wait_for_vblank)
e7b07cee 7947{
efc8278e 7948 uint32_t i;
8a48b44c 7949 uint64_t timestamp_ns;
e7b07cee 7950 struct drm_plane *plane;
0bc9706d 7951 struct drm_plane_state *old_plane_state, *new_plane_state;
e7b07cee 7952 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
0bc9706d
LSL
7953 struct drm_crtc_state *new_pcrtc_state =
7954 drm_atomic_get_new_crtc_state(state, pcrtc);
7955 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
44d09c6a
HW
7956 struct dm_crtc_state *dm_old_crtc_state =
7957 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
74aa7bd4 7958 int planes_count = 0, vpos, hpos;
e7b07cee 7959 unsigned long flags;
fdd1fe57
MK
7960 uint32_t target_vblank, last_flip_vblank;
7961 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
cc79950b 7962 bool cursor_update = false;
74aa7bd4 7963 bool pflip_present = false;
bc7f670e
DF
7964 struct {
7965 struct dc_surface_update surface_updates[MAX_SURFACES];
7966 struct dc_plane_info plane_infos[MAX_SURFACES];
7967 struct dc_scaling_info scaling_infos[MAX_SURFACES];
74aa7bd4 7968 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
bc7f670e 7969 struct dc_stream_update stream_update;
74aa7bd4 7970 } *bundle;
bc7f670e 7971
74aa7bd4 7972 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8a48b44c 7973
74aa7bd4
DF
7974 if (!bundle) {
7975 dm_error("Failed to allocate update bundle\n");
4b510503
NK
7976 goto cleanup;
7977 }
e7b07cee 7978
8ad27806
NK
7979 /*
7980 * Disable the cursor first if we're disabling all the planes.
7981 * It'll remain on the screen after the planes are re-enabled
7982 * if we don't.
7983 */
7984 if (acrtc_state->active_planes == 0)
7985 amdgpu_dm_commit_cursors(state);
7986
e7b07cee 7987 /* update planes when needed */
efc8278e 7988 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
0bc9706d 7989 struct drm_crtc *crtc = new_plane_state->crtc;
f5ba60fe 7990 struct drm_crtc_state *new_crtc_state;
0bc9706d 7991 struct drm_framebuffer *fb = new_plane_state->fb;
6eed95b0 7992 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
34bafd27 7993 bool plane_needs_flip;
c7af5f77 7994 struct dc_plane_state *dc_plane;
54d76575 7995 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
e7b07cee 7996
80c218d5 7997 /* Cursor plane is handled after stream updates */
cc79950b
MD
7998 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
7999 if ((fb && crtc == pcrtc) ||
8000 (old_plane_state->fb && old_plane_state->crtc == pcrtc))
8001 cursor_update = true;
8002
e7b07cee 8003 continue;
cc79950b 8004 }
e7b07cee 8005
f5ba60fe
DD
8006 if (!fb || !crtc || pcrtc != crtc)
8007 continue;
8008
8009 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8010 if (!new_crtc_state->active)
e7b07cee
HW
8011 continue;
8012
bc7f670e 8013 dc_plane = dm_new_plane_state->dc_state;
e7b07cee 8014
74aa7bd4 8015 bundle->surface_updates[planes_count].surface = dc_plane;
bc7f670e 8016 if (new_pcrtc_state->color_mgmt_changed) {
74aa7bd4
DF
8017 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8018 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
44efb784 8019 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
bc7f670e 8020 }
8a48b44c 8021
4375d625 8022 fill_dc_scaling_info(dm->adev, new_plane_state,
695af5f9 8023 &bundle->scaling_infos[planes_count]);
8a48b44c 8024
695af5f9
NK
8025 bundle->surface_updates[planes_count].scaling_info =
8026 &bundle->scaling_infos[planes_count];
8a48b44c 8027
f5031000 8028 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8a48b44c 8029
f5031000 8030 pflip_present = pflip_present || plane_needs_flip;
8a48b44c 8031
f5031000
DF
8032 if (!plane_needs_flip) {
8033 planes_count += 1;
8034 continue;
8035 }
8a48b44c 8036
695af5f9 8037 fill_dc_plane_info_and_addr(
8ce5d842 8038 dm->adev, new_plane_state,
6eed95b0 8039 afb->tiling_flags,
695af5f9 8040 &bundle->plane_infos[planes_count],
87b7ebc2 8041 &bundle->flip_addrs[planes_count].address,
6eed95b0 8042 afb->tmz_surface, false);
87b7ebc2 8043
9f07550b 8044 drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
87b7ebc2
RS
8045 new_plane_state->plane->index,
8046 bundle->plane_infos[planes_count].dcc.enable);
695af5f9
NK
8047
8048 bundle->surface_updates[planes_count].plane_info =
8049 &bundle->plane_infos[planes_count];
8a48b44c 8050
7cc191ee
LL
8051 fill_dc_dirty_rects(plane, old_plane_state, new_plane_state,
8052 new_crtc_state,
8053 &bundle->flip_addrs[planes_count]);
8054
caff0e66
NK
8055 /*
8056 * Only allow immediate flips for fast updates that don't
8057 * change FB pitch, DCC state, rotation or mirroring.
8058 */
f5031000 8059 bundle->flip_addrs[planes_count].flip_immediate =
4d85f45c 8060 crtc->state->async_flip &&
caff0e66 8061 acrtc_state->update_type == UPDATE_TYPE_FAST;
8a48b44c 8062
f5031000
DF
8063 timestamp_ns = ktime_get_ns();
8064 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8065 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8066 bundle->surface_updates[planes_count].surface = dc_plane;
8a48b44c 8067
f5031000
DF
8068 if (!bundle->surface_updates[planes_count].surface) {
8069 DRM_ERROR("No surface for CRTC: id=%d\n",
8070 acrtc_attach->crtc_id);
8071 continue;
bc7f670e
DF
8072 }
8073
f5031000
DF
8074 if (plane == pcrtc->primary)
8075 update_freesync_state_on_stream(
8076 dm,
8077 acrtc_state,
8078 acrtc_state->stream,
8079 dc_plane,
8080 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
bc7f670e 8081
9f07550b 8082 drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
f5031000
DF
8083 __func__,
8084 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8085 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
bc7f670e
DF
8086
8087 planes_count += 1;
8088
8a48b44c
DF
8089 }
8090
74aa7bd4 8091 if (pflip_present) {
634092b1
MK
8092 if (!vrr_active) {
8093 /* Use old throttling in non-vrr fixed refresh rate mode
8094 * to keep flip scheduling based on target vblank counts
8095 * working in a backwards compatible way, e.g., for
8096 * clients using the GLX_OML_sync_control extension or
8097 * DRI3/Present extension with defined target_msc.
8098 */
e3eff4b5 8099 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
634092b1
MK
8100 } else {
8102 /* For variable refresh rate mode only:
8103 * Get vblank of last completed flip to avoid > 1 vrr
8104 * flips per video frame by use of throttling, but allow
8105 * flip programming anywhere in the possibly large
8106 * variable vrr vblank interval for fine-grained flip
8107 * timing control and more opportunity to avoid stutter
8108 * on late submission of flips.
8109 */
8110 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
5d1c59c4 8111 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
634092b1
MK
8112 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8113 }
8114
fdd1fe57 8115 target_vblank = last_flip_vblank + wait_for_vblank;
8a48b44c
DF
8116
8117 /*
8118 * Wait until we're out of the vertical blank period before the one
8119 * targeted by the flip
8120 */
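/*
 * Equivalent sketch of the throttle below (illustrative only;
 * in_vblank() and vblank_count() are hypothetical shorthands for the
 * amdgpu_display_get_crtc_scanoutpos() and
 * amdgpu_get_vblank_counter_kms() calls in the real loop):
 *
 *   while (crtc_enabled && in_vblank(crtc) &&
 *          (int)(target_vblank - vblank_count(crtc)) > 0)
 *           usleep_range(1000, 1100);   // poll roughly once per ms
 */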
8121 while ((acrtc_attach->enabled &&
8122 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8123 0, &vpos, &hpos, NULL,
8124 NULL, &pcrtc->hwmode)
8125 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8126 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8127 (int)(target_vblank -
e3eff4b5 8128 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8a48b44c
DF
8129 usleep_range(1000, 1100);
8130 }
8131
8fe684e9
NK
8132 /**
8133 * Prepare the flip event for the pageflip interrupt to handle.
8134 *
8135 * This only works in the case where we've already turned on the
8136 * appropriate hardware blocks (e.g. HUBP) so in the transition case
8137 * from 0 -> n planes we have to skip a hardware generated event
8138 * and rely on sending it from software.
8139 */
8140 if (acrtc_attach->base.state->event &&
10a36226 8141 acrtc_state->active_planes > 0) {
8a48b44c
DF
8142 drm_crtc_vblank_get(pcrtc);
8143
8144 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8145
8146 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8147 prepare_flip_isr(acrtc_attach);
8148
8149 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8150 }
8151
8152 if (acrtc_state->stream) {
8a48b44c 8153 if (acrtc_state->freesync_vrr_info_changed)
74aa7bd4 8154 bundle->stream_update.vrr_infopacket =
8a48b44c 8155 &acrtc_state->stream->vrr_infopacket;
e7b07cee 8156 }
cc79950b
MD
8157 } else if (cursor_update && acrtc_state->active_planes > 0 &&
8158 acrtc_attach->base.state->event) {
8159 drm_crtc_vblank_get(pcrtc);
8160
8161 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8162
8163 acrtc_attach->event = acrtc_attach->base.state->event;
8164 acrtc_attach->base.state->event = NULL;
8165
8166 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
e7b07cee
HW
8167 }
8168
bc92c065 8169 /* Update the planes if changed or disable if we don't have any. */
ed9656fb
ES
8170 if ((planes_count || acrtc_state->active_planes == 0) &&
8171 acrtc_state->stream) {
58aa1c50
NK
8172 /*
8173 * If PSR or idle optimizations are enabled then flush out
8174 * any pending work before hardware programming.
8175 */
06dd1888
NK
8176 if (dm->vblank_control_workqueue)
8177 flush_workqueue(dm->vblank_control_workqueue);
58aa1c50 8178
b6e881c9 8179 bundle->stream_update.stream = acrtc_state->stream;
bc7f670e 8180 if (new_pcrtc_state->mode_changed) {
74aa7bd4
DF
8181 bundle->stream_update.src = acrtc_state->stream->src;
8182 bundle->stream_update.dst = acrtc_state->stream->dst;
e7b07cee
HW
8183 }
8184
cf020d49
NK
8185 if (new_pcrtc_state->color_mgmt_changed) {
8186 /*
8187 * TODO: This isn't fully correct since we've actually
8188 * already modified the stream in place.
8189 */
8190 bundle->stream_update.gamut_remap =
8191 &acrtc_state->stream->gamut_remap_matrix;
8192 bundle->stream_update.output_csc_transform =
8193 &acrtc_state->stream->csc_color_matrix;
8194 bundle->stream_update.out_transfer_func =
8195 acrtc_state->stream->out_transfer_func;
8196 }
bc7f670e 8197
8a48b44c 8198 acrtc_state->stream->abm_level = acrtc_state->abm_level;
bc7f670e 8199 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
74aa7bd4 8200 bundle->stream_update.abm_level = &acrtc_state->abm_level;
44d09c6a 8201
e63e2491
EB
8202 /*
8203 * If FreeSync state on the stream has changed then we need to
8204 * re-adjust the min/max bounds now that DC doesn't handle this
8205 * as part of commit.
8206 */
a85ba005 8207 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
e63e2491
EB
8208 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8209 dc_stream_adjust_vmin_vmax(
8210 dm->dc, acrtc_state->stream,
585d450c 8211 &acrtc_attach->dm_irq_params.vrr_params.adjust);
e63e2491
EB
8212 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8213 }
bc7f670e 8214 mutex_lock(&dm->dc_lock);
8c322309 8215 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
d1ebfdd8 8216 acrtc_state->stream->link->psr_settings.psr_allow_active)
8c322309
RL
8217 amdgpu_dm_psr_disable(acrtc_state->stream);
8218
bc7f670e 8219 dc_commit_updates_for_stream(dm->dc,
74aa7bd4 8220 bundle->surface_updates,
bc7f670e
DF
8221 planes_count,
8222 acrtc_state->stream,
efc8278e
AJ
8223 &bundle->stream_update,
8224 dc_state);
8c322309 8225
8fe684e9
NK
8226 /**
8227 * Enable or disable the interrupts on the backend.
8228 *
8229 * Most pipes are put into power gating when unused.
8230 *
8231 * When a pipe is power gated we lose its interrupt
8232 * enablement state by the time power gating is disabled again.
8233 *
8234 * So we need to update the IRQ control state in hardware
8235 * whenever the pipe turns on (since it could be previously
8236 * power gated) or off (since some pipes can't be power gated
8237 * on some ASICs).
8238 */
8239 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
1348969a
LT
8240 dm_update_pflip_irq_state(drm_to_adev(dev),
8241 acrtc_attach);
8fe684e9 8242
8c322309 8243 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
1cfbbdde 8244 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
d1ebfdd8 8245 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8c322309 8246 amdgpu_dm_link_setup_psr(acrtc_state->stream);
58aa1c50
NK
8247
8248 /* Decrement skip count when PSR is enabled and we're doing fast updates. */
8249 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
8250 acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
8251 struct amdgpu_dm_connector *aconn =
8252 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
1a365683
RL
8253
8254 if (aconn->psr_skip_count > 0)
8255 aconn->psr_skip_count--;
58aa1c50
NK
8256
8257 /* Allow PSR when skip count is 0. */
8258 acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
7cc191ee
LL
8259
8260 /*
8261 * If sink supports PSR SU, there is no need to rely on
8262 * a vblank event disable request to enable PSR. PSR SU
8263 * can be enabled immediately once the OS demonstrates an
8264 * adequate number of fast atomic commits to notify KMD
8265 * of update events. See `vblank_control_worker()`.
8266 */
8267 if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
8268 acrtc_attach->dm_irq_params.allow_psr_entry &&
8269 !acrtc_state->stream->link->psr_settings.psr_allow_active)
8270 amdgpu_dm_psr_enable(acrtc_state->stream);
58aa1c50
NK
8271 } else {
8272 acrtc_attach->dm_irq_params.allow_psr_entry = false;
8c322309
RL
8273 }
8274
bc7f670e 8275 mutex_unlock(&dm->dc_lock);
e7b07cee 8276 }
4b510503 8277
8ad27806
NK
8278 /*
8279 * Update cursor state *after* programming all the planes.
8280 * This avoids redundant programming in the case where we're going
8281 * to be disabling a single plane - those pipes are being disabled.
8282 */
8283 if (acrtc_state->active_planes)
8284 amdgpu_dm_commit_cursors(state);
80c218d5 8285
4b510503 8286cleanup:
74aa7bd4 8287 kfree(bundle);
e7b07cee
HW
8288}
8289
6ce8f316
NK
8290static void amdgpu_dm_commit_audio(struct drm_device *dev,
8291 struct drm_atomic_state *state)
8292{
1348969a 8293 struct amdgpu_device *adev = drm_to_adev(dev);
6ce8f316
NK
8294 struct amdgpu_dm_connector *aconnector;
8295 struct drm_connector *connector;
8296 struct drm_connector_state *old_con_state, *new_con_state;
8297 struct drm_crtc_state *new_crtc_state;
8298 struct dm_crtc_state *new_dm_crtc_state;
8299 const struct dc_stream_status *status;
8300 int i, inst;
8301
8302 /* Notify device removals. */
8303 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8304 if (old_con_state->crtc != new_con_state->crtc) {
8305 /* CRTC changes require notification. */
8306 goto notify;
8307 }
8308
8309 if (!new_con_state->crtc)
8310 continue;
8311
8312 new_crtc_state = drm_atomic_get_new_crtc_state(
8313 state, new_con_state->crtc);
8314
8315 if (!new_crtc_state)
8316 continue;
8317
8318 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8319 continue;
8320
8321 notify:
8322 aconnector = to_amdgpu_dm_connector(connector);
8323
8324 mutex_lock(&adev->dm.audio_lock);
8325 inst = aconnector->audio_inst;
8326 aconnector->audio_inst = -1;
8327 mutex_unlock(&adev->dm.audio_lock);
8328
8329 amdgpu_dm_audio_eld_notify(adev, inst);
8330 }
8331
8332 /* Notify audio device additions. */
8333 for_each_new_connector_in_state(state, connector, new_con_state, i) {
8334 if (!new_con_state->crtc)
8335 continue;
8336
8337 new_crtc_state = drm_atomic_get_new_crtc_state(
8338 state, new_con_state->crtc);
8339
8340 if (!new_crtc_state)
8341 continue;
8342
8343 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8344 continue;
8345
8346 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8347 if (!new_dm_crtc_state->stream)
8348 continue;
8349
8350 status = dc_stream_get_status(new_dm_crtc_state->stream);
8351 if (!status)
8352 continue;
8353
8354 aconnector = to_amdgpu_dm_connector(connector);
8355
8356 mutex_lock(&adev->dm.audio_lock);
8357 inst = status->audio_inst;
8358 aconnector->audio_inst = inst;
8359 mutex_unlock(&adev->dm.audio_lock);
8360
8361 amdgpu_dm_audio_eld_notify(adev, inst);
8362 }
8363}
8364
1f6010a9 8365/*
27b3f4fc
LSL
8366 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8367 * @crtc_state: the DRM CRTC state
8368 * @stream_state: the DC stream state.
8369 *
8370 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
8371 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8372 */
8373static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8374 struct dc_stream_state *stream_state)
8375{
b9952f93 8376 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
27b3f4fc 8377}
e7b07cee 8378
b8592b48
LL
8379/**
8380 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8381 * @state: The atomic state to commit
8382 *
8383 * This will tell DC to commit the constructed DC state from atomic_check,
8384 * programming the hardware. Any failure here implies a hardware failure, since
8385 * atomic check should have filtered anything non-kosher.
8386 */
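/*
 * Rough order of operations in the function below, as a reading aid
 * (summary only, not normative):
 *
 *   1. Wait for fences, update legacy modeset state.
 *   2. Tear down streams/interrupts for CRTCs turning off, then
 *      dc_commit_state() the new global DC state.
 *   3. Apply HDCP and connector-only (scaling/ABM/HDR) stream updates.
 *   4. Re-enable interrupts and CRC capture for enabled CRTCs.
 *   5. Program planes per CRTC via amdgpu_dm_commit_planes().
 *   6. Notify audio, restore backlight, send vblank events, clean up.
 */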
7578ecda 8387static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
e7b07cee
HW
8388{
8389 struct drm_device *dev = state->dev;
1348969a 8390 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
8391 struct amdgpu_display_manager *dm = &adev->dm;
8392 struct dm_atomic_state *dm_state;
eb3dc897 8393 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
e7b07cee 8394 uint32_t i, j;
5cc6dcbd 8395 struct drm_crtc *crtc;
0bc9706d 8396 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
e7b07cee
HW
8397 unsigned long flags;
8398 bool wait_for_vblank = true;
8399 struct drm_connector *connector;
c2cea706 8400 struct drm_connector_state *old_con_state, *new_con_state;
54d76575 8401 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
fe2a1965 8402 int crtc_disable_count = 0;
6ee90e88 8403 bool mode_set_reset_required = false;
047de3f1 8404 int r;
e7b07cee 8405
e8a98235
RS
8406 trace_amdgpu_dm_atomic_commit_tail_begin(state);
8407
047de3f1
CK
8408 r = drm_atomic_helper_wait_for_fences(dev, state, false);
8409 if (unlikely(r))
8410 DRM_ERROR("Waiting for fences timed out!");
8411
e7b07cee
HW
8412 drm_atomic_helper_update_legacy_modeset_state(dev, state);
8413
eb3dc897
NK
8414 dm_state = dm_atomic_get_new_state(state);
8415 if (dm_state && dm_state->context) {
8416 dc_state = dm_state->context;
8417 } else {
8418 /* No state changes, retain current state. */
813d20dc 8419 dc_state_temp = dc_create_state(dm->dc);
eb3dc897
NK
8420 ASSERT(dc_state_temp);
8421 dc_state = dc_state_temp;
8422 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8423 }
e7b07cee 8424
6d90a208
AP
8425 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8426 new_crtc_state, i) {
8427 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8428
8429 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8430
8431 if (old_crtc_state->active &&
8432 (!new_crtc_state->active ||
8433 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8434 manage_dm_interrupts(adev, acrtc, false);
8435 dc_stream_release(dm_old_crtc_state->stream);
8436 }
8437 }
8438
8976f73b
RS
8439 drm_atomic_helper_calc_timestamping_constants(state);
8440
e7b07cee 8441 /* update changed items */
0bc9706d 8442 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
e7b07cee 8443 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 8444
54d76575
LSL
8445 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8446 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
e7b07cee 8447
9f07550b 8448 drm_dbg_state(state->dev,
e7b07cee
HW
8449 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8450 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8451 "connectors_changed:%d\n",
8452 acrtc->crtc_id,
0bc9706d
LSL
8453 new_crtc_state->enable,
8454 new_crtc_state->active,
8455 new_crtc_state->planes_changed,
8456 new_crtc_state->mode_changed,
8457 new_crtc_state->active_changed,
8458 new_crtc_state->connectors_changed);
e7b07cee 8459
5c68c652
VL
8460 /* Disable cursor if disabling crtc */
8461 if (old_crtc_state->active && !new_crtc_state->active) {
8462 struct dc_cursor_position position;
8463
8464 memset(&position, 0, sizeof(position));
8465 mutex_lock(&dm->dc_lock);
8466 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8467 mutex_unlock(&dm->dc_lock);
8468 }
8469
27b3f4fc
LSL
8470 /* Copy all transient state flags into dc state */
8471 if (dm_new_crtc_state->stream) {
8472 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8473 dm_new_crtc_state->stream);
8474 }
8475
e7b07cee
HW
8476 /* handles headless hotplug case, updating new_state and
8477 * aconnector as needed
8478 */
8479
54d76575 8480 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
e7b07cee 8481
4711c033 8482 DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 8483
54d76575 8484 if (!dm_new_crtc_state->stream) {
e7b07cee 8485 /*
b830ebc9
HW
8486 * this could happen because of issues with the
8487 * delivery of userspace notifications.
8488 * In this case userspace tries to set a mode on a
1f6010a9
DF
8489 * display which is in fact disconnected.
8490 * dc_sink is NULL on the aconnector in this case.
b830ebc9
HW
8491 * We expect a mode reset to come soon.
8492 *
8493 * This can also happen when an unplug is done
8494 * during the resume sequence.
8495 *
8496 * In this case, we want to pretend we still
8497 * have a sink to keep the pipe running so that
8498 * hw state is consistent with the sw state
8499 */
f1ad2f5e 8500 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
e7b07cee
HW
8501 __func__, acrtc->base.base.id);
8502 continue;
8503 }
8504
54d76575
LSL
8505 if (dm_old_crtc_state->stream)
8506 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
e7b07cee 8507
97028037
LP
8508 pm_runtime_get_noresume(dev->dev);
8509
e7b07cee 8510 acrtc->enabled = true;
0bc9706d
LSL
8511 acrtc->hw_mode = new_crtc_state->mode;
8512 crtc->hwmode = new_crtc_state->mode;
6ee90e88 8513 mode_set_reset_required = true;
0bc9706d 8514 } else if (modereset_required(new_crtc_state)) {
4711c033 8515 DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 8516 /* i.e. reset mode */
6ee90e88 8517 if (dm_old_crtc_state->stream)
54d76575 8518 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
a85ba005 8519
6ee90e88 8520 mode_set_reset_required = true;
e7b07cee
HW
8521 }
8522 } /* for_each_crtc_in_state() */
8523
eb3dc897 8524 if (dc_state) {
6ee90e88 8525 /* if there was a mode set or reset, disable eDP PSR */
58aa1c50 8526 if (mode_set_reset_required) {
06dd1888
NK
8527 if (dm->vblank_control_workqueue)
8528 flush_workqueue(dm->vblank_control_workqueue);
cae5c1ab 8529
6ee90e88 8530 amdgpu_dm_psr_disable_all(dm);
58aa1c50 8531 }
6ee90e88 8532
eb3dc897 8533 dm_enable_per_frame_crtc_master_sync(dc_state);
674e78ac 8534 mutex_lock(&dm->dc_lock);
eb3dc897 8535 WARN_ON(!dc_commit_state(dm->dc, dc_state));
f3106c94
JC
8536
8537 /* Allow idle optimization when vblank count is 0 for display off */
8538 if (dm->active_vblank_irq_count == 0)
8539 dc_allow_idle_optimizations(dm->dc, true);
674e78ac 8540 mutex_unlock(&dm->dc_lock);
fa2123db 8541 }
fe8858bb 8542
0bc9706d 8543 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 8544 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 8545
54d76575 8546 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 8547
54d76575 8548 if (dm_new_crtc_state->stream != NULL) {
e7b07cee 8549 const struct dc_stream_status *status =
54d76575 8550 dc_stream_get_status(dm_new_crtc_state->stream);
e7b07cee 8551
eb3dc897 8552 if (!status)
09f609c3
LL
8553 status = dc_stream_get_status_from_state(dc_state,
8554 dm_new_crtc_state->stream);
e7b07cee 8555 if (!status)
54d76575 8556 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
e7b07cee
HW
8557 else
8558 acrtc->otg_inst = status->primary_otg_inst;
8559 }
8560 }
0c8620d6
BL
8561#ifdef CONFIG_DRM_AMD_DC_HDCP
8562 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8563 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8564 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8565 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8566
8567 new_crtc_state = NULL;
8568
8569 if (acrtc)
8570 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8571
8572 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8573
8574 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8575 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8576 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8577 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
97f6c917 8578 dm_new_con_state->update_hdcp = true;
0c8620d6
BL
8579 continue;
8580 }
8581
8582 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
b1abe558
BL
8583 hdcp_update_display(
8584 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
23eb4191 8585 new_con_state->hdcp_content_type,
0e86d3d4 8586 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
0c8620d6
BL
8587 }
8588#endif
e7b07cee 8589
02d6a6fc 8590 /* Handle connector state changes */
c2cea706 8591 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
54d76575
LSL
8592 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8593 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8594 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
efc8278e 8595 struct dc_surface_update dummy_updates[MAX_SURFACES];
19afd799 8596 struct dc_stream_update stream_update;
b232d4ed 8597 struct dc_info_packet hdr_packet;
e7b07cee 8598 struct dc_stream_status *status = NULL;
b232d4ed 8599 bool abm_changed, hdr_changed, scaling_changed;
e7b07cee 8600
efc8278e 8601 memset(&dummy_updates, 0, sizeof(dummy_updates));
19afd799
NC
8602 memset(&stream_update, 0, sizeof(stream_update));
8603
44d09c6a 8604 if (acrtc) {
0bc9706d 8605 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
44d09c6a
HW
8606 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8607 }
0bc9706d 8608
e7b07cee 8609 /* Skip any modesets/resets */
0bc9706d 8610 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
e7b07cee
HW
8611 continue;
8612
54d76575 8613 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
c1ee92f9
DF
8614 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8615
b232d4ed
NK
8616 scaling_changed = is_scaling_state_different(dm_new_con_state,
8617 dm_old_con_state);
8618
8619 abm_changed = dm_new_crtc_state->abm_level !=
8620 dm_old_crtc_state->abm_level;
8621
8622 hdr_changed =
72921cdf 8623 !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
b232d4ed
NK
8624
8625 if (!scaling_changed && !abm_changed && !hdr_changed)
c1ee92f9 8626 continue;
e7b07cee 8627
b6e881c9 8628 stream_update.stream = dm_new_crtc_state->stream;
b232d4ed 8629 if (scaling_changed) {
02d6a6fc 8630 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
b6e881c9 8631 dm_new_con_state, dm_new_crtc_state->stream);
e7b07cee 8632
02d6a6fc
DF
8633 stream_update.src = dm_new_crtc_state->stream->src;
8634 stream_update.dst = dm_new_crtc_state->stream->dst;
8635 }
8636
b232d4ed 8637 if (abm_changed) {
02d6a6fc
DF
8638 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8639
8640 stream_update.abm_level = &dm_new_crtc_state->abm_level;
8641 }
70e8ffc5 8642
b232d4ed
NK
8643 if (hdr_changed) {
8644 fill_hdr_info_packet(new_con_state, &hdr_packet);
8645 stream_update.hdr_static_metadata = &hdr_packet;
8646 }
8647
54d76575 8648 status = dc_stream_get_status(dm_new_crtc_state->stream);
57738ae4
ND
8649
8650 if (WARN_ON(!status))
8651 continue;
8652
3be5262e 8653 WARN_ON(!status->plane_count);
e7b07cee 8654
02d6a6fc
DF
8655 /*
8656 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8657 * Here we create an empty update on each plane.
8658 * To fix this, DC should permit updating only stream properties.
8659 */
8660 for (j = 0; j < status->plane_count; j++)
efc8278e 8661 dummy_updates[j].surface = status->plane_states[0];
02d6a6fc
DF
8662
8663
8664 mutex_lock(&dm->dc_lock);
8665 dc_commit_updates_for_stream(dm->dc,
efc8278e 8666 dummy_updates,
02d6a6fc
DF
8667 status->plane_count,
8668 dm_new_crtc_state->stream,
efc8278e
AJ
8669 &stream_update,
8670 dc_state);
02d6a6fc 8671 mutex_unlock(&dm->dc_lock);
e7b07cee
HW
8672 }
8673
b5e83f6f 8674 /* Count number of newly disabled CRTCs for dropping PM refs later. */
e1fc2dca 8675 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
057be086 8676 new_crtc_state, i) {
fe2a1965
LP
8677 if (old_crtc_state->active && !new_crtc_state->active)
8678 crtc_disable_count++;
8679
54d76575 8680 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e1fc2dca 8681 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
66b0c973 8682
585d450c
AP
8683 /* For freesync config update on crtc state and params for irq */
8684 update_stream_irq_parameters(dm, dm_new_crtc_state);
057be086 8685
66b0c973
MK
8686 /* Handle vrr on->off / off->on transitions */
8687 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8688 dm_new_crtc_state);
e7b07cee
HW
8689 }
8690
8fe684e9
NK
8691 /**
8692 * Enable interrupts for CRTCs that are newly enabled or went through
8693 * a modeset. It was intentionally deferred until after the front end
8694 * state was modified to wait until the OTG was on and so the IRQ
8695 * handlers didn't access stale or invalid state.
8696 */
8697 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8698 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8e7b6fee 8699#ifdef CONFIG_DEBUG_FS
86bc2219 8700 bool configure_crc = false;
8e7b6fee 8701 enum amdgpu_dm_pipe_crc_source cur_crc_src;
d98af272
WL
8702#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8703 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
8704#endif
8705 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8706 cur_crc_src = acrtc->dm_irq_params.crc_src;
8707 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8e7b6fee 8708#endif
585d450c
AP
8709 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8710
8fe684e9
NK
8711 if (new_crtc_state->active &&
8712 (!old_crtc_state->active ||
8713 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
585d450c
AP
8714 dc_stream_retain(dm_new_crtc_state->stream);
8715 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8fe684e9 8716 manage_dm_interrupts(adev, acrtc, true);
e2881d6d 8717
24eb9374 8718#ifdef CONFIG_DEBUG_FS
8fe684e9
NK
8719 /**
8720 * Frontend may have changed so reapply the CRC capture
8721 * settings for the stream.
8722 */
8723 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
c920888c 8724
8e7b6fee 8725 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
86bc2219
WL
8726 configure_crc = true;
8727#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
d98af272
WL
8728 if (amdgpu_dm_crc_window_is_activated(crtc)) {
8729 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8730 acrtc->dm_irq_params.crc_window.update_win = true;
8731 acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
8732 spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
8733 crc_rd_wrk->crtc = crtc;
8734 spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
8735 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8736 }
86bc2219 8737#endif
e2881d6d 8738 }
c920888c 8739
86bc2219 8740 if (configure_crc)
bbc49fc0
WL
8741 if (amdgpu_dm_crtc_configure_crc_source(
8742 crtc, dm_new_crtc_state, cur_crc_src))
8743 DRM_DEBUG_DRIVER("Failed to configure crc source");
24eb9374 8744#endif
8fe684e9
NK
8745 }
8746 }
e7b07cee 8747
420cd472 8748 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
4d85f45c 8749 if (new_crtc_state->async_flip)
420cd472
DF
8750 wait_for_vblank = false;
8751
e7b07cee 8752 /* update planes when needed per crtc*/
5cc6dcbd 8753 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
54d76575 8754 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 8755
54d76575 8756 if (dm_new_crtc_state->stream)
eb3dc897 8757 amdgpu_dm_commit_planes(state, dc_state, dev,
420cd472 8758 dm, crtc, wait_for_vblank);
e7b07cee
HW
8759 }
8760
6ce8f316
NK
8761 /* Update audio instances for each connector. */
8762 amdgpu_dm_commit_audio(dev, state);
8763
7230362c 8764 /* restore the backlight level */
7fd13bae
AD
8765 for (i = 0; i < dm->num_of_edps; i++) {
8766 if (dm->backlight_dev[i] &&
4052287a 8767 (dm->actual_brightness[i] != dm->brightness[i]))
7fd13bae
AD
8768 amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
8769 }
83a3439d 8770
e7b07cee
HW
8771 /*
8772 * send vblank event on all events not handled in flip and
8773 * mark consumed event for drm_atomic_helper_commit_hw_done
8774 */
4a580877 8775 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
0bc9706d 8776 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 8777
0bc9706d
LSL
8778 if (new_crtc_state->event)
8779 drm_send_event_locked(dev, &new_crtc_state->event->base);
e7b07cee 8780
0bc9706d 8781 new_crtc_state->event = NULL;
e7b07cee 8782 }
4a580877 8783 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
e7b07cee 8784
29c8f234
LL
8785 /* Signal HW programming completion */
8786 drm_atomic_helper_commit_hw_done(state);
e7b07cee
HW
8787
8788 if (wait_for_vblank)
320a1274 8789 drm_atomic_helper_wait_for_flip_done(dev, state);
e7b07cee
HW
8790
8791 drm_atomic_helper_cleanup_planes(dev, state);
97028037 8792
5f6fab24
AD
8793 /* return the stolen vga memory back to VRAM */
8794 if (!adev->mman.keep_stolen_vga_memory)
8795 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8796 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8797
1f6010a9
DF
8798 /*
8799 * Finally, drop a runtime PM reference for each newly disabled CRTC,
97028037
LP
8800 * so we can put the GPU into runtime suspend if we're not driving any
8801 * displays anymore
8802 */
fe2a1965
LP
8803 for (i = 0; i < crtc_disable_count; i++)
8804 pm_runtime_put_autosuspend(dev->dev);
97028037 8805 pm_runtime_mark_last_busy(dev->dev);
eb3dc897
NK
8806
8807 if (dc_state_temp)
8808 dc_release_state(dc_state_temp);
e7b07cee
HW
8809}
8810
8811
8812static int dm_force_atomic_commit(struct drm_connector *connector)
8813{
8814 int ret = 0;
8815 struct drm_device *ddev = connector->dev;
8816 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8817 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8818 struct drm_plane *plane = disconnected_acrtc->base.primary;
8819 struct drm_connector_state *conn_state;
8820 struct drm_crtc_state *crtc_state;
8821 struct drm_plane_state *plane_state;
8822
8823 if (!state)
8824 return -ENOMEM;
8825
8826 state->acquire_ctx = ddev->mode_config.acquire_ctx;
8827
8828 /* Construct an atomic state to restore previous display setting */
8829
8830 /*
8831 * Attach connectors to drm_atomic_state
8832 */
8833 conn_state = drm_atomic_get_connector_state(state, connector);
8834
8835 ret = PTR_ERR_OR_ZERO(conn_state);
8836 if (ret)
2dc39051 8837 goto out;
e7b07cee
HW
8838
8839 /* Attach crtc to drm_atomic_state*/
8840 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8841
8842 ret = PTR_ERR_OR_ZERO(crtc_state);
8843 if (ret)
2dc39051 8844 goto out;
e7b07cee
HW
8845
8846 /* force a restore */
8847 crtc_state->mode_changed = true;
8848
8849 /* Attach plane to drm_atomic_state */
8850 plane_state = drm_atomic_get_plane_state(state, plane);
8851
8852 ret = PTR_ERR_OR_ZERO(plane_state);
8853 if (ret)
2dc39051 8854 goto out;
e7b07cee
HW
8855
8856 /* Call commit internally with the state we just constructed */
8857 ret = drm_atomic_commit(state);
e7b07cee 8858
2dc39051 8859out:
e7b07cee 8860 drm_atomic_state_put(state);
2dc39051
VL
8861 if (ret)
8862 DRM_ERROR("Restoring old state failed with %i\n", ret);
e7b07cee
HW
8863
8864 return ret;
8865}
8866
8867/*
1f6010a9
DF
8868 * This function handles all cases where a mode set is not issued upon hotplug.
8869 * This includes when a display is unplugged then plugged back into the
8870 * same port and when running without usermode desktop manager support
e7b07cee 8871 */
3ee6b26b
AD
8872void dm_restore_drm_connector_state(struct drm_device *dev,
8873 struct drm_connector *connector)
e7b07cee 8874{
c84dec2f 8875 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
8876 struct amdgpu_crtc *disconnected_acrtc;
8877 struct dm_crtc_state *acrtc_state;
8878
8879 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8880 return;
8881
8882 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
70e8ffc5
HW
8883 if (!disconnected_acrtc)
8884 return;
e7b07cee 8885
70e8ffc5
HW
8886 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8887 if (!acrtc_state->stream)
e7b07cee
HW
8888 return;
8889
8890 /*
8891 * If the previous sink is not released and different from the current,
8892 * we deduce we are in a state where we cannot rely on a usermode call
8893 * to turn on the display, so we do it here
8894 */
8895 if (acrtc_state->stream->sink != aconnector->dc_sink)
8896 dm_force_atomic_commit(&aconnector->base);
8897}
8898
1f6010a9 8899/*
e7b07cee
HW
8901 * Grabs all modesetting locks to serialize against any blocking commits,
8902 * and waits for completion of all non-blocking commits.
8902 */
3ee6b26b
AD
8903static int do_aquire_global_lock(struct drm_device *dev,
8904 struct drm_atomic_state *state)
e7b07cee
HW
8905{
8906 struct drm_crtc *crtc;
8907 struct drm_crtc_commit *commit;
8908 long ret;
8909
1f6010a9
DF
8910 /*
8911 * Adding all modeset locks to acquire_ctx will
e7b07cee
HW
8912 * ensure that when the framework releases it, the
8913 * extra locks we take here will get released too
8914 */
8915 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8916 if (ret)
8917 return ret;
8918
8919 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8920 spin_lock(&crtc->commit_lock);
8921 commit = list_first_entry_or_null(&crtc->commit_list,
8922 struct drm_crtc_commit, commit_entry);
8923 if (commit)
8924 drm_crtc_commit_get(commit);
8925 spin_unlock(&crtc->commit_lock);
8926
8927 if (!commit)
8928 continue;
8929
1f6010a9
DF
8930 /*
8931 * Make sure all pending HW programming completed and
e7b07cee
HW
8932 * page flips done
8933 */
8934 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8935
8936 if (ret > 0)
8937 ret = wait_for_completion_interruptible_timeout(
8938 &commit->flip_done, 10*HZ);
8939
8940 if (ret == 0)
8941 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
b830ebc9 8942 "timed out\n", crtc->base.id, crtc->name);
e7b07cee
HW
8943
8944 drm_crtc_commit_put(commit);
8945 }
8946
8947 return ret < 0 ? ret : 0;
8948}
8949
bb47de73
NK
8950static void get_freesync_config_for_crtc(
8951 struct dm_crtc_state *new_crtc_state,
8952 struct dm_connector_state *new_con_state)
98e6436d
AK
8953{
8954 struct mod_freesync_config config = {0};
98e6436d
AK
8955 struct amdgpu_dm_connector *aconnector =
8956 to_amdgpu_dm_connector(new_con_state->base.connector);
a057ec46 8957 struct drm_display_mode *mode = &new_crtc_state->base.mode;
0ab925d3 8958 int vrefresh = drm_mode_vrefresh(mode);
a85ba005 8959 bool fs_vid_mode = false;
98e6436d 8960
a057ec46 8961 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
0ab925d3
NK
8962 vrefresh >= aconnector->min_vfreq &&
8963 vrefresh <= aconnector->max_vfreq;
bb47de73 8964
a057ec46
IB
8965 if (new_crtc_state->vrr_supported) {
8966 new_crtc_state->stream->ignore_msa_timing_param = true;
a85ba005
NC
8967 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
8968
8969 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
8970 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
69ff8845 8971 config.vsif_supported = true;
180db303 8972 config.btr = true;
98e6436d 8973
a85ba005
NC
8974 if (fs_vid_mode) {
8975 config.state = VRR_STATE_ACTIVE_FIXED;
8976 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
8977 goto out;
8978 } else if (new_crtc_state->base.vrr_enabled) {
8979 config.state = VRR_STATE_ACTIVE_VARIABLE;
8980 } else {
8981 config.state = VRR_STATE_INACTIVE;
8982 }
8983 }
8984out:
bb47de73
NK
8985 new_crtc_state->freesync_config = config;
8986}
98e6436d 8987
bb47de73
NK
8988static void reset_freesync_config_for_crtc(
8989 struct dm_crtc_state *new_crtc_state)
8990{
8991 new_crtc_state->vrr_supported = false;
98e6436d 8992
bb47de73
NK
8993 memset(&new_crtc_state->vrr_infopacket, 0,
8994 sizeof(new_crtc_state->vrr_infopacket));
98e6436d
AK
8995}
8996
a85ba005
NC
8997static bool
8998is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
8999 struct drm_crtc_state *new_crtc_state)
9000{
1cbd7887 9001 const struct drm_display_mode *old_mode, *new_mode;
a85ba005
NC
9002
9003 if (!old_crtc_state || !new_crtc_state)
9004 return false;
9005
1cbd7887
VS
9006 old_mode = &old_crtc_state->mode;
9007 new_mode = &new_crtc_state->mode;
9008
9009 if (old_mode->clock == new_mode->clock &&
9010 old_mode->hdisplay == new_mode->hdisplay &&
9011 old_mode->vdisplay == new_mode->vdisplay &&
9012 old_mode->htotal == new_mode->htotal &&
9013 old_mode->vtotal != new_mode->vtotal &&
9014 old_mode->hsync_start == new_mode->hsync_start &&
9015 old_mode->vsync_start != new_mode->vsync_start &&
9016 old_mode->hsync_end == new_mode->hsync_end &&
9017 old_mode->vsync_end != new_mode->vsync_end &&
9018 old_mode->hskew == new_mode->hskew &&
9019 old_mode->vscan == new_mode->vscan &&
9020 (old_mode->vsync_end - old_mode->vsync_start) ==
9021 (new_mode->vsync_end - new_mode->vsync_start))
a85ba005
NC
9022 return true;
9023
9024 return false;
9025}
9026
9027static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
9028 uint64_t num, den, res;
9029 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9030
9031 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9032
9033 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9034 den = (unsigned long long)new_crtc_state->mode.htotal *
9035 (unsigned long long)new_crtc_state->mode.vtotal;
9036
9037 res = div_u64(num, den);
9038 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9039}
9040
f11d9373 9041static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
17ce8a69
RL
9042 struct drm_atomic_state *state,
9043 struct drm_crtc *crtc,
9044 struct drm_crtc_state *old_crtc_state,
9045 struct drm_crtc_state *new_crtc_state,
9046 bool enable,
9047 bool *lock_and_validation_needed)
e7b07cee 9048{
eb3dc897 9049 struct dm_atomic_state *dm_state = NULL;
54d76575 9050 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9635b754 9051 struct dc_stream_state *new_stream;
62f55537 9052 int ret = 0;
d4d4a645 9053
1f6010a9
DF
9054 /*
9055 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9056 * update changed items
9057 */
4b9674e5
LL
9058 struct amdgpu_crtc *acrtc = NULL;
9059 struct amdgpu_dm_connector *aconnector = NULL;
9060 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9061 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
e7b07cee 9062
4b9674e5 9063 new_stream = NULL;
9635b754 9064
4b9674e5
LL
9065 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9066 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9067 acrtc = to_amdgpu_crtc(crtc);
4b9674e5 9068 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
19f89e23 9069
4b9674e5
LL
9070 /* TODO This hack should go away */
9071 if (aconnector && enable) {
9072 /* Make sure fake sink is created in plug-in scenario */
9073 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9074 &aconnector->base);
9075 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9076 &aconnector->base);
19f89e23 9077
4b9674e5
LL
9078 if (IS_ERR(drm_new_conn_state)) {
9079 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9080 goto fail;
9081 }
19f89e23 9082
4b9674e5
LL
9083 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9084 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
19f89e23 9085
02d35a67
JFZ
9086 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9087 goto skip_modeset;
9088
cbd14ae7
SW
9089 new_stream = create_validate_stream_for_sink(aconnector,
9090 &new_crtc_state->mode,
9091 dm_new_conn_state,
9092 dm_old_crtc_state->stream);
19f89e23 9093
4b9674e5
LL
9094 /*
9095 * we can have no stream on ACTION_SET if a display
9096 * was disconnected during S3; in this case it is not an
9097 * error: the OS will be updated after detection and
9098 * will do the right thing on the next atomic commit
9099 */
19f89e23 9100
4b9674e5
LL
9101 if (!new_stream) {
9102 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9103 __func__, acrtc->base.base.id);
9104 ret = -ENOMEM;
9105 goto fail;
9106 }
e7b07cee 9107
3d4e52d0
VL
9108 /*
9109 * TODO: Check VSDB bits to decide whether this should
9110 * be enabled or not.
9111 */
9112 new_stream->triggered_crtc_reset.enabled =
9113 dm->force_timing_sync;
9114
4b9674e5 9115 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
98e6436d 9116
88694af9
NK
9117 ret = fill_hdr_info_packet(drm_new_conn_state,
9118 &new_stream->hdr_static_metadata);
9119 if (ret)
9120 goto fail;
9121
7e930949
NK
9122 /*
9123 * If we already removed the old stream from the context
9124 * (and set the new stream to NULL) then we can't reuse
9125 * the old stream even if the stream and scaling are unchanged.
9126 * We'll hit the BUG_ON and black screen.
9127 *
9128 * TODO: Refactor this function to allow this check to work
9129 * in all conditions.
9130 */
de05abe6 9131 if (dm_new_crtc_state->stream &&
a85ba005
NC
9132 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9133 goto skip_modeset;
9134
7e930949
NK
9135 if (dm_new_crtc_state->stream &&
9136 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
4b9674e5
LL
9137 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9138 new_crtc_state->mode_changed = false;
9139 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
9140 new_crtc_state->mode_changed);
62f55537 9141 }
4b9674e5 9142 }
b830ebc9 9143
02d35a67 9144 /* mode_changed flag may get updated above, need to check again */
4b9674e5
LL
9145 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9146 goto skip_modeset;
e7b07cee 9147
9f07550b 9148 drm_dbg_state(state->dev,
4b9674e5
LL
9149 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9150 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
9151 "connectors_changed:%d\n",
9152 acrtc->crtc_id,
9153 new_crtc_state->enable,
9154 new_crtc_state->active,
9155 new_crtc_state->planes_changed,
9156 new_crtc_state->mode_changed,
9157 new_crtc_state->active_changed,
9158 new_crtc_state->connectors_changed);
62f55537 9159
4b9674e5
LL
9160 /* Remove stream for any changed/disabled CRTC */
9161 if (!enable) {
62f55537 9162
4b9674e5
LL
9163 if (!dm_old_crtc_state->stream)
9164 goto skip_modeset;
eb3dc897 9165
de05abe6 9166 if (dm_new_crtc_state->stream &&
a85ba005
NC
9167 is_timing_unchanged_for_freesync(new_crtc_state,
9168 old_crtc_state)) {
9169 new_crtc_state->mode_changed = false;
9170 DRM_DEBUG_DRIVER(
9171 "Mode change not required for front porch change, "
9172 "setting mode_changed to %d",
9173 new_crtc_state->mode_changed);
9174
9175 set_freesync_fixed_config(dm_new_crtc_state);
9176
9177 goto skip_modeset;
de05abe6 9178 } else if (aconnector &&
a85ba005
NC
9179 is_freesync_video_mode(&new_crtc_state->mode,
9180 aconnector)) {
e88ebd83
SC
9181 struct drm_display_mode *high_mode;
9182
9183 high_mode = get_highest_refresh_rate_mode(aconnector, false);
9184 if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
9185 set_freesync_fixed_config(dm_new_crtc_state);
9186 }
a85ba005
NC
9187 }
9188
4b9674e5
LL
9189 ret = dm_atomic_get_state(state, &dm_state);
9190 if (ret)
9191 goto fail;
e7b07cee 9192
4b9674e5
LL
9193 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9194 crtc->base.id);
62f55537 9195
4b9674e5
LL
9196 /* i.e. reset mode */
9197 if (dc_remove_stream_from_ctx(
9198 dm->dc,
9199 dm_state->context,
9200 dm_old_crtc_state->stream) != DC_OK) {
9201 ret = -EINVAL;
9202 goto fail;
9203 }
62f55537 9204
4b9674e5
LL
9205 dc_stream_release(dm_old_crtc_state->stream);
9206 dm_new_crtc_state->stream = NULL;
bb47de73 9207
4b9674e5 9208 reset_freesync_config_for_crtc(dm_new_crtc_state);
62f55537 9209
4b9674e5 9210 *lock_and_validation_needed = true;
62f55537 9211
4b9674e5
LL
9212 } else { /* Add stream for any updated/enabled CRTC */
9213 /*
9214 * Quick fix to prevent a NULL pointer on new_stream when MST
9215 * connectors added in chained mode are not found in the existing crtc_state.
9216 * TODO: need to dig out the root cause of that
9217 */
84a8b390 9218 if (!aconnector)
4b9674e5 9219 goto skip_modeset;
62f55537 9220
4b9674e5
LL
9221 if (modereset_required(new_crtc_state))
9222 goto skip_modeset;
62f55537 9223
4b9674e5
LL
9224 if (modeset_required(new_crtc_state, new_stream,
9225 dm_old_crtc_state->stream)) {
62f55537 9226
4b9674e5 9227 WARN_ON(dm_new_crtc_state->stream);
eb3dc897 9228
4b9674e5
LL
9229 ret = dm_atomic_get_state(state, &dm_state);
9230 if (ret)
9231 goto fail;
27b3f4fc 9232
4b9674e5 9233 dm_new_crtc_state->stream = new_stream;
62f55537 9234
4b9674e5 9235 dc_stream_retain(new_stream);
1dc90497 9236
4711c033
LT
9237 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9238 crtc->base.id);
1dc90497 9239
4b9674e5
LL
9240 if (dc_add_stream_to_ctx(
9241 dm->dc,
9242 dm_state->context,
9243 dm_new_crtc_state->stream) != DC_OK) {
9244 ret = -EINVAL;
9245 goto fail;
9b690ef3
BL
9246 }
9247
4b9674e5
LL
9248 *lock_and_validation_needed = true;
9249 }
9250 }
e277adc5 9251
4b9674e5
LL
9252skip_modeset:
9253 /* Release extra reference */
9254 if (new_stream)
9255 dc_stream_release(new_stream);
e277adc5 9256
4b9674e5
LL
9257 /*
9258 * We want to do dc stream updates that do not require a
9259 * full modeset below.
9260 */
2afda735 9261 if (!(enable && aconnector && new_crtc_state->active))
4b9674e5
LL
9262 return 0;
9263 /*
9264 * Given the above conditions, the dc state cannot be NULL because:
9265 * 1. the CRTC is being enabled (its stream has just been added
9266 * to the dc context, or is already in it),
9267 * 2. it has a valid connector attached, and
9268 * 3. it is currently active and enabled.
9269 * => The dc stream state currently exists.
9270 */
9271 BUG_ON(dm_new_crtc_state->stream == NULL);
a9e8d275 9272
4b9674e5 9273 /* Scaling or underscan settings */
c521fc31
RL
9274 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
9275 drm_atomic_crtc_needs_modeset(new_crtc_state))
4b9674e5
LL
9276 update_stream_scaling_settings(
9277 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
98e6436d 9278
b05e2c5e
DF
9279 /* ABM settings */
9280 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9281
4b9674e5
LL
9282 /*
9283 * Color management settings. We also update color properties
9284 * when a modeset is needed, to ensure it gets reprogrammed.
9285 */
9286 if (dm_new_crtc_state->base.color_mgmt_changed ||
9287 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
cf020d49 9288 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
4b9674e5
LL
9289 if (ret)
9290 goto fail;
62f55537 9291 }
e7b07cee 9292
4b9674e5
LL
9293 /* Update Freesync settings. */
9294 get_freesync_config_for_crtc(dm_new_crtc_state,
9295 dm_new_conn_state);
9296
62f55537 9297 return ret;
9635b754
DS
9298
9299fail:
9300 if (new_stream)
9301 dc_stream_release(new_stream);
9302 return ret;
62f55537 9303}
9b690ef3 9304
f6ff2a08
NK
9305static bool should_reset_plane(struct drm_atomic_state *state,
9306 struct drm_plane *plane,
9307 struct drm_plane_state *old_plane_state,
9308 struct drm_plane_state *new_plane_state)
9309{
9310 struct drm_plane *other;
9311 struct drm_plane_state *old_other_state, *new_other_state;
9312 struct drm_crtc_state *new_crtc_state;
9313 int i;
9314
70a1efac
NK
9315 /*
9316 * TODO: Remove this hack once the checks below are sufficient
9317 * to determine when we need to reset all the planes on
9318 * the stream.
9319 */
9320 if (state->allow_modeset)
9321 return true;
9322
f6ff2a08
NK
9323 /* Exit early if we know that we're adding or removing the plane. */
9324 if (old_plane_state->crtc != new_plane_state->crtc)
9325 return true;
9326
9327 /* old crtc == new_crtc == NULL, plane not in context. */
9328 if (!new_plane_state->crtc)
9329 return false;
9330
9331 new_crtc_state =
9332 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9333
9334 if (!new_crtc_state)
9335 return true;
9336
7316c4ad
NK
9337 /* CRTC Degamma changes currently require us to recreate planes. */
9338 if (new_crtc_state->color_mgmt_changed)
9339 return true;
9340
f6ff2a08
NK
9341 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9342 return true;
9343
9344 /*
9345 * If there are any new primary or overlay planes being added or
9346 * removed then the z-order can potentially change. To ensure
9347 * correct z-order and pipe acquisition the current DC architecture
9348 * requires us to remove and recreate all existing planes.
9349 *
9350 * TODO: Come up with a more elegant solution for this.
9351 */
9352 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
6eed95b0 9353 struct amdgpu_framebuffer *old_afb, *new_afb;
f6ff2a08
NK
9354 if (other->type == DRM_PLANE_TYPE_CURSOR)
9355 continue;
9356
9357 if (old_other_state->crtc != new_plane_state->crtc &&
9358 new_other_state->crtc != new_plane_state->crtc)
9359 continue;
9360
9361 if (old_other_state->crtc != new_other_state->crtc)
9362 return true;
9363
dc4cb30d
NK
9364 /* Src/dst size and scaling updates. */
9365 if (old_other_state->src_w != new_other_state->src_w ||
9366 old_other_state->src_h != new_other_state->src_h ||
9367 old_other_state->crtc_w != new_other_state->crtc_w ||
9368 old_other_state->crtc_h != new_other_state->crtc_h)
9369 return true;
9370
9371 /* Rotation / mirroring updates. */
9372 if (old_other_state->rotation != new_other_state->rotation)
9373 return true;
9374
9375 /* Blending updates. */
9376 if (old_other_state->pixel_blend_mode !=
9377 new_other_state->pixel_blend_mode)
9378 return true;
9379
9380 /* Alpha updates. */
9381 if (old_other_state->alpha != new_other_state->alpha)
9382 return true;
9383
9384 /* Colorspace changes. */
9385 if (old_other_state->color_range != new_other_state->color_range ||
9386 old_other_state->color_encoding != new_other_state->color_encoding)
9387 return true;
9388
9a81cc60
NK
9389 /* Framebuffer checks fall at the end. */
9390 if (!old_other_state->fb || !new_other_state->fb)
9391 continue;
9392
9393 /* Pixel format changes can require bandwidth updates. */
9394 if (old_other_state->fb->format != new_other_state->fb->format)
9395 return true;
9396
6eed95b0
BN
9397 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9398 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9a81cc60
NK
9399
9400 /* Tiling and DCC changes also require bandwidth updates. */
37384b3f
BN
9401 if (old_afb->tiling_flags != new_afb->tiling_flags ||
9402 old_afb->base.modifier != new_afb->base.modifier)
f6ff2a08
NK
9403 return true;
9404 }
9405
9406 return false;
9407}
9408
b0455fda
SS
9409static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9410 struct drm_plane_state *new_plane_state,
9411 struct drm_framebuffer *fb)
9412{
e72868c4
SS
9413 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9414 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
b0455fda 9415 unsigned int pitch;
e72868c4 9416 bool linear;
b0455fda
SS
9417
9418 if (fb->width > new_acrtc->max_cursor_width ||
9419 fb->height > new_acrtc->max_cursor_height) {
9420 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9421 new_plane_state->fb->width,
9422 new_plane_state->fb->height);
9423 return -EINVAL;
9424 }
9425 if (new_plane_state->src_w != fb->width << 16 ||
9426 new_plane_state->src_h != fb->height << 16) {
9427 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9428 return -EINVAL;
9429 }
9430
9431 /* Pitch in pixels */
9432 pitch = fb->pitches[0] / fb->format->cpp[0];
9433
9434 if (fb->width != pitch) {
9435 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9436 fb->width, pitch);
9437 return -EINVAL;
9438 }
9439
9440 switch (pitch) {
9441 case 64:
9442 case 128:
9443 case 256:
9444 /* FB pitch is supported by cursor plane */
9445 break;
9446 default:
9447 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9448 return -EINVAL;
9449 }
9450
e72868c4
SS
9451 /* Core DRM takes care of checking FB modifiers, so we only need to
9452 * check tiling flags when the FB doesn't have a modifier. */
9453 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9454 if (adev->family < AMDGPU_FAMILY_AI) {
9455 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9456 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9457 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9458 } else {
9459 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9460 }
9461 if (!linear) {
9462 DRM_DEBUG_ATOMIC("Cursor FB not linear");
9463 return -EINVAL;
9464 }
9465 }
9466
b0455fda
SS
9467 return 0;
9468}
9469
9e869063
LL
9470static int dm_update_plane_state(struct dc *dc,
9471 struct drm_atomic_state *state,
9472 struct drm_plane *plane,
9473 struct drm_plane_state *old_plane_state,
9474 struct drm_plane_state *new_plane_state,
9475 bool enable,
9476 bool *lock_and_validation_needed)
62f55537 9477{
eb3dc897
NK
9478
9479 struct dm_atomic_state *dm_state = NULL;
62f55537 9480 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
0bc9706d 9481 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
54d76575 9482 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
54d76575 9483 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
626bf90f 9484 struct amdgpu_crtc *new_acrtc;
f6ff2a08 9485 bool needs_reset;
62f55537 9486 int ret = 0;
e7b07cee 9487
9b690ef3 9488
9e869063
LL
9489 new_plane_crtc = new_plane_state->crtc;
9490 old_plane_crtc = old_plane_state->crtc;
9491 dm_new_plane_state = to_dm_plane_state(new_plane_state);
9492 dm_old_plane_state = to_dm_plane_state(old_plane_state);
62f55537 9493
626bf90f
SS
9494 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9495 if (!enable || !new_plane_crtc ||
9496 drm_atomic_plane_disabling(plane->state, new_plane_state))
9497 return 0;
9498
9499 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9500
5f581248
SS
9501 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9502 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9503 return -EINVAL;
9504 }
9505
24f99d2b 9506 if (new_plane_state->fb) {
b0455fda
SS
9507 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9508 new_plane_state->fb);
9509 if (ret)
9510 return ret;
24f99d2b
SS
9511 }
9512
9e869063 9513 return 0;
626bf90f 9514 }
9b690ef3 9515
f6ff2a08
NK
9516 needs_reset = should_reset_plane(state, plane, old_plane_state,
9517 new_plane_state);
9518
9e869063
LL
9519 /* Remove any changed/removed planes */
9520 if (!enable) {
f6ff2a08 9521 if (!needs_reset)
9e869063 9522 return 0;
a7b06724 9523
9e869063
LL
9524 if (!old_plane_crtc)
9525 return 0;
62f55537 9526
9e869063
LL
9527 old_crtc_state = drm_atomic_get_old_crtc_state(
9528 state, old_plane_crtc);
9529 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9b690ef3 9530
9e869063
LL
9531 if (!dm_old_crtc_state->stream)
9532 return 0;
62f55537 9533
9e869063
LL
9534 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9535 plane->base.id, old_plane_crtc->base.id);
9b690ef3 9536
9e869063
LL
9537 ret = dm_atomic_get_state(state, &dm_state);
9538 if (ret)
9539 return ret;
eb3dc897 9540
9e869063
LL
9541 if (!dc_remove_plane_from_context(
9542 dc,
9543 dm_old_crtc_state->stream,
9544 dm_old_plane_state->dc_state,
9545 dm_state->context)) {
62f55537 9546
c3537613 9547 return -EINVAL;
9e869063 9548 }
e7b07cee 9549
9b690ef3 9550
9e869063
LL
9551 dc_plane_state_release(dm_old_plane_state->dc_state);
9552 dm_new_plane_state->dc_state = NULL;
1dc90497 9553
9e869063 9554 *lock_and_validation_needed = true;
1dc90497 9555
9e869063
LL
9556 } else { /* Add new planes */
9557 struct dc_plane_state *dc_new_plane_state;
1dc90497 9558
9e869063
LL
9559 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9560 return 0;
e7b07cee 9561
9e869063
LL
9562 if (!new_plane_crtc)
9563 return 0;
e7b07cee 9564
9e869063
LL
9565 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9566 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1dc90497 9567
9e869063
LL
9568 if (!dm_new_crtc_state->stream)
9569 return 0;
62f55537 9570
f6ff2a08 9571 if (!needs_reset)
9e869063 9572 return 0;
62f55537 9573
8c44515b
AP
9574 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9575 if (ret)
9576 return ret;
9577
9e869063 9578 WARN_ON(dm_new_plane_state->dc_state);
9b690ef3 9579
9e869063
LL
9580 dc_new_plane_state = dc_create_plane_state(dc);
9581 if (!dc_new_plane_state)
9582 return -ENOMEM;
62f55537 9583
4711c033
LT
9584 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
9585 plane->base.id, new_plane_crtc->base.id);
8c45c5db 9586
695af5f9 9587 ret = fill_dc_plane_attributes(
1348969a 9588 drm_to_adev(new_plane_crtc->dev),
9e869063
LL
9589 dc_new_plane_state,
9590 new_plane_state,
9591 new_crtc_state);
9592 if (ret) {
9593 dc_plane_state_release(dc_new_plane_state);
9594 return ret;
9595 }
62f55537 9596
9e869063
LL
9597 ret = dm_atomic_get_state(state, &dm_state);
9598 if (ret) {
9599 dc_plane_state_release(dc_new_plane_state);
9600 return ret;
9601 }
eb3dc897 9602
9e869063
LL
9603 /*
9604 * Any atomic check errors that occur after this will
9605 * not need a release. The plane state will be attached
9606 * to the stream, and therefore part of the atomic
9607 * state. It'll be released when the atomic state is
9608 * cleaned.
9609 */
9610 if (!dc_add_plane_to_context(
9611 dc,
9612 dm_new_crtc_state->stream,
9613 dc_new_plane_state,
9614 dm_state->context)) {
62f55537 9615
9e869063
LL
9616 dc_plane_state_release(dc_new_plane_state);
9617 return -EINVAL;
9618 }
8c45c5db 9619
9e869063 9620 dm_new_plane_state->dc_state = dc_new_plane_state;
000b59ea 9621
214993e1
ML
9622 dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
9623
9e869063
LL
9624 /* Tell DC to do a full surface update every time there
9625 * is a plane change. Inefficient, but works for now.
9626 */
9627 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9628
9629 *lock_and_validation_needed = true;
62f55537 9630 }
e7b07cee
HW
9631
9632
62f55537
AG
9633 return ret;
9634}
a87fa993 9635
69cb5629
VZ
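/*
 * Return the plane's source size in whole pixels (src coordinates are 16.16
 * fixed point), swapping width and height for 90/270 degree rotations so the
 * scaling math matches the on-screen orientation; e.g. a 1920x1080 source
 * with DRM_MODE_ROTATE_90 is reported as 1080x1920.
 */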
9636static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
9637 int *src_w, int *src_h)
9638{
9639 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
9640 case DRM_MODE_ROTATE_90:
9641 case DRM_MODE_ROTATE_270:
9642 *src_w = plane_state->src_h >> 16;
9643 *src_h = plane_state->src_w >> 16;
9644 break;
9645 case DRM_MODE_ROTATE_0:
9646 case DRM_MODE_ROTATE_180:
9647 default:
9648 *src_w = plane_state->src_w >> 16;
9649 *src_h = plane_state->src_h >> 16;
9650 break;
9651 }
9652}
9653
12f4849a
SS
9654static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9655 struct drm_crtc *crtc,
9656 struct drm_crtc_state *new_crtc_state)
9657{
d1bfbe8a
SS
9658 struct drm_plane *cursor = crtc->cursor, *underlying;
9659 struct drm_plane_state *new_cursor_state, *new_underlying_state;
9660 int i;
9661 int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
69cb5629
VZ
9662 int cursor_src_w, cursor_src_h;
9663 int underlying_src_w, underlying_src_h;
12f4849a
SS
9664
9665 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9666 * cursor per pipe, but it inherits the scaling and positioning from the
9667 * underlying pipe. Check that the cursor plane's blending properties
9668 * match the underlying planes'.
 */
12f4849a 9669
d1bfbe8a
SS
9670 new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
9671 if (!new_cursor_state || !new_cursor_state->fb) {
12f4849a
SS
9672 return 0;
9673 }
9674
69cb5629
VZ
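/* dm_get_oriented_plane_size() already shifts the 16.16 fixed-point src
 * coordinates down to integers; scales are kept in thousandths so the
 * comparisons below avoid floating point.
 */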
9675 dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
9676 cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
9677 cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
12f4849a 9678
d1bfbe8a
SS
9679 for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
9680 /* Narrow down to non-cursor planes on the same CRTC as the cursor */
9681 if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
9682 continue;
12f4849a 9683
d1bfbe8a
SS
9684 /* Ignore disabled planes */
9685 if (!new_underlying_state->fb)
9686 continue;
9687
69cb5629
VZ
9688 dm_get_oriented_plane_size(new_underlying_state,
9689 &underlying_src_w, &underlying_src_h);
9690 underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
9691 underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
d1bfbe8a
SS
9692
9693 if (cursor_scale_w != underlying_scale_w ||
9694 cursor_scale_h != underlying_scale_h) {
9695 drm_dbg_atomic(crtc->dev,
9696 "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
9697 cursor->base.id, cursor->name, underlying->base.id, underlying->name);
9698 return -EINVAL;
9699 }
9700
9701 /* If this plane covers the whole CRTC, no need to check planes underneath */
9702 if (new_underlying_state->crtc_x <= 0 &&
9703 new_underlying_state->crtc_y <= 0 &&
9704 new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
9705 new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
9706 break;
12f4849a
SS
9707 }
9708
9709 return 0;
9710}
9711
e10517b3 9712#if defined(CONFIG_DRM_AMD_DC_DCN)
44be939f
ML
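/*
 * A modeset on a CRTC that is reached through a DSC-capable MST topology can
 * change how the link's DSC bandwidth is divided, so add every CRTC sharing
 * that topology manager to the atomic state for re-validation.
 */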
9713static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9714{
9715 struct drm_connector *connector;
128f8ed5 9716 struct drm_connector_state *conn_state, *old_conn_state;
44be939f
ML
9717 struct amdgpu_dm_connector *aconnector = NULL;
9718 int i;
128f8ed5
RL
9719 for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
9720 if (!conn_state->crtc)
9721 conn_state = old_conn_state;
9722
44be939f
ML
9723 if (conn_state->crtc != crtc)
9724 continue;
9725
9726 aconnector = to_amdgpu_dm_connector(connector);
9727 if (!aconnector->port || !aconnector->mst_port)
9728 aconnector = NULL;
9729 else
9730 break;
9731 }
9732
9733 if (!aconnector)
9734 return 0;
9735
9736 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9737}
e10517b3 9738#endif
44be939f 9739
b8592b48
LL
9740/**
9741 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9742 * @dev: The DRM device
9743 * @state: The atomic state to commit
9744 *
9745 * Validate that the given atomic state is programmable by DC into hardware.
9746 * This involves constructing a &struct dc_state reflecting the new hardware
9747 * state we wish to commit, then querying DC to see if it is programmable. It's
9748 * important not to modify the existing DC state. Otherwise, atomic_check
9749 * may unexpectedly commit hardware changes.
9750 *
9751 * When validating the DC state, it's important that the right locks are
9752 * acquired. For the full-update case, which removes/adds/updates streams on
9753 * one CRTC while flipping on another, acquiring the global lock guarantees
9754 * that any such full-update commit will wait for completion of any
9755 * outstanding flip using DRM's synchronization events.
b8592b48
LL
9756 *
9757 * Note that DM adds the affected connectors for all CRTCs in state, even
9758 * when that might not seem necessary. This is because DC stream creation requires the
9759 * DC sink, which is tied to the DRM connector state. Cleaning this up should
9760 * be possible but non-trivial - a possible TODO item.
9761 *
9762 * Return: 0 on success, negative error code if validation failed.
9763 */
7578ecda
AD
9764static int amdgpu_dm_atomic_check(struct drm_device *dev,
9765 struct drm_atomic_state *state)
62f55537 9766{
1348969a 9767 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897 9768 struct dm_atomic_state *dm_state = NULL;
62f55537 9769 struct dc *dc = adev->dm.dc;
62f55537 9770 struct drm_connector *connector;
c2cea706 9771 struct drm_connector_state *old_con_state, *new_con_state;
62f55537 9772 struct drm_crtc *crtc;
fc9e9920 9773 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9e869063
LL
9774 struct drm_plane *plane;
9775 struct drm_plane_state *old_plane_state, *new_plane_state;
74a16675 9776 enum dc_status status;
1e88ad0a 9777 int ret, i;
62f55537 9778 bool lock_and_validation_needed = false;
214993e1 9779 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
6513104b
HW
9780#if defined(CONFIG_DRM_AMD_DC_DCN)
9781 struct dsc_mst_fairness_vars vars[MAX_PIPES];
41724ea2
BL
9782 struct drm_dp_mst_topology_state *mst_state;
9783 struct drm_dp_mst_topology_mgr *mgr;
6513104b 9784#endif
62f55537 9785
e8a98235 9786 trace_amdgpu_dm_atomic_check_begin(state);
c44a22b3 9787
62f55537 9788 ret = drm_atomic_helper_check_modeset(dev, state);
68ca1c3e
S
9789 if (ret) {
9790 DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
01e28f9c 9791 goto fail;
68ca1c3e 9792 }
62f55537 9793
c5892a10
SW
9794 /* Check connector changes */
9795 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9796 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9797 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9798
9799 /* Skip connectors that are disabled or part of modeset already. */
9800 if (!old_con_state->crtc && !new_con_state->crtc)
9801 continue;
9802
9803 if (!new_con_state->crtc)
9804 continue;
9805
9806 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9807 if (IS_ERR(new_crtc_state)) {
68ca1c3e 9808 DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
c5892a10
SW
9809 ret = PTR_ERR(new_crtc_state);
9810 goto fail;
9811 }
9812
9813 if (dm_old_con_state->abm_level !=
9814 dm_new_con_state->abm_level)
9815 new_crtc_state->connectors_changed = true;
9816 }
9817
e10517b3 9818#if defined(CONFIG_DRM_AMD_DC_DCN)
349a19b2 9819 if (dc_resource_is_dsc_encoding_supported(dc)) {
44be939f
ML
9820 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9821 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9822 ret = add_affected_mst_dsc_crtcs(state, crtc);
68ca1c3e
S
9823 if (ret) {
9824 DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
44be939f 9825 goto fail;
68ca1c3e 9826 }
44be939f
ML
9827 }
9828 }
71be4b16 9829 if (!pre_validate_dsc(state, &dm_state, vars)) {
9830 ret = -EINVAL;
9831 goto fail;
9832 }
44be939f 9833 }
e10517b3 9834#endif
1e88ad0a 9835 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
886876ec
EB
9836 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9837
1e88ad0a 9838 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
98e6436d 9839 !new_crtc_state->color_mgmt_changed &&
886876ec
EB
9840 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
9841 dm_old_crtc_state->dsc_force_changed == false)
1e88ad0a 9842 continue;
7bef1af3 9843
03fc4cf4 9844 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
68ca1c3e
S
9845 if (ret) {
9846 DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
03fc4cf4 9847 goto fail;
68ca1c3e 9848 }
03fc4cf4 9849
1e88ad0a
S
9850 if (!new_crtc_state->enable)
9851 continue;
fc9e9920 9852
1e88ad0a 9853 ret = drm_atomic_add_affected_connectors(state, crtc);
68ca1c3e
S
9854 if (ret) {
9855 DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
706bc8c5 9856 goto fail;
68ca1c3e 9857 }
fc9e9920 9858
1e88ad0a 9859 ret = drm_atomic_add_affected_planes(state, crtc);
68ca1c3e
S
9860 if (ret) {
9861 DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
1e88ad0a 9862 goto fail;
68ca1c3e 9863 }
115a385c 9864
cbac53f7 9865 if (dm_old_crtc_state->dsc_force_changed)
115a385c 9866 new_crtc_state->mode_changed = true;
e7b07cee
HW
9867 }
9868
2d9e6431
NK
9869 /*
9870 * Add all primary and overlay planes on the CRTC to the state
9871 * whenever a plane is enabled to maintain correct z-ordering
9872 * and to enable fast surface updates.
9873 */
9874 drm_for_each_crtc(crtc, dev) {
9875 bool modified = false;
9876
9877 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9878 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9879 continue;
9880
9881 if (new_plane_state->crtc == crtc ||
9882 old_plane_state->crtc == crtc) {
9883 modified = true;
9884 break;
9885 }
9886 }
9887
9888 if (!modified)
9889 continue;
9890
9891 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9892 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9893 continue;
9894
9895 new_plane_state =
9896 drm_atomic_get_plane_state(state, plane);
9897
9898 if (IS_ERR(new_plane_state)) {
9899 ret = PTR_ERR(new_plane_state);
68ca1c3e 9900 DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
2d9e6431
NK
9901 goto fail;
9902 }
9903 }
9904 }
9905
9906 /* Remove existing planes if they are modified */
9e869063
LL
9907 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9908 ret = dm_update_plane_state(dc, state, plane,
9909 old_plane_state,
9910 new_plane_state,
9911 false,
9912 &lock_and_validation_needed);
68ca1c3e
S
9913 if (ret) {
9914 DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
9e869063 9915 goto fail;
68ca1c3e 9916 }
62f55537
AG
9917 }
9918
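/*
 * Process CRTC disables before enables so that resources freed by outgoing
 * streams are available to be claimed by incoming ones.
 */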
9919 /* Disable all crtcs which require disable */
4b9674e5
LL
9920 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9921 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9922 old_crtc_state,
9923 new_crtc_state,
9924 false,
9925 &lock_and_validation_needed);
68ca1c3e
S
9926 if (ret) {
9927 DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
4b9674e5 9928 goto fail;
68ca1c3e 9929 }
62f55537
AG
9930 }
9931
9932 /* Enable all crtcs which require enable */
4b9674e5
LL
9933 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9934 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9935 old_crtc_state,
9936 new_crtc_state,
9937 true,
9938 &lock_and_validation_needed);
68ca1c3e
S
9939 if (ret) {
9940 DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
4b9674e5 9941 goto fail;
68ca1c3e 9942 }
62f55537
AG
9943 }
9944
9945 /* Add new/modified planes */
9e869063
LL
9946 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9947 ret = dm_update_plane_state(dc, state, plane,
9948 old_plane_state,
9949 new_plane_state,
9950 true,
9951 &lock_and_validation_needed);
68ca1c3e
S
9952 if (ret) {
9953 DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
9e869063 9954 goto fail;
68ca1c3e 9955 }
62f55537
AG
9956 }
9957
b349f76e
ES
9958 /* Run this here since we want to validate the streams we created */
9959 ret = drm_atomic_helper_check_planes(dev, state);
68ca1c3e
S
9960 if (ret) {
9961 DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
b349f76e 9962 goto fail;
68ca1c3e 9963 }
62f55537 9964
214993e1
ML
9965 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9966 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9967 if (dm_new_crtc_state->mpo_requested)
9968 DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
9969 }
9970
12f4849a
SS
9971 /* Check cursor planes scaling */
9972 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9973 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
68ca1c3e
S
9974 if (ret) {
9975 DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
12f4849a 9976 goto fail;
68ca1c3e 9977 }
12f4849a
SS
9978 }
9979
43d10d30
NK
9980 if (state->legacy_cursor_update) {
9981 /*
9982 * This is a fast cursor update coming from the plane update
9983 * helper, check if it can be done asynchronously for better
9984 * performance.
9985 */
9986 state->async_update =
9987 !drm_atomic_helper_async_check(dev, state);
9988
9989 /*
9990 * Skip the remaining global validation if this is an async
9991 * update. Cursor updates can be done without affecting
9992 * state or bandwidth calcs and this avoids the performance
9993 * penalty of locking the private state object and
9994 * allocating a new dc_state.
9995 */
9996 if (state->async_update)
9997 return 0;
9998 }
9999
10000 /* Check scaling and underscan changes */
10001 /* TODO: Scaling-change validation was removed because a new stream cannot
10002 * be committed into the context w/o causing a full reset. Need to
10003 * decide how to handle this.
10004 */
c2cea706 10005 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
54d76575
LSL
10006 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10007 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10008 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
e7b07cee
HW
10009
10010 /* Skip any modesets/resets */
0bc9706d
LSL
10011 if (!acrtc || drm_atomic_crtc_needs_modeset(
10012 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
e7b07cee
HW
10013 continue;
10014
b830ebc9 10015 /* Skip any thing not scale or underscan changes */
54d76575 10016 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
e7b07cee
HW
10017 continue;
10018
10019 lock_and_validation_needed = true;
10020 }
10021
41724ea2
BL
10022#if defined(CONFIG_DRM_AMD_DC_DCN)
10023 /* set the slot info for each mst_state based on the link encoding format */
10024 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
10025 struct amdgpu_dm_connector *aconnector;
10026 struct drm_connector *connector;
10027 struct drm_connector_list_iter iter;
10028 u8 link_coding_cap;
10029
10030 if (!mgr->mst_state)
10031 continue;
10032
10033 drm_connector_list_iter_begin(dev, &iter);
10034 drm_for_each_connector_iter(connector, &iter) {
10035 int id = connector->index;
10036
10037 if (id == mst_state->mgr->conn_base_id) {
10038 aconnector = to_amdgpu_dm_connector(connector);
10039 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
10040 drm_dp_mst_update_slots(mst_state, link_coding_cap);
10041
10042 break;
10043 }
10044 }
10045 drm_connector_list_iter_end(&iter);
10046
10047 }
10048#endif
f6d7c7fa
NK
10049 /*
10050 * Streams and planes are reset when there are changes that affect
10051 * bandwidth. Anything that affects bandwidth needs to go through
10052 * DC global validation to ensure that the configuration can be applied
10053 * to hardware.
10054 *
10055 * We currently have to stall out here in atomic_check for outstanding
10056 * commits to finish, because our IRQ handlers reference DRM state
10057 * directly - we can end up disabling interrupts too early
10058 * if we don't.
10059 *
10060 * TODO: Remove this stall and drop DM state private objects.
a87fa993 10061 */
f6d7c7fa 10062 if (lock_and_validation_needed) {
eb3dc897 10063 ret = dm_atomic_get_state(state, &dm_state);
68ca1c3e
S
10064 if (ret) {
10065 DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
eb3dc897 10066 goto fail;
68ca1c3e 10067 }
e7b07cee
HW
10068
10069 ret = do_aquire_global_lock(dev, state);
68ca1c3e
S
10070 if (ret) {
10071 DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
e7b07cee 10072 goto fail;
68ca1c3e 10073 }
1dc90497 10074
d9fe1a4c 10075#if defined(CONFIG_DRM_AMD_DC_DCN)
68ca1c3e
S
10076 if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
10077 DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
71be4b16 10078 ret = -EINVAL;
8c20a1ed 10079 goto fail;
68ca1c3e 10080 }
8c20a1ed 10081
6513104b 10082 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
68ca1c3e
S
10083 if (ret) {
10084 DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
29b9ba74 10085 goto fail;
68ca1c3e 10086 }
d9fe1a4c 10087#endif
29b9ba74 10088
ded58c7b
ZL
10089 /*
10090 * Perform validation of MST topology in the state:
10091 * We need to perform MST atomic check before calling
10092 * dc_validate_global_state(), or we risk getting stuck in an
10093 * infinite loop and eventually hanging.
10094 */
10095 ret = drm_dp_mst_atomic_check(state);
68ca1c3e
S
10096 if (ret) {
10097 DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
ded58c7b 10098 goto fail;
68ca1c3e 10099 }
85fb8bb9 10100 status = dc_validate_global_state(dc, dm_state->context, true);
74a16675 10101 if (status != DC_OK) {
68ca1c3e 10102 DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
74a16675 10103 dc_status_to_str(status), status);
e7b07cee
HW
10104 ret = -EINVAL;
10105 goto fail;
10106 }
bd200d19 10107 } else {
674e78ac 10108 /*
bd200d19
NK
10109 * The commit is a fast update. Fast updates shouldn't change
10110 * the DC context, affect global validation, and can have their
10111 * commit work done in parallel with other commits not touching
10112 * the same resource. If we have a new DC context as part of
10113 * the DM atomic state from validation we need to free it and
10114 * retain the existing one instead.
fde9f39a
MR
10115 *
10116 * Furthermore, since the DM atomic state only contains the DC
10117 * context and can safely be annulled, we can free the state
10118 * and clear the associated private object now to free
10119 * some memory and avoid a possible use-after-free later.
674e78ac 10120 */
bd200d19 10121
fde9f39a
MR
10122 for (i = 0; i < state->num_private_objs; i++) {
10123 struct drm_private_obj *obj = state->private_objs[i].ptr;
bd200d19 10124
fde9f39a
MR
10125 if (obj->funcs == adev->dm.atomic_obj.funcs) {
10126 int j = state->num_private_objs-1;
bd200d19 10127
fde9f39a
MR
10128 dm_atomic_destroy_state(obj,
10129 state->private_objs[i].state);
10130
10131 /* If i is not at the end of the array then the
10132 * last element needs to be moved to where i was
10133 * before the array can safely be truncated.
10134 */
10135 if (i != j)
10136 state->private_objs[i] =
10137 state->private_objs[j];
bd200d19 10138
fde9f39a
MR
10139 state->private_objs[j].ptr = NULL;
10140 state->private_objs[j].state = NULL;
10141 state->private_objs[j].old_state = NULL;
10142 state->private_objs[j].new_state = NULL;
10143
10144 state->num_private_objs = j;
10145 break;
10146 }
bd200d19 10147 }
e7b07cee
HW
10148 }
10149
caff0e66
NK
10150 /* Store the overall update type for use later in atomic check. */
10151 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
10152 struct dm_crtc_state *dm_new_crtc_state =
10153 to_dm_crtc_state(new_crtc_state);
10154
f6d7c7fa
NK
10155 dm_new_crtc_state->update_type = lock_and_validation_needed ?
10156 UPDATE_TYPE_FULL :
10157 UPDATE_TYPE_FAST;
e7b07cee
HW
10158 }
10159
10160 /* ret must indicate success (0) at this point */
10161 WARN_ON(ret);
e8a98235
RS
10162
10163 trace_amdgpu_dm_atomic_check_finish(state, ret);
10164
e7b07cee
HW
10165 return ret;
10166
10167fail:
10168 if (ret == -EDEADLK)
01e28f9c 10169 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
e7b07cee 10170 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
01e28f9c 10171 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
e7b07cee 10172 else
10173 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
e7b07cee 10174
e8a98235
RS
10175 trace_amdgpu_dm_atomic_check_finish(state, ret);
10176
e7b07cee
HW
10177 return ret;
10178}
10179
3ee6b26b
AD
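/*
 * DPCD 0x0007 (DP_DOWN_STREAM_PORT_COUNT) bit 6 is MSA_TIMING_PAR_IGNORED:
 * the sink can hold sync without Main Stream Attribute updates, which is the
 * prerequisite for driving variable refresh timings over DP.
 */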
10180static bool is_dp_capable_without_timing_msa(struct dc *dc,
10181 struct amdgpu_dm_connector *amdgpu_dm_connector)
e7b07cee
HW
10182{
10183 uint8_t dpcd_data;
10184 bool capable = false;
10185
c84dec2f 10186 if (amdgpu_dm_connector->dc_link &&
e7b07cee
HW
10187 dm_helpers_dp_read_dpcd(
10188 NULL,
c84dec2f 10189 amdgpu_dm_connector->dc_link,
e7b07cee
HW
10190 DP_DOWN_STREAM_PORT_COUNT,
10191 &dpcd_data,
10192 sizeof(dpcd_data))) {
10193 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
10194 }
10195
10196 return capable;
10197}
f9b4f20c 10198
46db138d
SW
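/*
 * Push one chunk of a CEA extension block to the DMUB firmware parser and
 * handle its reply: a per-chunk ACK while parsing is still in progress, or
 * the decoded AMD VSDB (FreeSync refresh range) once the block is complete.
 */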
10199static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
10200 unsigned int offset,
10201 unsigned int total_length,
10202 uint8_t *data,
10203 unsigned int length,
10204 struct amdgpu_hdmi_vsdb_info *vsdb)
10205{
10206 bool res;
10207 union dmub_rb_cmd cmd;
10208 struct dmub_cmd_send_edid_cea *input;
10209 struct dmub_cmd_edid_cea_output *output;
10210
10211 if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
10212 return false;
10213
10214 memset(&cmd, 0, sizeof(cmd));
10215
10216 input = &cmd.edid_cea.data.input;
10217
10218 cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
10219 cmd.edid_cea.header.sub_type = 0;
10220 cmd.edid_cea.header.payload_bytes =
10221 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
10222 input->offset = offset;
10223 input->length = length;
eb9e59eb 10224 input->cea_total_length = total_length;
46db138d
SW
10225 memcpy(input->payload, data, length);
10226
10227 res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
10228 if (!res) {
10229 DRM_ERROR("EDID CEA parser failed\n");
10230 return false;
10231 }
10232
10233 output = &cmd.edid_cea.data.output;
10234
10235 if (output->type == DMUB_CMD__EDID_CEA_ACK) {
10236 if (!output->ack.success) {
10237 DRM_ERROR("EDID CEA ack failed at offset %d\n",
10238 output->ack.offset);
10239 }
10240 } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
10241 if (!output->amd_vsdb.vsdb_found)
10242 return false;
10243
10244 vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
10245 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
10246 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
10247 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
10248 } else {
b76a8062 10249 DRM_WARN("Unknown EDID CEA parser results\n");
46db138d
SW
10250 return false;
10251 }
10252
10253 return true;
10254}
10255
10256static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
f9b4f20c
SW
10257 uint8_t *edid_ext, int len,
10258 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10259{
10260 int i;
f9b4f20c
SW
10261
10262 /* send extension block to DMCU for parsing */
10263 for (i = 0; i < len; i += 8) {
10264 bool res;
10265 int offset;
10266
10267 /* send 8 bytes a time */
46db138d 10268 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
f9b4f20c
SW
10269 return false;
10270
10271 if (i+8 == len) {
10272 /* EDID block sent completed, expect result */
10273 int version, min_rate, max_rate;
10274
46db138d 10275 res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
f9b4f20c
SW
10276 if (res) {
10277 /* amd vsdb found */
10278 vsdb_info->freesync_supported = 1;
10279 vsdb_info->amd_vsdb_version = version;
10280 vsdb_info->min_refresh_rate_hz = min_rate;
10281 vsdb_info->max_refresh_rate_hz = max_rate;
10282 return true;
10283 }
10284 /* not amd vsdb */
10285 return false;
10286 }
10287
10288 /* check for ack */
46db138d 10289 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
f9b4f20c
SW
10290 if (!res)
10291 return false;
10292 }
10293
10294 return false;
10295}
10296
46db138d
SW
10297static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
10298 uint8_t *edid_ext, int len,
10299 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10300{
10301 int i;
10302
10303 /* send extension block to DMUB for parsing */
10304 for (i = 0; i < len; i += 8) {
10305 /* send 8 bytes a time */
10306 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
10307 return false;
10308 }
10309
10310 return vsdb_info->freesync_supported;
10311}
10312
10313static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10314 uint8_t *edid_ext, int len,
10315 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10316{
10317 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10318
10319 if (adev->dm.dmub_srv)
10320 return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
10321 else
10322 return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
10323}
10324
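/*
 * Locate the CEA extension block in the EDID and run the AMD VSDB parser
 * over it. Returns the extension block index when a valid VSDB is found,
 * -ENODEV otherwise.
 */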
7c7dd774 10325static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
f9b4f20c
SW
10326 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10327{
10328 uint8_t *edid_ext = NULL;
10329 int i;
10330 bool valid_vsdb_found = false;
10331
10332 /*----- drm_find_cea_extension() -----*/
10333 /* No EDID or EDID extensions */
10334 if (edid == NULL || edid->extensions == 0)
7c7dd774 10335 return -ENODEV;
f9b4f20c
SW
10336
10337 /* Find CEA extension */
10338 for (i = 0; i < edid->extensions; i++) {
10339 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10340 if (edid_ext[0] == CEA_EXT)
10341 break;
10342 }
10343
10344 if (i == edid->extensions)
7c7dd774 10345 return -ENODEV;
f9b4f20c
SW
10346
10347 /*----- cea_db_offsets() -----*/
10348 if (edid_ext[0] != CEA_EXT)
7c7dd774 10349 return -ENODEV;
f9b4f20c
SW
10350
10351 valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
7c7dd774
AB
10352
10353 return valid_vsdb_found ? i : -ENODEV;
f9b4f20c
SW
10354}
10355
98e6436d
AK
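/*
 * Re-derive the connector's FreeSync capability from a freshly read EDID:
 * DP/eDP sinks are probed through the detailed timing range descriptor,
 * HDMI sinks through the AMD vendor-specific data block, and the result is
 * mirrored into the DRM vrr_capable property.
 */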
10356void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10357 struct edid *edid)
e7b07cee 10358{
eb0709ba 10359 int i = 0;
e7b07cee
HW
10360 struct detailed_timing *timing;
10361 struct detailed_non_pixel *data;
10362 struct detailed_data_monitor_range *range;
c84dec2f
HW
10363 struct amdgpu_dm_connector *amdgpu_dm_connector =
10364 to_amdgpu_dm_connector(connector);
bb47de73 10365 struct dm_connector_state *dm_con_state = NULL;
9ad54467 10366 struct dc_sink *sink;
e7b07cee
HW
10367
10368 struct drm_device *dev = connector->dev;
1348969a 10369 struct amdgpu_device *adev = drm_to_adev(dev);
bb47de73 10370 bool freesync_capable = false;
f9b4f20c 10371 struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
b830ebc9 10372
8218d7f1
HW
10373 if (!connector->state) {
10374 DRM_ERROR("%s - Connector has no state\n", __func__);
bb47de73 10375 goto update;
8218d7f1
HW
10376 }
10377
9b2fdc33
AP
10378 sink = amdgpu_dm_connector->dc_sink ?
10379 amdgpu_dm_connector->dc_sink :
10380 amdgpu_dm_connector->dc_em_sink;
10381
10382 if (!edid || !sink) {
98e6436d
AK
10383 dm_con_state = to_dm_connector_state(connector->state);
10384
10385 amdgpu_dm_connector->min_vfreq = 0;
10386 amdgpu_dm_connector->max_vfreq = 0;
10387 amdgpu_dm_connector->pixel_clock_mhz = 0;
9b2fdc33
AP
10388 connector->display_info.monitor_range.min_vfreq = 0;
10389 connector->display_info.monitor_range.max_vfreq = 0;
10390 freesync_capable = false;
98e6436d 10391
bb47de73 10392 goto update;
98e6436d
AK
10393 }
10394
8218d7f1
HW
10395 dm_con_state = to_dm_connector_state(connector->state);
10396
e7b07cee 10397 if (!adev->dm.freesync_module)
bb47de73 10398 goto update;
f9b4f20c
SW
10399
10400
9b2fdc33
AP
10401 if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
10402 || sink->sink_signal == SIGNAL_TYPE_EDP) {
f9b4f20c
SW
10403 bool edid_check_required = false;
10404
10405 if (edid) {
e7b07cee
HW
10406 edid_check_required = is_dp_capable_without_timing_msa(
10407 adev->dm.dc,
c84dec2f 10408 amdgpu_dm_connector);
e7b07cee 10409 }
e7b07cee 10410
f9b4f20c
SW
10411 if (edid_check_required && (edid->version > 1 ||
10412 (edid->version == 1 && edid->revision > 1))) {
10413 for (i = 0; i < 4; i++) {
e7b07cee 10414
f9b4f20c
SW
10415 timing = &edid->detailed_timings[i];
10416 data = &timing->data.other_data;
10417 range = &data->data.range;
10418 /*
10419 * Check if monitor has continuous frequency mode
10420 */
10421 if (data->type != EDID_DETAIL_MONITOR_RANGE)
10422 continue;
10423 /*
10424 * Check for flag range limits only. If flag == 1 then
10425 * no additional timing information provided.
10426 * Default GTF, GTF Secondary curve and CVT are not
10427 * supported
10428 */
10429 if (range->flags != 1)
10430 continue;
a0ffc3fd 10431
f9b4f20c
SW
10432 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10433 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10434 amdgpu_dm_connector->pixel_clock_mhz =
10435 range->pixel_clock_mhz * 10;
a0ffc3fd 10436
f9b4f20c
SW
10437 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10438 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
e7b07cee 10439
f9b4f20c
SW
10440 break;
10441 }
98e6436d 10442
f9b4f20c
SW
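/* Only advertise FreeSync when the range is usefully wide; a span of
 * 10 Hz or less is treated as effectively fixed-rate.
 */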
10443 if (amdgpu_dm_connector->max_vfreq -
10444 amdgpu_dm_connector->min_vfreq > 10) {
98e6436d 10445
f9b4f20c
SW
10446 freesync_capable = true;
10447 }
10448 }
9b2fdc33 10449 } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
7c7dd774
AB
10450 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10451 if (i >= 0 && vsdb_info.freesync_supported) {
f9b4f20c
SW
10452 timing = &edid->detailed_timings[i];
10453 data = &timing->data.other_data;
10454
10455 amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10456 amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10457 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10458 freesync_capable = true;
10459
10460 connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10461 connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
e7b07cee
HW
10462 }
10463 }
bb47de73
NK
10464
10465update:
10466 if (dm_con_state)
10467 dm_con_state->freesync_capable = freesync_capable;
10468
10469 if (connector->vrr_capable_property)
10470 drm_connector_set_vrr_capable_property(connector,
10471 freesync_capable);
e7b07cee
HW
10472}
10473
3d4e52d0
VL
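/*
 * Apply the current force_timing_sync setting to every active stream and
 * retrigger cross-stream synchronization under the DC lock.
 */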
10474void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10475{
1348969a 10476 struct amdgpu_device *adev = drm_to_adev(dev);
3d4e52d0
VL
10477 struct dc *dc = adev->dm.dc;
10478 int i;
10479
10480 mutex_lock(&adev->dm.dc_lock);
10481 if (dc->current_state) {
10482 for (i = 0; i < dc->current_state->stream_count; ++i)
10483 dc->current_state->streams[i]
10484 ->triggered_crtc_reset.enabled =
10485 adev->dm.force_timing_sync;
10486
10487 dm_enable_per_frame_crtc_master_sync(dc->current_state);
10488 dc_trigger_sync(dc, dc->current_state);
10489 }
10490 mutex_unlock(&adev->dm.dc_lock);
10491}
9d83722d
RS
10492
10493void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10494 uint32_t value, const char *func_name)
10495{
10496#ifdef DM_CHECK_ADDR_0
10497 if (address == 0) {
10498 DC_ERR("invalid register write. address = 0\n");
10499 return;
10500 }
10501#endif
10502 cgs_write_register(ctx->cgs_device, address, value);
10503 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
10504}
10505
10506uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10507 const char *func_name)
10508{
10509 uint32_t value;
10510#ifdef DM_CHECK_ADDR_0
10511 if (address == 0) {
10512 DC_ERR("invalid register read; address = 0\n");
10513 return 0;
10514 }
10515#endif
10516
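/* Reads cannot be deferred into a DMUB register-write burst; hitting this
 * mid-gather indicates a sequencing problem, so flag it and return 0.
 */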
10517 if (ctx->dmub_srv &&
10518 ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10519 !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10520 ASSERT(false);
10521 return 0;
10522 }
10523
10524 value = cgs_read_register(ctx->cgs_device, address);
10525
10526 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
10527
10528 return value;
10529}
81927e28 10530
240e6d25
IB
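/*
 * Map the completion status of an async DMUB transaction (AUX or SET_CONFIG)
 * onto the synchronous convention callers expect: the AUX reply length (or 0
 * for SET_CONFIG) on success, otherwise -1 with *operation_result set to the
 * specific failure code.
 */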
10531static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
10532 struct dc_context *ctx,
10533 uint8_t status_type,
10534 uint32_t *operation_result)
88f52b1f
JS
10535{
10536 struct amdgpu_device *adev = ctx->driver_context;
10537 int return_status = -1;
10538 struct dmub_notification *p_notify = adev->dm.dmub_notify;
10539
10540 if (is_cmd_aux) {
10541 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
10542 return_status = p_notify->aux_reply.length;
10543 *operation_result = p_notify->result;
10544 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
10545 *operation_result = AUX_RET_ERROR_TIMEOUT;
10546 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
10547 *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
10548 } else {
10549 *operation_result = AUX_RET_ERROR_UNKNOWN;
10550 }
10551 } else {
10552 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
10553 return_status = 0;
10554 *operation_result = p_notify->sc_status;
10555 } else {
10556 *operation_result = SET_CONFIG_UNKNOWN_ERROR;
10557 }
10558 }
10559
10560 return return_status;
10561}
10562
10563int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
10564 unsigned int link_index, void *cmd_payload, void *operation_result)
81927e28
JS
10565{
10566 struct amdgpu_device *adev = ctx->driver_context;
10567 int ret = 0;
10568
88f52b1f
JS
10569 if (is_cmd_aux) {
10570 dc_process_dmub_aux_transfer_async(ctx->dc,
10571 link_index, (struct aux_payload *)cmd_payload);
10572 } else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
10573 (struct set_config_cmd_payload *)cmd_payload,
10574 adev->dm.dmub_notify)) {
10575 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
10576 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
10577 (uint32_t *)operation_result);
10578 }
10579
9e3a50d2 10580 ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
81927e28 10581 if (ret == 0) {
10582 DRM_ERROR("wait_for_completion_timeout timed out!\n");
88f52b1f
JS
10583 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
10584 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
10585 (uint32_t *)operation_result);
81927e28 10586 }
81927e28 10587
88f52b1f
JS
10588 if (is_cmd_aux) {
10589 if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
10590 struct aux_payload *payload = (struct aux_payload *)cmd_payload;
81927e28 10591
88f52b1f
JS
10592 payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
10593 if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
10594 payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
10595 memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
10596 adev->dm.dmub_notify->aux_reply.length);
10597 }
10598 }
81927e28
JS
10599 }
10600
88f52b1f
JS
10601 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
10602 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
10603 (uint32_t *)operation_result);
81927e28 10604}
1edf5ae1
ZL
10605
10606/*
10607 * Check whether seamless boot is supported.
10608 *
10609 * So far we only support seamless boot on CHIP_VANGOGH.
10610 * If everything goes well, we may consider expanding
10611 * seamless boot to other ASICs.
10612 */
10613bool check_seamless_boot_capability(struct amdgpu_device *adev)
10614{
10615 switch (adev->asic_type) {
10616 case CHIP_VANGOGH:
10617 if (!adev->mman.keep_stolen_vga_memory)
10618 return true;
10619 break;
10620 default:
10621 break;
10622 }
10623
10624 return false;
10625}