drm/amdgpu/vcn3: drop extraneous Beige Goby hunk
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc_link_dp.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include "i2caux_interface.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
#endif

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);

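/*
 * Map the sink's DPCD dongle type onto the DRM subconnector type, so
 * userspace can tell native DP apart from DP-to-VGA/DVI/HDMI dongles.
 */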
static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state);
/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
	}
}

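/*
 * Report the current scanout position and vblank extents for a CRTC, packed
 * in the register-style format the base driver expects (low/high 16 bits).
 */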
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

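/*
 * Look up the amdgpu_crtc whose OTG (output timing generator) instance
 * matches the interrupt source; falls back to CRTC 0 if otg_inst is -1.
 */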
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (WARN_ON(otg_inst == -1))
		return adev->mode_info.crtcs[0];

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

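/*
 * VRR is considered active when the freesync state is either "active
 * variable" or "active fixed"; the _irq variant reads the state cached
 * in the CRTC's IRQ parameters, which is safe from interrupt context.
 */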
static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
	       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
	       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
					      struct dm_crtc_state *new_state)
{
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
		return true;
	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
		return true;
	else
		return false;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DC_LOG_PFLIP("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
			     amdgpu_crtc->pflip_status,
			     AMDGPU_FLIP_SUBMITTED,
			     amdgpu_crtc->crtc_id,
			     amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	WARN_ON(!e);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
		     amdgpu_crtc->crtc_id, amdgpu_crtc,
		     vrr_active, (int) !e);
}

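/*
 * dm_vupdate_high_irq() - Handle VUPDATE interrupt, fired after the end of
 * the front-porch. Tracks the measured refresh rate and, in VRR mode,
 * performs core vblank handling (plus BTR processing on pre-DCE12 ASICs),
 * since vblank timestamps are only valid here while VRR is active.
 */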
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct drm_device *drm_dev;
	struct drm_vblank_crtc *vblank;
	ktime_t frame_duration_ns, previous_timestamp;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
		drm_dev = acrtc->base.dev;
		vblank = &drm_dev->vblank[acrtc->base.index];
		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
		frame_duration_ns = vblank->time - previous_timestamp;

		if (frame_duration_ns > 0) {
			trace_amdgpu_refresh_rate_track(acrtc->base.index,
						frame_duration_ns,
						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
			atomic64_set(&irq_params->previous_timestamp, vblank->time);
		}

		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
					adev->dm.freesync_module,
					acrtc->dm_irq_params.stream,
					&acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
					adev->dm.dc,
					acrtc->dm_irq_params.stream,
					&acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/**
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/**
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
	    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

	if (!acrtc)
		return;

	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif

/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox interrupt by draining pending DMUB notifications
 * and trace log entries.
 */
#define DMUB_TRACE_MAX_READ 64
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
	struct dmub_notification notify;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dmcub_trace_buf_entry entry = { 0 };
	uint32_t count = 0;

	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
			do {
				dc_stat_get_dmub_notification(adev->dm.dc, &notify);
			} while (notify.pending_notification);

			if (adev->dm.dmub_notify)
				memcpy(adev->dm.dmub_notify, &notify, sizeof(struct dmub_notification));
			if (notify.type == DMUB_NOTIFICATION_AUX_REPLY)
				complete(&adev->dm.dmub_aux_transfer_done);
			// TODO : HPD Implementation

		} else {
			DRM_ERROR("DM: Failed to receive correct outbox IRQ !");
		}
	}

	do {
		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
							 entry.param0, entry.param1);

			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
					 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
		} else
			break;

		count++;

	} while (count <= DMUB_TRACE_MAX_READ);

	ASSERT(count <= DMUB_TRACE_MAX_READ);
}
#endif

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}
	}
}

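/*
 * Glue between the DRM audio component framework and DM: lets the HDA
 * driver query ELD (EDID-like audio capability) data for a given port.
 */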
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

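/*
 * Bring up the DMUB (display microcontroller unit B) service: copy the
 * firmware and VBIOS into framebuffer memory, program the hardware, wait
 * for the firmware auto-load to finish, and hook the service up to DC.
 */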
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	if (!adev->dm.dc->ctx->dmub_srv)
		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

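/*
 * Translate the GPU memory controller's view of system memory (framebuffer,
 * AGP aperture and GART page table ranges) into the physical address space
 * config that DC needs for system-aperture access on APUs.
 */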
#if defined(CONFIG_DRM_AMD_DC_DCN)
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increase system aperture high address (add 1)
		 * to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;
}
#endif
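/*
 * Deferred work that counts how many CRTCs have vblank IRQs enabled and
 * only allows DC idle optimizations (MALL stutter) when none do.
 */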
#if defined(CONFIG_DRM_AMD_DC_DCN)
static void event_mall_stutter(struct work_struct *work)
{
	struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
	struct amdgpu_display_manager *dm = vblank_work->dm;

	mutex_lock(&dm->dc_lock);

	if (vblank_work->enable)
		dm->active_vblank_irq_count++;
	else if (dm->active_vblank_irq_count)
		dm->active_vblank_irq_count--;

	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

	mutex_unlock(&dm->dc_lock);
}

static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
{
	int max_caps = dc->caps.max_links;
	struct vblank_workqueue *vblank_work;
	int i = 0;

	vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(vblank_work)) {
		kfree(vblank_work);
		return NULL;
	}

	for (i = 0; i < max_caps; i++)
		INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);

	return vblank_work;
}
#endif
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);
#if defined(CONFIG_DRM_AMD_DC_DCN)
	spin_lock_init(&adev->dm.vblank_lock);
#endif

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			init_data.flags.disable_dmcu = true;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_VANGOGH:
		init_data.flags.gpu_vm_support = true;
		break;
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
	case CHIP_YELLOW_CARP:
		init_data.flags.gpu_vm_support = true;
		break;
#endif
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	INIT_LIST_HEAD(&adev->dm.da_list);
	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->apu_flags) {
		struct dc_phy_addr_space_config pa_config;

		mmhub_read_system_context(adev, &pa_config);

		// Call the DC init_memory func
		dc_setup_system_context(adev->dm.dc, &pa_config);
	}
#endif

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				 adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->dm.dc->caps.max_links > 0) {
		adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);

		if (!adev->dm.vblank_workqueue)
			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
	}
#endif

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
#endif
	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		init_completion(&adev->dm.dmub_aux_transfer_done);
		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
		if (!adev->dm.dmub_notify) {
			DRM_INFO("amdgpu: failed to allocate adev->dm.dmub_notify");
			goto error;
		}
		amdgpu_dm_outbox_init(adev);
	}

	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static int amdgpu_dm_early_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_audio_fini(adev);

	return 0;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_destroy_drm_device(&adev->dm);

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	if (adev->dm.crc_rd_wrk) {
		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
		kfree(adev->dm.crc_rd_wrk);
		adev->dm.crc_rd_wrk = NULL;
	}
#endif
#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->dm.vblank_workqueue) {
		adev->dm.vblank_workqueue->dm = NULL;
		kfree(adev->dm.vblank_workqueue);
		adev->dm.vblank_workqueue = NULL;
	}
#endif

	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);

	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		kfree(adev->dm.dmub_notify);
		adev->dm.dmub_notify = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vlank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);

	return;
}

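/*
 * Request and validate the DMCU firmware for ASICs that need it loaded
 * through PSP; ASICs without a separate DMCU image simply return 0.
 */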
static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_BEIGE_GOBY:
	case CHIP_VANGOGH:
#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
	case CHIP_YELLOW_CARP:
#endif
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}

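/*
 * Software-side DMUB setup: pick the per-ASIC firmware image, create the
 * DMUB service, size its memory regions and back them with a VRAM buffer.
 */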
1494static int dm_dmub_sw_init(struct amdgpu_device *adev)
1495{
1496 struct dmub_srv_create_params create_params;
8c7aea40
NK
1497 struct dmub_srv_region_params region_params;
1498 struct dmub_srv_region_info region_info;
1499 struct dmub_srv_fb_params fb_params;
1500 struct dmub_srv_fb_info *fb_info;
1501 struct dmub_srv *dmub_srv;
743b9786
NK
1502 const struct dmcub_firmware_header_v1_0 *hdr;
1503 const char *fw_name_dmub;
1504 enum dmub_asic dmub_asic;
1505 enum dmub_status status;
1506 int r;
1507
1508 switch (adev->asic_type) {
1509 case CHIP_RENOIR:
1510 dmub_asic = DMUB_ASIC_DCN21;
1511 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
71c0fd92
RL
1512 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1513 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
743b9786 1514 break;
79037324
BL
1515 case CHIP_SIENNA_CICHLID:
1516 dmub_asic = DMUB_ASIC_DCN30;
1517 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1518 break;
5ce868fc
BL
1519 case CHIP_NAVY_FLOUNDER:
1520 dmub_asic = DMUB_ASIC_DCN30;
1521 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
79037324 1522 break;
469989ca
RL
1523 case CHIP_VANGOGH:
1524 dmub_asic = DMUB_ASIC_DCN301;
1525 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1526 break;
2a411205
BL
1527 case CHIP_DIMGREY_CAVEFISH:
1528 dmub_asic = DMUB_ASIC_DCN302;
1529 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1530 break;
656fe9b6
AP
1531 case CHIP_BEIGE_GOBY:
1532 dmub_asic = DMUB_ASIC_DCN303;
1533 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1534 break;
1ebcaebd
NK
1535#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
1536 case CHIP_YELLOW_CARP:
1537 dmub_asic = DMUB_ASIC_DCN31;
1538 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1539 break;
1540#endif
743b9786
NK
1541
1542 default:
1543 /* ASIC doesn't support DMUB. */
1544 return 0;
1545 }
1546
743b9786
NK
1547 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1548 if (r) {
1549 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1550 return 0;
1551 }
1552
1553 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1554 if (r) {
1555 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1556 return 0;
1557 }
1558
743b9786 1559 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
743b9786 1560
9a6ed547
NK
1561 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1562 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1563 AMDGPU_UCODE_ID_DMCUB;
1564 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1565 adev->dm.dmub_fw;
1566 adev->firmware.fw_size +=
1567 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
743b9786 1568
9a6ed547
NK
1569 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1570 adev->dm.dmcub_fw_version);
1571 }
1572
1573 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
743b9786 1574
8c7aea40
NK
1575 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1576 dmub_srv = adev->dm.dmub_srv;
1577
1578 if (!dmub_srv) {
1579 DRM_ERROR("Failed to allocate DMUB service!\n");
1580 return -ENOMEM;
1581 }
1582
1583 memset(&create_params, 0, sizeof(create_params));
1584 create_params.user_ctx = adev;
1585 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1586 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1587 create_params.asic = dmub_asic;
1588
1589 /* Create the DMUB service. */
1590 status = dmub_srv_create(dmub_srv, &create_params);
1591 if (status != DMUB_STATUS_OK) {
1592 DRM_ERROR("Error creating DMUB service: %d\n", status);
1593 return -EINVAL;
1594 }
1595
1596 /* Calculate the size of all the regions for the DMUB service. */
1597 memset(&region_params, 0, sizeof(region_params));
1598
1599 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1600 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1601 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1602 region_params.vbios_size = adev->bios_size;
0922b899 1603 region_params.fw_bss_data = region_params.bss_data_size ?
1f0674fd
NK
1604 adev->dm.dmub_fw->data +
1605 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
0922b899 1606 le32_to_cpu(hdr->inst_const_bytes) : NULL;
a576b345
NK
1607 region_params.fw_inst_const =
1608 adev->dm.dmub_fw->data +
1609 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1610 PSP_HEADER_BYTES;
8c7aea40
NK
1611
1612 status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1613 &region_info);
1614
1615 if (status != DMUB_STATUS_OK) {
1616 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1617 return -EINVAL;
1618 }
1619
1620 /*
1621 * Allocate a framebuffer based on the total size of all the regions.
1622 * TODO: Move this into GART.
1623 */
1624 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1625 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1626 &adev->dm.dmub_bo_gpu_addr,
1627 &adev->dm.dmub_bo_cpu_addr);
1628 if (r)
1629 return r;
1630
1631 /* Rebase the regions on the framebuffer address. */
1632 memset(&fb_params, 0, sizeof(fb_params));
1633 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1634 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1635 fb_params.region_info = &region_info;
1636
1637 adev->dm.dmub_fb_info =
1638 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1639 fb_info = adev->dm.dmub_fb_info;
1640
1641 if (!fb_info) {
1642 DRM_ERROR(
1643 "Failed to allocate framebuffer info for DMUB service!\n");
1644 return -ENOMEM;
1645 }
1646
1647 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1648 if (status != DMUB_STATUS_OK) {
1649 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1650 return -EINVAL;
1651 }
1652
1653 return 0;
1654}
1655
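/*
 * A condensed sketch of the DMUB bring-up sequence implemented above
 * (error handling elided; all names are the ones used in this file):
 *
 *	request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
 *	amdgpu_ucode_validate(adev->dm.dmub_fw);
 *	dmub_srv_create(dmub_srv, &create_params);
 *	dmub_srv_calc_region_info(dmub_srv, &region_params, &region_info);
 *	amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE, ...);
 *	dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
 */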
1656static int dm_sw_init(void *handle)
1657{
1658 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1659 int r;
1660
1661 r = dm_dmub_sw_init(adev);
1662 if (r)
1663 return r;
1664
1665 return load_dmcu_fw(adev);
1666}
1667
1668static int dm_sw_fini(void *handle)
1669{
1670 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1671
1672 kfree(adev->dm.dmub_fb_info);
1673 adev->dm.dmub_fb_info = NULL;
1674
1675 if (adev->dm.dmub_srv) {
1676 dmub_srv_destroy(adev->dm.dmub_srv);
1677 adev->dm.dmub_srv = NULL;
1678 }
1679
1680 release_firmware(adev->dm.dmub_fw);
1681 adev->dm.dmub_fw = NULL;
1682
1683 release_firmware(adev->dm.fw_dmcu);
1684 adev->dm.fw_dmcu = NULL;
1685
1686 return 0;
1687}
1688
1689static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1690{
1691 struct amdgpu_dm_connector *aconnector;
1692 struct drm_connector *connector;
1693 struct drm_connector_list_iter iter;
1694 int ret = 0;
1695
1696 drm_connector_list_iter_begin(dev, &iter);
1697 drm_for_each_connector_iter(connector, &iter) {
1698 aconnector = to_amdgpu_dm_connector(connector);
1699 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1700 aconnector->mst_mgr.aux) {
1701 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1702 aconnector,
1703 aconnector->base.base.id);
1704
1705 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1706 if (ret < 0) {
1707 DRM_ERROR("DM_MST: Failed to start MST\n");
1708 aconnector->dc_link->type =
1709 dc_connection_single;
1710 break;
1711 }
1712 }
1713 }
1714 drm_connector_list_iter_end(&iter);
1715
1716 return ret;
1717}
1718
1719static int dm_late_init(void *handle)
1720{
1721 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1722
1723 struct dmcu_iram_parameters params;
1724 unsigned int linear_lut[16];
1725 int i;
1726 struct dmcu *dmcu = NULL;
1727
1728 dmcu = adev->dm.dc->res_pool->dmcu;
1729
1730 for (i = 0; i < 16; i++)
1731 linear_lut[i] = 0xFFFF * i / 15;
1732
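/*
 * Since 0xFFFF / 15 == 0x1111 exactly, the loop above produces the
 * linear 16-entry ramp 0x0000, 0x1111, 0x2222, ..., 0xFFFF.
 */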
1733 params.set = 0;
1734 params.backlight_ramping_start = 0xCCCC;
1735 params.backlight_ramping_reduction = 0xCCCCCCCC;
1736 params.backlight_lut_array_size = 16;
1737 params.backlight_lut_array = linear_lut;
1738
1739 /* Min backlight level after ABM reduction; don't allow below 1%:
1740 * 0xFFFF x 0.01 = 0x28F
1741 */
1742 params.min_abm_backlight = 0x28F;
1743 /* In the case where abm is implemented on dmcub,
1744 * the dmcu object will be NULL.
1745 * ABM 2.4 and up are implemented on dmcub.
1746 */
1747 if (dmcu) {
1748 if (!dmcu_load_iram(dmcu, params))
1749 return -EINVAL;
1750 } else if (adev->dm.dc->ctx->dmub_srv) {
1751 struct dc_link *edp_links[MAX_NUM_EDP];
1752 int edp_num;
1753
1754 get_edp_links(adev->dm.dc, edp_links, &edp_num);
1755 for (i = 0; i < edp_num; i++) {
1756 if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
1757 return -EINVAL;
1758 }
1759 }
1760
1761 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1762}
1763
1764static void s3_handle_mst(struct drm_device *dev, bool suspend)
1765{
1766 struct amdgpu_dm_connector *aconnector;
1767 struct drm_connector *connector;
1768 struct drm_connector_list_iter iter;
1769 struct drm_dp_mst_topology_mgr *mgr;
1770 int ret;
1771 bool need_hotplug = false;
1772
1773 drm_connector_list_iter_begin(dev, &iter);
1774 drm_for_each_connector_iter(connector, &iter) {
1775 aconnector = to_amdgpu_dm_connector(connector);
1776 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1777 aconnector->mst_port)
1778 continue;
1779
1780 mgr = &aconnector->mst_mgr;
1781
1782 if (suspend) {
1783 drm_dp_mst_topology_mgr_suspend(mgr);
1784 } else {
1785 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1786 if (ret < 0) {
1787 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1788 need_hotplug = true;
1789 }
1790 }
1791 }
1792 drm_connector_list_iter_end(&iter);
1793
1794 if (need_hotplug)
1795 drm_kms_helper_hotplug_event(dev);
1796}
1797
1798static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1799{
1800 struct smu_context *smu = &adev->smu;
1801 int ret = 0;
1802
1803 if (!is_support_sw_smu(adev))
1804 return 0;
1805
1806 /* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
1807 * on the Windows driver dc implementation.
1808 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
1809 * should be passed to smu during boot up and resume from s3.
1810 * boot up: dc calculates dcn watermark clock settings within dc_create,
1811 * dcn20_resource_construct
1812 * then call pplib functions below to pass the settings to smu:
1813 * smu_set_watermarks_for_clock_ranges
1814 * smu_set_watermarks_table
1815 * navi10_set_watermarks_table
1816 * smu_write_watermarks_table
1817 *
1818 * For Renoir, clock settings of dcn watermarks are also fixed values.
1819 * dc has implemented a different flow for the Windows driver:
1820 * dc_hardware_init / dc_set_power_state
1821 * dcn10_init_hw
1822 * notify_wm_ranges
1823 * set_wm_ranges
1824 * -- Linux
1825 * smu_set_watermarks_for_clock_ranges
1826 * renoir_set_watermarks_table
1827 * smu_write_watermarks_table
1828 *
1829 * For Linux,
1830 * dc_hardware_init -> amdgpu_dm_init
1831 * dc_set_power_state --> dm_resume
1832 *
1833 * therefore, this function applies to navi10/12/14 but not Renoir.
1834 *
1835 */
1836 switch (adev->asic_type) {
1837 case CHIP_NAVI10:
1838 case CHIP_NAVI14:
1839 case CHIP_NAVI12:
1840 break;
1841 default:
1842 return 0;
1843 }
1844
1845 ret = smu_write_watermarks_table(smu);
1846 if (ret) {
1847 DRM_ERROR("Failed to update WMTABLE!\n");
1848 return ret;
1849 }
1850
1851 return 0;
1852}
1853
1854/**
1855 * dm_hw_init() - Initialize DC device
1856 * @handle: The base driver device containing the amdgpu_dm device.
1857 *
1858 * Initialize the &struct amdgpu_display_manager device. This involves calling
1859 * the initializers of each DM component, then populating the struct with them.
1860 *
1861 * Although the function implies hardware initialization, both hardware and
1862 * software are initialized here. Splitting them out to their relevant init
1863 * hooks is a future TODO item.
1864 *
1865 * Some notable things that are initialized here:
1866 *
1867 * - Display Core, both software and hardware
1868 * - DC modules that we need (freesync and color management)
1869 * - DRM software states
1870 * - Interrupt sources and handlers
1871 * - Vblank support
1872 * - Debug FS entries, if enabled
1873 */
1874static int dm_hw_init(void *handle)
1875{
1876 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1877 /* Create DAL display manager */
1878 amdgpu_dm_init(adev);
1879 amdgpu_dm_hpd_init(adev);
1880
1881 return 0;
1882}
1883
1884/**
1885 * dm_hw_fini() - Teardown DC device
1886 * @handle: The base driver device containing the amdgpu_dm device.
1887 *
1888 * Teardown components within &struct amdgpu_display_manager that require
1889 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1890 * were loaded. Also flush IRQ workqueues and disable them.
1891 */
1892static int dm_hw_fini(void *handle)
1893{
1894 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1895
1896 amdgpu_dm_hpd_fini(adev);
1897
1898 amdgpu_dm_irq_fini(adev);
1899 amdgpu_dm_fini(adev);
1900 return 0;
1901}
1902
1903
1904static int dm_enable_vblank(struct drm_crtc *crtc);
1905static void dm_disable_vblank(struct drm_crtc *crtc);
1906
1907static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1908 struct dc_state *state, bool enable)
1909{
1910 enum dc_irq_source irq_source;
1911 struct amdgpu_crtc *acrtc;
1912 int rc = -EBUSY;
1913 int i = 0;
1914
1915 for (i = 0; i < state->stream_count; i++) {
1916 acrtc = get_crtc_by_otg_inst(
1917 adev, state->stream_status[i].primary_otg_inst);
1918
1919 if (acrtc && state->stream_status[i].plane_count != 0) {
1920 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1921 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1922 DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
1923 acrtc->crtc_id, enable ? "en" : "dis", rc);
1924 if (rc)
1925 DRM_WARN("Failed to %s pflip interrupts\n",
1926 enable ? "enable" : "disable");
1927
1928 if (enable) {
1929 rc = dm_enable_vblank(&acrtc->base);
1930 if (rc)
1931 DRM_WARN("Failed to enable vblank interrupts\n");
1932 } else {
1933 dm_disable_vblank(&acrtc->base);
1934 }
1935
1936 }
1937 }
1938
1939}
1940
1941static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1942{
1943 struct dc_state *context = NULL;
1944 enum dc_status res = DC_ERROR_UNEXPECTED;
1945 int i;
1946 struct dc_stream_state *del_streams[MAX_PIPES];
1947 int del_streams_count = 0;
1948
1949 memset(del_streams, 0, sizeof(del_streams));
1950
1951 context = dc_create_state(dc);
1952 if (context == NULL)
1953 goto context_alloc_fail;
1954
1955 dc_resource_state_copy_construct_current(dc, context);
1956
1957 /* First remove from context all streams */
1958 for (i = 0; i < context->stream_count; i++) {
1959 struct dc_stream_state *stream = context->streams[i];
1960
1961 del_streams[del_streams_count++] = stream;
1962 }
1963
1964 /* Remove all planes for removed streams and then remove the streams */
1965 for (i = 0; i < del_streams_count; i++) {
1966 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1967 res = DC_FAIL_DETACH_SURFACES;
1968 goto fail;
1969 }
1970
1971 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1972 if (res != DC_OK)
1973 goto fail;
1974 }
1975
1976
1977 res = dc_validate_global_state(dc, context, false);
1978
1979 if (res != DC_OK) {
1980 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1981 goto fail;
1982 }
1983
1984 res = dc_commit_state(dc, context);
1985
1986fail:
1987 dc_release_state(context);
1988
1989context_alloc_fail:
1990 return res;
1991}
1992
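/*
 * In short, the helper above copies the current dc_state, strips every
 * plane and stream from the copy, validates the now-empty state and
 * commits it, leaving the hardware scanning out nothing without a full
 * teardown.
 */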
1993static int dm_suspend(void *handle)
1994{
1995 struct amdgpu_device *adev = handle;
1996 struct amdgpu_display_manager *dm = &adev->dm;
1997 int ret = 0;
1998
1999 if (amdgpu_in_reset(adev)) {
2000 mutex_lock(&dm->dc_lock);
2001
2002#if defined(CONFIG_DRM_AMD_DC_DCN)
2003 dc_allow_idle_optimizations(adev->dm.dc, false);
2004#endif
2005
2006 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2007
2008 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2009
2010 amdgpu_dm_commit_zero_streams(dm->dc);
2011
2012 amdgpu_dm_irq_suspend(adev);
2013
2014 return ret;
2015 }
2016
2017 WARN_ON(adev->dm.cached_state);
2018 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2019
2020 s3_handle_mst(adev_to_drm(adev), true);
2021
2022 amdgpu_dm_irq_suspend(adev);
2023
2024 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2025
2026 return 0;
2027}
2028
2029static struct amdgpu_dm_connector *
2030amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2031 struct drm_crtc *crtc)
2032{
2033 uint32_t i;
2034 struct drm_connector_state *new_con_state;
2035 struct drm_connector *connector;
2036 struct drm_crtc *crtc_from_state;
2037
2038 for_each_new_connector_in_state(state, connector, new_con_state, i) {
2039 crtc_from_state = new_con_state->crtc;
2040
2041 if (crtc_from_state == crtc)
2042 return to_amdgpu_dm_connector(connector);
2043 }
2044
2045 return NULL;
2046}
2047
2048static void emulated_link_detect(struct dc_link *link)
2049{
2050 struct dc_sink_init_data sink_init_data = { 0 };
2051 struct display_sink_capability sink_caps = { 0 };
2052 enum dc_edid_status edid_status;
2053 struct dc_context *dc_ctx = link->ctx;
2054 struct dc_sink *sink = NULL;
2055 struct dc_sink *prev_sink = NULL;
2056
2057 link->type = dc_connection_none;
2058 prev_sink = link->local_sink;
2059
2060 if (prev_sink)
2061 dc_sink_release(prev_sink);
2062
2063 switch (link->connector_signal) {
2064 case SIGNAL_TYPE_HDMI_TYPE_A: {
2065 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2066 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2067 break;
2068 }
2069
2070 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2071 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2072 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2073 break;
2074 }
2075
2076 case SIGNAL_TYPE_DVI_DUAL_LINK: {
2077 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2078 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2079 break;
2080 }
2081
2082 case SIGNAL_TYPE_LVDS: {
2083 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2084 sink_caps.signal = SIGNAL_TYPE_LVDS;
2085 break;
2086 }
2087
2088 case SIGNAL_TYPE_EDP: {
2089 sink_caps.transaction_type =
2090 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2091 sink_caps.signal = SIGNAL_TYPE_EDP;
2092 break;
2093 }
2094
2095 case SIGNAL_TYPE_DISPLAY_PORT: {
2096 sink_caps.transaction_type =
2097 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2098 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2099 break;
2100 }
2101
2102 default:
2103 DC_ERROR("Invalid connector type! signal:%d\n",
2104 link->connector_signal);
2105 return;
2106 }
2107
2108 sink_init_data.link = link;
2109 sink_init_data.sink_signal = sink_caps.signal;
2110
2111 sink = dc_sink_create(&sink_init_data);
2112 if (!sink) {
2113 DC_ERROR("Failed to create sink!\n");
2114 return;
2115 }
2116
2117 /* dc_sink_create returns a new reference */
2118 link->local_sink = sink;
2119
2120 edid_status = dm_helpers_read_local_edid(
2121 link->ctx,
2122 link,
2123 sink);
2124
2125 if (edid_status != EDID_OK)
2126 DC_ERROR("Failed to read EDID");
2127
2128}
2129
2130static void dm_gpureset_commit_state(struct dc_state *dc_state,
2131 struct amdgpu_display_manager *dm)
2132{
2133 struct {
2134 struct dc_surface_update surface_updates[MAX_SURFACES];
2135 struct dc_plane_info plane_infos[MAX_SURFACES];
2136 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2137 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2138 struct dc_stream_update stream_update;
2139 } *bundle;
2140 int k, m;
2141
2142 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2143
2144 if (!bundle) {
2145 dm_error("Failed to allocate update bundle\n");
2146 goto cleanup;
2147 }
2148
2149 for (k = 0; k < dc_state->stream_count; k++) {
2150 bundle->stream_update.stream = dc_state->streams[k];
2151
2152 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2153 bundle->surface_updates[m].surface =
2154 dc_state->stream_status->plane_states[m];
2155 bundle->surface_updates[m].surface->force_full_update =
2156 true;
2157 }
2158 dc_commit_updates_for_stream(
2159 dm->dc, bundle->surface_updates,
2160 dc_state->stream_status->plane_count,
2161 dc_state->streams[k], &bundle->stream_update, dc_state);
2162 }
2163
2164cleanup:
2165 kfree(bundle);
2166
2167 return;
2168}
2169
2170static void dm_set_dpms_off(struct dc_link *link)
2171{
2172 struct dc_stream_state *stream_state;
2173 struct amdgpu_dm_connector *aconnector = link->priv;
2174 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2175 struct dc_stream_update stream_update;
2176 bool dpms_off = true;
2177
2178 memset(&stream_update, 0, sizeof(stream_update));
2179 stream_update.dpms_off = &dpms_off;
2180
2181 mutex_lock(&adev->dm.dc_lock);
2182 stream_state = dc_stream_find_from_link(link);
2183
2184 if (stream_state == NULL) {
2185 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2186 mutex_unlock(&adev->dm.dc_lock);
2187 return;
2188 }
2189
2190 stream_update.stream = stream_state;
2191 dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2192 stream_state, &stream_update,
2193 stream_state->ctx->dc->current_state);
2194 mutex_unlock(&adev->dm.dc_lock);
2195}
2196
2197static int dm_resume(void *handle)
2198{
2199 struct amdgpu_device *adev = handle;
2200 struct drm_device *ddev = adev_to_drm(adev);
2201 struct amdgpu_display_manager *dm = &adev->dm;
2202 struct amdgpu_dm_connector *aconnector;
2203 struct drm_connector *connector;
2204 struct drm_connector_list_iter iter;
2205 struct drm_crtc *crtc;
2206 struct drm_crtc_state *new_crtc_state;
2207 struct dm_crtc_state *dm_new_crtc_state;
2208 struct drm_plane *plane;
2209 struct drm_plane_state *new_plane_state;
2210 struct dm_plane_state *dm_new_plane_state;
2211 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2212 enum dc_connection_type new_connection_type = dc_connection_none;
2213 struct dc_state *dc_state;
2214 int i, r, j;
2215
2216 if (amdgpu_in_reset(adev)) {
2217 dc_state = dm->cached_dc_state;
2218
2219 r = dm_dmub_hw_init(adev);
2220 if (r)
2221 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2222
2223 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2224 dc_resume(dm->dc);
2225
2226 amdgpu_dm_irq_resume_early(adev);
2227
2228 for (i = 0; i < dc_state->stream_count; i++) {
2229 dc_state->streams[i]->mode_changed = true;
2230 for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2231 dc_state->stream_status->plane_states[j]->update_flags.raw
2232 = 0xffffffff;
2233 }
2234 }
2235#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
2236 /*
2237 * Resource allocation happens for link encoders for newer ASIC in
2238 * dc_validate_global_state, so we need to revalidate it.
2239 *
2240 * This shouldn't fail (it passed once before), so warn if it does.
2241 */
2242 WARN_ON(dc_validate_global_state(dm->dc, dc_state, false) != DC_OK);
2243#endif
2244
2245 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2246
2247 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2248
2249 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2250
2251 dc_release_state(dm->cached_dc_state);
2252 dm->cached_dc_state = NULL;
2253
2254 amdgpu_dm_irq_resume_late(adev);
2255
2256 mutex_unlock(&dm->dc_lock);
2257
2258 return 0;
2259 }
2260 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2261 dc_release_state(dm_state->context);
2262 dm_state->context = dc_create_state(dm->dc);
2263 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2264 dc_resource_state_construct(dm->dc, dm_state->context);
2265
2266 /* Before powering on DC we need to re-initialize DMUB. */
2267 r = dm_dmub_hw_init(adev);
2268 if (r)
2269 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2270
2271 /* power on hardware */
2272 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2273
2274 /* program HPD filter */
2275 dc_resume(dm->dc);
2276
2277 /*
2278 * early enable HPD Rx IRQ, should be done before set mode as short
2279 * pulse interrupts are used for MST
2280 */
2281 amdgpu_dm_irq_resume_early(adev);
2282
2283 /* On resume we need to rewrite the MSTM control bits to enable MST */
2284 s3_handle_mst(ddev, false);
2285
2286 /* Do detection */
2287 drm_connector_list_iter_begin(ddev, &iter);
2288 drm_for_each_connector_iter(connector, &iter) {
2289 aconnector = to_amdgpu_dm_connector(connector);
2290
2291 /*
2292 * this is the case when traversing through already created
2293 * MST connectors, should be skipped
2294 */
2295 if (aconnector->mst_port)
2296 continue;
2297
2298 mutex_lock(&aconnector->hpd_lock);
2299 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2300 DRM_ERROR("KMS: Failed to detect connector\n");
2301
2302 if (aconnector->base.force && new_connection_type == dc_connection_none)
2303 emulated_link_detect(aconnector->dc_link);
2304 else
2305 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2306
2307 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2308 aconnector->fake_enable = false;
2309
2310 if (aconnector->dc_sink)
2311 dc_sink_release(aconnector->dc_sink);
2312 aconnector->dc_sink = NULL;
2313 amdgpu_dm_update_connector_after_detect(aconnector);
2314 mutex_unlock(&aconnector->hpd_lock);
2315 }
2316 drm_connector_list_iter_end(&iter);
2317
2318 /* Force mode set in atomic commit */
2319 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2320 new_crtc_state->active_changed = true;
2321
2322 /*
2323 * atomic_check is expected to create the dc states. We need to release
2324 * them here, since they were duplicated as part of the suspend
2325 * procedure.
2326 */
2327 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2328 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2329 if (dm_new_crtc_state->stream) {
2330 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2331 dc_stream_release(dm_new_crtc_state->stream);
2332 dm_new_crtc_state->stream = NULL;
2333 }
2334 }
2335
2336 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2337 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2338 if (dm_new_plane_state->dc_state) {
2339 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2340 dc_plane_state_release(dm_new_plane_state->dc_state);
2341 dm_new_plane_state->dc_state = NULL;
2342 }
2343 }
2344
2345 drm_atomic_helper_resume(ddev, dm->cached_state);
2346
2347 dm->cached_state = NULL;
2348
2349 amdgpu_dm_irq_resume_late(adev);
2350
2351 amdgpu_dm_smu_write_watermarks_table(adev);
2352
2353 return 0;
2354}
2355
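/*
 * Resume ordering above matters; the normal (non-reset) path is:
 * recreate dc_state -> dm_dmub_hw_init() -> dc_set_power_state(D0) ->
 * dc_resume() -> amdgpu_dm_irq_resume_early() -> s3_handle_mst(false) ->
 * re-detect links -> force a mode set via drm_atomic_helper_resume() ->
 * amdgpu_dm_irq_resume_late() -> rewrite the SMU watermarks table.
 */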
2356/**
2357 * DOC: DM Lifecycle
2358 *
2359 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2360 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2361 * the base driver's device list to be initialized and torn down accordingly.
2362 *
2363 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2364 */
2365
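/*
 * For reference, the base driver picks dm_ip_block up with
 * amdgpu_device_ip_block_add(adev, &dm_ip_block) during ASIC bring-up;
 * the hooks below are then invoked through the amd_ip_funcs table.
 */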
2366static const struct amd_ip_funcs amdgpu_dm_funcs = {
2367 .name = "dm",
2368 .early_init = dm_early_init,
2369 .late_init = dm_late_init,
2370 .sw_init = dm_sw_init,
2371 .sw_fini = dm_sw_fini,
2372 .early_fini = amdgpu_dm_early_fini,
2373 .hw_init = dm_hw_init,
2374 .hw_fini = dm_hw_fini,
2375 .suspend = dm_suspend,
2376 .resume = dm_resume,
2377 .is_idle = dm_is_idle,
2378 .wait_for_idle = dm_wait_for_idle,
2379 .check_soft_reset = dm_check_soft_reset,
2380 .soft_reset = dm_soft_reset,
2381 .set_clockgating_state = dm_set_clockgating_state,
2382 .set_powergating_state = dm_set_powergating_state,
2383};
2384
2385const struct amdgpu_ip_block_version dm_ip_block =
2386{
2387 .type = AMD_IP_BLOCK_TYPE_DCE,
2388 .major = 1,
2389 .minor = 0,
2390 .rev = 0,
2391 .funcs = &amdgpu_dm_funcs,
2392};
2393
2394
2395/**
2396 * DOC: atomic
2397 *
2398 * *WIP*
2399 */
2400
2401static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2402 .fb_create = amdgpu_display_user_framebuffer_create,
2403 .get_format_info = amd_get_format_info,
2404 .output_poll_changed = drm_fb_helper_output_poll_changed,
2405 .atomic_check = amdgpu_dm_atomic_check,
2406 .atomic_commit = drm_atomic_helper_commit,
2407};
2408
2409static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2410 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2411};
2412
2413static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2414{
2415 u32 max_cll, min_cll, max, min, q, r;
2416 struct amdgpu_dm_backlight_caps *caps;
2417 struct amdgpu_display_manager *dm;
2418 struct drm_connector *conn_base;
2419 struct amdgpu_device *adev;
2420 struct dc_link *link = NULL;
2421 static const u8 pre_computed_values[] = {
2422 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2423 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2424
2425 if (!aconnector || !aconnector->dc_link)
2426 return;
2427
2428 link = aconnector->dc_link;
2429 if (link->connector_signal != SIGNAL_TYPE_EDP)
2430 return;
2431
2432 conn_base = &aconnector->base;
2433 adev = drm_to_adev(conn_base->dev);
2434 dm = &adev->dm;
2435 caps = &dm->backlight_caps;
2436 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2437 caps->aux_support = false;
2438 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2439 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2440
2441 if (caps->ext_caps->bits.oled == 1 ||
2442 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2443 caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2444 caps->aux_support = true;
2445
2446 if (amdgpu_backlight == 0)
2447 caps->aux_support = false;
2448 else if (amdgpu_backlight == 1)
2449 caps->aux_support = true;
2450
2451 /* From the specification (CTA-861-G), for calculating the maximum
2452 * luminance we need to use:
2453 * Luminance = 50*2**(CV/32)
2454 * Where CV is a one-byte value.
2455 * Calculating this expression would need floating-point precision;
2456 * to avoid that complexity, we take advantage of the fact that CV is
2457 * divided by a constant. From Euclid's division algorithm, we know that
2458 * CV can be written as: CV = 32*q + r. Next, we replace CV in the
2459 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2460 * need to pre-compute the values of 50*(2**(r/32)). For pre-computing
2461 * them we used the following Ruby line:
2462 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2463 * The results of the above expressions can be verified at
2464 * pre_computed_values.
2465 */
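/* Worked example: max_cll = 200 gives q = 200 >> 5 = 6 and r = 200 % 32 = 8,
 * so max = (1 << 6) * pre_computed_values[8] = 64 * 59 = 3776, close to the
 * exact 50*2**(200/32) ~= 3805.
 */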
2466 q = max_cll >> 5;
2467 r = max_cll % 32;
2468 max = (1 << q) * pre_computed_values[r];
2469
2470 // min luminance: maxLum * (CV/255)^2 / 100
2471 q = DIV_ROUND_CLOSEST(min_cll, 255);
2472 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2473
2474 caps->aux_max_input_signal = max;
2475 caps->aux_min_input_signal = min;
2476}
2477
2478void amdgpu_dm_update_connector_after_detect(
2479 struct amdgpu_dm_connector *aconnector)
2480{
2481 struct drm_connector *connector = &aconnector->base;
2482 struct drm_device *dev = connector->dev;
2483 struct dc_sink *sink;
2484
2485 /* MST handled by drm_mst framework */
2486 if (aconnector->mst_mgr.mst_state == true)
2487 return;
2488
2489 sink = aconnector->dc_link->local_sink;
2490 if (sink)
2491 dc_sink_retain(sink);
2492
2493 /*
2494 * Edid mgmt connector gets first update only in mode_valid hook and then
2495 * the connector sink is set to either fake or physical sink depending on link status.
2496 * Skip if already done during boot.
2497 */
2498 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2499 && aconnector->dc_em_sink) {
2500
2501 /*
2502 * For S3 resume with headless use eml_sink to fake stream
2503 * because on resume connector->sink is set to NULL
2504 */
2505 mutex_lock(&dev->mode_config.mutex);
2506
2507 if (sink) {
2508 if (aconnector->dc_sink) {
2509 amdgpu_dm_update_freesync_caps(connector, NULL);
2510 /*
2511 * retain and release below are used to
2512 * bump up refcount for sink because the link doesn't point
2513 * to it anymore after disconnect, so on next crtc to connector
2514 * reshuffle by UMD we will get into unwanted dc_sink release
2515 */
2516 dc_sink_release(aconnector->dc_sink);
2517 }
2518 aconnector->dc_sink = sink;
2519 dc_sink_retain(aconnector->dc_sink);
2520 amdgpu_dm_update_freesync_caps(connector,
2521 aconnector->edid);
2522 } else {
2523 amdgpu_dm_update_freesync_caps(connector, NULL);
2524 if (!aconnector->dc_sink) {
2525 aconnector->dc_sink = aconnector->dc_em_sink;
2526 dc_sink_retain(aconnector->dc_sink);
2527 }
2528 }
2529
2530 mutex_unlock(&dev->mode_config.mutex);
2531
2532 if (sink)
2533 dc_sink_release(sink);
2534 return;
2535 }
2536
2537 /*
2538 * TODO: temporary guard to look for proper fix
2539 * if this sink is MST sink, we should not do anything
2540 */
2541 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2542 dc_sink_release(sink);
2543 return;
2544 }
2545
2546 if (aconnector->dc_sink == sink) {
2547 /*
2548 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2549 * Do nothing!!
2550 */
2551 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2552 aconnector->connector_id);
2553 if (sink)
2554 dc_sink_release(sink);
2555 return;
2556 }
2557
2558 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2559 aconnector->connector_id, aconnector->dc_sink, sink);
2560
2561 mutex_lock(&dev->mode_config.mutex);
2562
2563 /*
2564 * 1. Update status of the drm connector
2565 * 2. Send an event and let userspace tell us what to do
2566 */
2567 if (sink) {
2568 /*
2569 * TODO: check if we still need the S3 mode update workaround.
2570 * If yes, put it here.
2571 */
2572 if (aconnector->dc_sink) {
2573 amdgpu_dm_update_freesync_caps(connector, NULL);
2574 dc_sink_release(aconnector->dc_sink);
2575 }
2576
2577 aconnector->dc_sink = sink;
2578 dc_sink_retain(aconnector->dc_sink);
2579 if (sink->dc_edid.length == 0) {
2580 aconnector->edid = NULL;
2581 if (aconnector->dc_link->aux_mode) {
2582 drm_dp_cec_unset_edid(
2583 &aconnector->dm_dp_aux.aux);
2584 }
900b3cb1 2585 } else {
4562236b 2586 aconnector->edid =
e6142dd5 2587 (struct edid *)sink->dc_edid.raw_edid;
4562236b 2588
c555f023 2589 drm_connector_update_edid_property(connector,
e6142dd5 2590 aconnector->edid);
e6142dd5
AP
2591 if (aconnector->dc_link->aux_mode)
2592 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2593 aconnector->edid);
2594 }
2595
2596 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2597 update_connector_ext_caps(aconnector);
2598 } else {
2599 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2600 amdgpu_dm_update_freesync_caps(connector, NULL);
2601 drm_connector_update_edid_property(connector, NULL);
2602 aconnector->num_modes = 0;
2603 dc_sink_release(aconnector->dc_sink);
2604 aconnector->dc_sink = NULL;
2605 aconnector->edid = NULL;
2606#ifdef CONFIG_DRM_AMD_DC_HDCP
2607 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2608 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2609 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2610#endif
2611 }
2612
2613 mutex_unlock(&dev->mode_config.mutex);
2614
2615 update_subconnector_property(aconnector);
2616
2617 if (sink)
2618 dc_sink_release(sink);
2619}
2620
2621static void handle_hpd_irq(void *param)
2622{
2623 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2624 struct drm_connector *connector = &aconnector->base;
2625 struct drm_device *dev = connector->dev;
2626 enum dc_connection_type new_connection_type = dc_connection_none;
2627 struct amdgpu_device *adev = drm_to_adev(dev);
2628#ifdef CONFIG_DRM_AMD_DC_HDCP
2629 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2630#endif
2631
2632 if (adev->dm.disable_hpd_irq)
2633 return;
2634
2635 /*
2636 * In case of failure or MST, there is no need to update the connector status or notify the OS,
2637 * since (for the MST case) MST does this in its own context.
2638 */
2639 mutex_lock(&aconnector->hpd_lock);
2640
2641#ifdef CONFIG_DRM_AMD_DC_HDCP
2642 if (adev->dm.hdcp_workqueue) {
2643 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2644 dm_con_state->update_hdcp = true;
2645 }
2646#endif
2647 if (aconnector->fake_enable)
2648 aconnector->fake_enable = false;
2649
2650 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2651 DRM_ERROR("KMS: Failed to detect connector\n");
2652
2653 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2654 emulated_link_detect(aconnector->dc_link);
2655
2656
2657 drm_modeset_lock_all(dev);
2658 dm_restore_drm_connector_state(dev, connector);
2659 drm_modeset_unlock_all(dev);
2660
2661 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2662 drm_kms_helper_hotplug_event(dev);
2663
2664 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2665 if (new_connection_type == dc_connection_none &&
2666 aconnector->dc_link->type == dc_connection_none)
2667 dm_set_dpms_off(aconnector->dc_link);
2668
2669 amdgpu_dm_update_connector_after_detect(aconnector);
2670
2671 drm_modeset_lock_all(dev);
2672 dm_restore_drm_connector_state(dev, connector);
2673 drm_modeset_unlock_all(dev);
2674
2675 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2676 drm_kms_helper_hotplug_event(dev);
2677 }
2678 mutex_unlock(&aconnector->hpd_lock);
2679
2680}
2681
2682static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2683{
2684 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2685 uint8_t dret;
2686 bool new_irq_handled = false;
2687 int dpcd_addr;
2688 int dpcd_bytes_to_read;
2689
2690 const int max_process_count = 30;
2691 int process_count = 0;
2692
2693 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2694
2695 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2696 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2697 /* DPCD 0x200 - 0x201 for downstream IRQ */
2698 dpcd_addr = DP_SINK_COUNT;
2699 } else {
2700 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2701 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2702 dpcd_addr = DP_SINK_COUNT_ESI;
2703 }
2704
2705 dret = drm_dp_dpcd_read(
2706 &aconnector->dm_dp_aux.aux,
2707 dpcd_addr,
2708 esi,
2709 dpcd_bytes_to_read);
2710
2711 while (dret == dpcd_bytes_to_read &&
2712 process_count < max_process_count) {
2713 uint8_t retry;
2714 dret = 0;
2715
2716 process_count++;
2717
2718 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2719 /* handle HPD short pulse irq */
2720 if (aconnector->mst_mgr.mst_state)
2721 drm_dp_mst_hpd_irq(
2722 &aconnector->mst_mgr,
2723 esi,
2724 &new_irq_handled);
2725
2726 if (new_irq_handled) {
2727 /* ACK at DPCD to notify down stream */
2728 const int ack_dpcd_bytes_to_write =
2729 dpcd_bytes_to_read - 1;
2730
2731 for (retry = 0; retry < 3; retry++) {
2732 uint8_t wret;
2733
2734 wret = drm_dp_dpcd_write(
2735 &aconnector->dm_dp_aux.aux,
2736 dpcd_addr + 1,
2737 &esi[1],
2738 ack_dpcd_bytes_to_write);
2739 if (wret == ack_dpcd_bytes_to_write)
2740 break;
2741 }
2742
2743 /* check if there is a new irq to be handled */
4562236b
HW
2744 dret = drm_dp_dpcd_read(
2745 &aconnector->dm_dp_aux.aux,
2746 dpcd_addr,
2747 esi,
2748 dpcd_bytes_to_read);
2749
2750 new_irq_handled = false;
2751 } else {
2752 break;
2753 }
2754 }
2755
2756 if (process_count == max_process_count)
2757 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2758}
2759
2760static void handle_hpd_rx_irq(void *param)
2761{
2762 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2763 struct drm_connector *connector = &aconnector->base;
2764 struct drm_device *dev = connector->dev;
2765 struct dc_link *dc_link = aconnector->dc_link;
2766 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2767 bool result = false;
2768 enum dc_connection_type new_connection_type = dc_connection_none;
2769 struct amdgpu_device *adev = drm_to_adev(dev);
2770 union hpd_irq_data hpd_irq_data;
2771 bool lock_flag = 0;
2772
2773 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2774
2775 if (adev->dm.disable_hpd_irq)
2776 return;
2777
2778
2779 /*
2780 * TODO: Temporarily add a mutex to protect the hpd interrupt from a gpio
2781 * conflict; after an i2c helper is implemented, this mutex should be
2782 * retired.
2783 */
2784 mutex_lock(&aconnector->hpd_lock);
2785
2786 read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2787
2788 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2789 (dc_link->type == dc_connection_mst_branch)) {
2790 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2791 result = true;
2792 dm_handle_hpd_rx_irq(aconnector);
2793 goto out;
2794 } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2795 result = false;
2796 dm_handle_hpd_rx_irq(aconnector);
2797 goto out;
2798 }
2799 }
2800
2801 /*
2802 * TODO: We need the lock to avoid touching DC state while it's being
2803 * modified during automated compliance testing, or when link loss
2804 * happens. While this should be split into subhandlers and proper
2805 * interfaces to avoid having to conditionally lock like this in the
2806 * outer layer, we need this workaround temporarily to allow MST
2807 * lightup in some scenarios to avoid timeout.
2808 */
2809 if (!amdgpu_in_reset(adev) &&
2810 (hpd_rx_irq_check_link_loss_status(dc_link, &hpd_irq_data) ||
2811 hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST)) {
2812 mutex_lock(&adev->dm.dc_lock);
2813 lock_flag = 1;
2814 }
2815
2816#ifdef CONFIG_DRM_AMD_DC_HDCP
2817 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2818#else
2819 result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2820#endif
2821 if (!amdgpu_in_reset(adev) && lock_flag)
2822 mutex_unlock(&adev->dm.dc_lock);
2823
2824out:
2825 if (result && !is_mst_root_connector) {
2826 /* Downstream Port status changed. */
2827 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2828 DRM_ERROR("KMS: Failed to detect connector\n");
2829
2830 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2831 emulated_link_detect(dc_link);
2832
2833 if (aconnector->fake_enable)
2834 aconnector->fake_enable = false;
2835
2836 amdgpu_dm_update_connector_after_detect(aconnector);
2837
2838
2839 drm_modeset_lock_all(dev);
2840 dm_restore_drm_connector_state(dev, connector);
2841 drm_modeset_unlock_all(dev);
2842
2843 drm_kms_helper_hotplug_event(dev);
2844 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2845
2846 if (aconnector->fake_enable)
2847 aconnector->fake_enable = false;
2848
2849 amdgpu_dm_update_connector_after_detect(aconnector);
2850
2851
2852 drm_modeset_lock_all(dev);
2853 dm_restore_drm_connector_state(dev, connector);
2854 drm_modeset_unlock_all(dev);
2855
2856 drm_kms_helper_hotplug_event(dev);
2857 }
2858 }
2859#ifdef CONFIG_DRM_AMD_DC_HDCP
2860 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2861 if (adev->dm.hdcp_workqueue)
2862 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2863 }
2864#endif
2865
2866 if (dc_link->type != dc_connection_mst_branch)
2867 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2868
2869 mutex_unlock(&aconnector->hpd_lock);
2870}
2871
2872static void register_hpd_handlers(struct amdgpu_device *adev)
2873{
2874 struct drm_device *dev = adev_to_drm(adev);
2875 struct drm_connector *connector;
2876 struct amdgpu_dm_connector *aconnector;
2877 const struct dc_link *dc_link;
2878 struct dc_interrupt_params int_params = {0};
2879
2880 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2881 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2882
2883 list_for_each_entry(connector,
2884 &dev->mode_config.connector_list, head) {
2885
2886 aconnector = to_amdgpu_dm_connector(connector);
2887 dc_link = aconnector->dc_link;
2888
2889 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2890 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2891 int_params.irq_source = dc_link->irq_source_hpd;
2892
2893 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2894 handle_hpd_irq,
2895 (void *) aconnector);
2896 }
2897
2898 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2899
2900 /* Also register for DP short pulse (hpd_rx). */
2901 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2902 int_params.irq_source = dc_link->irq_source_hpd_rx;
2903
2904 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2905 handle_hpd_rx_irq,
2906 (void *) aconnector);
2907 }
2908 }
2909}
2910
2911#if defined(CONFIG_DRM_AMD_DC_SI)
2912/* Register IRQ sources and initialize IRQ callbacks */
2913static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2914{
2915 struct dc *dc = adev->dm.dc;
2916 struct common_irq_params *c_irq_params;
2917 struct dc_interrupt_params int_params = {0};
2918 int r;
2919 int i;
2920 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2921
2922 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2923 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2924
2925 /*
2926 * Actions of amdgpu_irq_add_id():
2927 * 1. Register a set() function with base driver.
2928 * Base driver will call set() function to enable/disable an
2929 * interrupt in DC hardware.
2930 * 2. Register amdgpu_dm_irq_handler().
2931 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2932 * coming from DC hardware.
2933 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2934 * for acknowledging and handling. */
2935
2936 /* Use VBLANK interrupt */
2937 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2938 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2939 if (r) {
2940 DRM_ERROR("Failed to add crtc irq id!\n");
2941 return r;
2942 }
2943
2944 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2945 int_params.irq_source =
2946 dc_interrupt_to_irq_source(dc, i + 1, 0);
2947
2948 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2949
2950 c_irq_params->adev = adev;
2951 c_irq_params->irq_src = int_params.irq_source;
2952
2953 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2954 dm_crtc_high_irq, c_irq_params);
2955 }
2956
2957 /* Use GRPH_PFLIP interrupt */
2958 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2959 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2960 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2961 if (r) {
2962 DRM_ERROR("Failed to add page flip irq id!\n");
2963 return r;
2964 }
2965
2966 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2967 int_params.irq_source =
2968 dc_interrupt_to_irq_source(dc, i, 0);
2969
2970 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2971
2972 c_irq_params->adev = adev;
2973 c_irq_params->irq_src = int_params.irq_source;
2974
2975 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2976 dm_pflip_high_irq, c_irq_params);
2977
2978 }
2979
2980 /* HPD */
2981 r = amdgpu_irq_add_id(adev, client_id,
2982 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2983 if (r) {
2984 DRM_ERROR("Failed to add hpd irq id!\n");
2985 return r;
2986 }
2987
2988 register_hpd_handlers(adev);
2989
2990 return 0;
2991}
2992#endif
2993
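/*
 * Every registration block in these handlers repeats one pattern; a
 * minimal sketch, with <src_id>, <irq> and <handler> standing in for the
 * per-source values:
 *
 *	r = amdgpu_irq_add_id(adev, client_id, <src_id>, &adev-><irq>);
 *	int_params.irq_source = dc_interrupt_to_irq_source(dc, <src_id>, 0);
 *	c_irq_params->adev = adev;
 *	c_irq_params->irq_src = int_params.irq_source;
 *	amdgpu_dm_irq_register_interrupt(adev, &int_params, <handler>,
 *					 c_irq_params);
 */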
2994/* Register IRQ sources and initialize IRQ callbacks */
2995static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2996{
2997 struct dc *dc = adev->dm.dc;
2998 struct common_irq_params *c_irq_params;
2999 struct dc_interrupt_params int_params = {0};
3000 int r;
3001 int i;
3002 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3003
3004 if (adev->asic_type >= CHIP_VEGA10)
3005 client_id = SOC15_IH_CLIENTID_DCE;
3006
3007 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3008 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3009
3010 /*
3011 * Actions of amdgpu_irq_add_id():
3012 * 1. Register a set() function with base driver.
3013 * Base driver will call set() function to enable/disable an
3014 * interrupt in DC hardware.
3015 * 2. Register amdgpu_dm_irq_handler().
3016 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3017 * coming from DC hardware.
3018 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3019 * for acknowledging and handling. */
3020
3021 /* Use VBLANK interrupt */
3022 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3023 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3024 if (r) {
3025 DRM_ERROR("Failed to add crtc irq id!\n");
3026 return r;
3027 }
3028
3029 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3030 int_params.irq_source =
3031 dc_interrupt_to_irq_source(dc, i, 0);
3032
3033 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3034
3035 c_irq_params->adev = adev;
3036 c_irq_params->irq_src = int_params.irq_source;
3037
3038 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3039 dm_crtc_high_irq, c_irq_params);
3040 }
3041
3042 /* Use VUPDATE interrupt */
3043 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3044 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3045 if (r) {
3046 DRM_ERROR("Failed to add vupdate irq id!\n");
3047 return r;
3048 }
3049
3050 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3051 int_params.irq_source =
3052 dc_interrupt_to_irq_source(dc, i, 0);
3053
3054 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3055
3056 c_irq_params->adev = adev;
3057 c_irq_params->irq_src = int_params.irq_source;
3058
3059 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3060 dm_vupdate_high_irq, c_irq_params);
3061 }
3062
3063 /* Use GRPH_PFLIP interrupt */
3064 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3065 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3066 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3067 if (r) {
3068 DRM_ERROR("Failed to add page flip irq id!\n");
3069 return r;
3070 }
3071
3072 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3073 int_params.irq_source =
3074 dc_interrupt_to_irq_source(dc, i, 0);
3075
3076 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3077
3078 c_irq_params->adev = adev;
3079 c_irq_params->irq_src = int_params.irq_source;
3080
3081 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3082 dm_pflip_high_irq, c_irq_params);
3083
3084 }
3085
3086 /* HPD */
3087 r = amdgpu_irq_add_id(adev, client_id,
3088 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3089 if (r) {
3090 DRM_ERROR("Failed to add hpd irq id!\n");
3091 return r;
3092 }
3093
3094 register_hpd_handlers(adev);
3095
3096 return 0;
3097}
3098
3099#if defined(CONFIG_DRM_AMD_DC_DCN)
3100/* Register IRQ sources and initialize IRQ callbacks */
3101static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3102{
3103 struct dc *dc = adev->dm.dc;
3104 struct common_irq_params *c_irq_params;
3105 struct dc_interrupt_params int_params = {0};
3106 int r;
3107 int i;
3108#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3109 static const unsigned int vrtl_int_srcid[] = {
3110 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3111 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3112 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3113 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3114 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3115 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3116 };
3117#endif
3118
3119 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3120 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3121
3122 /*
3123 * Actions of amdgpu_irq_add_id():
3124 * 1. Register a set() function with base driver.
3125 * Base driver will call set() function to enable/disable an
3126 * interrupt in DC hardware.
3127 * 2. Register amdgpu_dm_irq_handler().
3128 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3129 * coming from DC hardware.
3130 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3131 * for acknowledging and handling.
3132 */
3133
3134 /* Use VSTARTUP interrupt */
3135 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3136 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3137 i++) {
3760f76c 3138 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
ff5ef992
AD
3139
3140 if (r) {
3141 DRM_ERROR("Failed to add crtc irq id!\n");
3142 return r;
3143 }
3144
3145 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3146 int_params.irq_source =
3147 dc_interrupt_to_irq_source(dc, i, 0);
3148
3149 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3150
3151 c_irq_params->adev = adev;
3152 c_irq_params->irq_src = int_params.irq_source;
3153
3154 amdgpu_dm_irq_register_interrupt(
3155 adev, &int_params, dm_crtc_high_irq, c_irq_params);
3156 }
3157
3158 /* Use otg vertical line interrupt */
3159#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3160 for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3161 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3162 vrtl_int_srcid[i], &adev->vline0_irq);
3163
3164 if (r) {
3165 DRM_ERROR("Failed to add vline0 irq id!\n");
3166 return r;
3167 }
3168
3169 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3170 int_params.irq_source =
3171 dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3172
3173 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3174 DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3175 break;
3176 }
3177
3178 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3179 - DC_IRQ_SOURCE_DC1_VLINE0];
3180
3181 c_irq_params->adev = adev;
3182 c_irq_params->irq_src = int_params.irq_source;
3183
3184 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3185 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3186 }
3187#endif
3188
3189 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3190 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3191 * to trigger at end of each vblank, regardless of state of the lock,
3192 * matching DCE behaviour.
3193 */
3194 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3195 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3196 i++) {
3197 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3198
3199 if (r) {
3200 DRM_ERROR("Failed to add vupdate irq id!\n");
3201 return r;
3202 }
3203
3204 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3205 int_params.irq_source =
3206 dc_interrupt_to_irq_source(dc, i, 0);
3207
3208 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3209
3210 c_irq_params->adev = adev;
3211 c_irq_params->irq_src = int_params.irq_source;
3212
3213 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3214 dm_vupdate_high_irq, c_irq_params);
3215 }
3216
3217 /* Use GRPH_PFLIP interrupt */
3218 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3219 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3220 i++) {
3221 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3222 if (r) {
3223 DRM_ERROR("Failed to add page flip irq id!\n");
3224 return r;
3225 }
3226
3227 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3228 int_params.irq_source =
3229 dc_interrupt_to_irq_source(dc, i, 0);
3230
3231 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3232
3233 c_irq_params->adev = adev;
3234 c_irq_params->irq_src = int_params.irq_source;
3235
3236 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3237 dm_pflip_high_irq, c_irq_params);
3238
3239 }
3240
3241 /* HPD */
3242 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3243 &adev->hpd_irq);
3244 if (r) {
3245 DRM_ERROR("Failed to add hpd irq id!\n");
3246 return r;
3247 }
3248
3249 register_hpd_handlers(adev);
3250
3251 return 0;
3252}
3253/* Register Outbox IRQ sources and initialize IRQ callbacks */
3254static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3255{
3256 struct dc *dc = adev->dm.dc;
3257 struct common_irq_params *c_irq_params;
3258 struct dc_interrupt_params int_params = {0};
3259 int r, i;
3260
3261 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3262 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3263
3264 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3265 &adev->dmub_outbox_irq);
3266 if (r) {
3267 DRM_ERROR("Failed to add outbox irq id!\n");
3268 return r;
3269 }
3270
3271 if (dc->ctx->dmub_srv) {
3272 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3273 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3274 int_params.irq_source =
3275 dc_interrupt_to_irq_source(dc, i, 0);
3276
3277 c_irq_params = &adev->dm.dmub_outbox_params[0];
3278
3279 c_irq_params->adev = adev;
3280 c_irq_params->irq_src = int_params.irq_source;
3281
3282 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3283 dm_dmub_outbox1_low_irq, c_irq_params);
3284 }
3285
3286 return 0;
3287}
3288#endif
3289
3290/*
3291 * Acquires the lock for the atomic state object and returns
3292 * the new atomic state.
3293 *
3294 * This should only be called during atomic check.
3295 */
3296static int dm_atomic_get_state(struct drm_atomic_state *state,
3297 struct dm_atomic_state **dm_state)
3298{
3299 struct drm_device *dev = state->dev;
1348969a 3300 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897
NK
3301 struct amdgpu_display_manager *dm = &adev->dm;
3302 struct drm_private_state *priv_state;
eb3dc897
NK
3303
3304 if (*dm_state)
3305 return 0;
3306
eb3dc897
NK
3307 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3308 if (IS_ERR(priv_state))
3309 return PTR_ERR(priv_state);
3310
3311 *dm_state = to_dm_atomic_state(priv_state);
3312
3313 return 0;
3314}
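/*
 * Typical usage during atomic check (an illustrative sketch; the
 * surrounding caller and its error handling are hypothetical):
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *
 * After this, dm_state->context can be inspected or modified under the
 * private-obj lock taken by drm_atomic_get_private_obj_state().
 */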
3315
dfd84d90 3316static struct dm_atomic_state *
eb3dc897
NK
3317dm_atomic_get_new_state(struct drm_atomic_state *state)
3318{
3319 struct drm_device *dev = state->dev;
1348969a 3320 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897
NK
3321 struct amdgpu_display_manager *dm = &adev->dm;
3322 struct drm_private_obj *obj;
3323 struct drm_private_state *new_obj_state;
3324 int i;
3325
3326 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3327 if (obj->funcs == dm->atomic_obj.funcs)
3328 return to_dm_atomic_state(new_obj_state);
3329 }
3330
3331 return NULL;
3332}
3333
eb3dc897
NK
3334static struct drm_private_state *
3335dm_atomic_duplicate_state(struct drm_private_obj *obj)
3336{
3337 struct dm_atomic_state *old_state, *new_state;
3338
3339 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3340 if (!new_state)
3341 return NULL;
3342
3343 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3344
813d20dc
AW
3345 old_state = to_dm_atomic_state(obj->state);
3346
3347 if (old_state && old_state->context)
3348 new_state->context = dc_copy_state(old_state->context);
3349
eb3dc897
NK
3350 if (!new_state->context) {
3351 kfree(new_state);
3352 return NULL;
3353 }
3354
eb3dc897
NK
3355 return &new_state->base;
3356}
3357
3358static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3359 struct drm_private_state *state)
3360{
3361 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3362
3363 if (dm_state && dm_state->context)
3364 dc_release_state(dm_state->context);
3365
3366 kfree(dm_state);
3367}
3368
3369static struct drm_private_state_funcs dm_atomic_state_funcs = {
3370 .atomic_duplicate_state = dm_atomic_duplicate_state,
3371 .atomic_destroy_state = dm_atomic_destroy_state,
3372};
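/*
 * The DRM core invokes these hooks around each atomic transaction that
 * touches dm->atomic_obj: atomic_duplicate_state makes a copy of the
 * current dc context via dc_copy_state() so that atomic check can
 * mutate it freely, and atomic_destroy_state releases whichever copy
 * ends up being discarded (the old state after a successful commit,
 * the new state if the transaction fails).
 */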
3373
4562236b
HW
3374static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3375{
eb3dc897 3376 struct dm_atomic_state *state;
4562236b
HW
3377 int r;
3378
3379 adev->mode_info.mode_config_initialized = true;
3380
4a580877
LT
3381 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3382 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
4562236b 3383
4a580877
LT
3384 adev_to_drm(adev)->mode_config.max_width = 16384;
3385 adev_to_drm(adev)->mode_config.max_height = 16384;
4562236b 3386
4a580877
LT
3387 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3388 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
1f6010a9 3389 /* indicates support for immediate flip */
4a580877 3390 adev_to_drm(adev)->mode_config.async_page_flip = true;
4562236b 3391
4a580877 3392 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
4562236b 3393
eb3dc897
NK
3394 state = kzalloc(sizeof(*state), GFP_KERNEL);
3395 if (!state)
3396 return -ENOMEM;
3397
813d20dc 3398 state->context = dc_create_state(adev->dm.dc);
eb3dc897
NK
3399 if (!state->context) {
3400 kfree(state);
3401 return -ENOMEM;
3402 }
3403
3404 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3405
4a580877 3406 drm_atomic_private_obj_init(adev_to_drm(adev),
8c1a765b 3407 &adev->dm.atomic_obj,
eb3dc897
NK
3408 &state->base,
3409 &dm_atomic_state_funcs);
3410
3dc9b1ce 3411 r = amdgpu_display_modeset_create_props(adev);
b67a468a
DL
3412 if (r) {
3413 dc_release_state(state->context);
3414 kfree(state);
4562236b 3415 return r;
b67a468a 3416 }
4562236b 3417
6ce8f316 3418 r = amdgpu_dm_audio_init(adev);
b67a468a
DL
3419 if (r) {
3420 dc_release_state(state->context);
3421 kfree(state);
6ce8f316 3422 return r;
b67a468a 3423 }
6ce8f316 3424
4562236b
HW
3425 return 0;
3426}
3427
206bbafe
DF
3428#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3429#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
94562810 3430#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
206bbafe 3431
4562236b
HW
3432#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3433 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3434
206bbafe
DF
3435static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3436{
3437#if defined(CONFIG_ACPI)
3438 struct amdgpu_dm_backlight_caps caps;
3439
58965855
FS
3440 memset(&caps, 0, sizeof(caps));
3441
206bbafe
DF
3442 if (dm->backlight_caps.caps_valid)
3443 return;
3444
f9b7f370 3445 amdgpu_acpi_get_backlight_caps(&caps);
206bbafe 3446 if (caps.caps_valid) {
94562810
RS
3447 dm->backlight_caps.caps_valid = true;
3448 if (caps.aux_support)
3449 return;
206bbafe
DF
3450 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3451 dm->backlight_caps.max_input_signal = caps.max_input_signal;
206bbafe
DF
3452 } else {
3453 dm->backlight_caps.min_input_signal =
3454 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3455 dm->backlight_caps.max_input_signal =
3456 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3457 }
3458#else
94562810
RS
3459 if (dm->backlight_caps.aux_support)
3460 return;
3461
8bcbc9ef
DF
3462 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3463 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
206bbafe
DF
3464#endif
3465}
3466
69d9f427
AM
3467static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3468 unsigned *min, unsigned *max)
94562810 3469{
94562810 3470 if (!caps)
69d9f427 3471 return 0;
94562810 3472
69d9f427
AM
3473 if (caps->aux_support) {
3474 // Firmware limits are in nits, DC API wants millinits.
3475 *max = 1000 * caps->aux_max_input_signal;
3476 *min = 1000 * caps->aux_min_input_signal;
94562810 3477 } else {
69d9f427
AM
3478 // Firmware limits are 8-bit, PWM control is 16-bit.
3479 *max = 0x101 * caps->max_input_signal;
3480 *min = 0x101 * caps->min_input_signal;
94562810 3481 }
69d9f427
AM
3482 return 1;
3483}
94562810 3484
69d9f427
AM
3485static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3486 uint32_t brightness)
3487{
3488 unsigned min, max;
94562810 3489
69d9f427
AM
3490 if (!get_brightness_range(caps, &min, &max))
3491 return brightness;
3492
3493 // Rescale 0..255 to min..max
3494 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3495 AMDGPU_MAX_BL_LEVEL);
3496}
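/*
 * Worked example for the PWM path, using the default caps of
 * min_input_signal = 12 and max_input_signal = 255: min = 0x101 * 12 =
 * 3084 and max = 0x101 * 255 = 65535, so a user brightness of 128 maps
 * to 3084 + DIV_ROUND_CLOSEST((65535 - 3084) * 128, 255) = 34432.
 */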
3497
3498static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3499 uint32_t brightness)
3500{
3501 unsigned min, max;
3502
3503 if (!get_brightness_range(caps, &min, &max))
3504 return brightness;
3505
3506 if (brightness < min)
3507 return 0;
3508 // Rescale min..max to 0..255
3509 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3510 max - min);
94562810
RS
3511}
3512
3d6c9164
AD
3513static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3514 u32 user_brightness)
4562236b 3515{
206bbafe 3516 struct amdgpu_dm_backlight_caps caps;
118b4627 3517 struct dc_link *link[AMDGPU_DM_MAX_NUM_EDP];
3d6c9164 3518 u32 brightness[AMDGPU_DM_MAX_NUM_EDP];
94562810 3519 bool rc;
118b4627 3520 int i;
4562236b 3521
206bbafe
DF
3522 amdgpu_dm_update_backlight_caps(dm);
3523 caps = dm->backlight_caps;
94562810 3524
3d6c9164
AD
3525 for (i = 0; i < dm->num_of_edps; i++) {
3526 dm->brightness[i] = user_brightness;
3527 brightness[i] = convert_brightness_from_user(&caps, dm->brightness[i]);
118b4627 3528 link[i] = (struct dc_link *)dm->backlight_link[i];
3d6c9164 3529 }
94562810 3530
3d6c9164 3531 /* Change brightness based on AUX property */
118b4627
ML
3532 if (caps.aux_support) {
3533 for (i = 0; i < dm->num_of_edps; i++) {
3d6c9164 3534 rc = dc_link_set_backlight_level_nits(link[i], true, brightness[i],
118b4627
ML
3535 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3536 if (!rc) {
cd11b58c 3537 DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", i);
118b4627
ML
3538 break;
3539 }
3540 }
3541 } else {
3542 for (i = 0; i < dm->num_of_edps; i++) {
3d6c9164 3543 rc = dc_link_set_backlight_level(dm->backlight_link[i], brightness[i], 0);
118b4627 3544 if (!rc) {
cd11b58c 3545 DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", i);
118b4627
ML
3546 break;
3547 }
3548 }
3549 }
94562810
RS
3550
3551 return rc ? 0 : 1;
4562236b
HW
3552}
3553
3d6c9164 3554static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
4562236b 3555{
620a0d27 3556 struct amdgpu_display_manager *dm = bl_get_data(bd);
3d6c9164
AD
3557
3558 amdgpu_dm_backlight_set_level(dm, bd->props.brightness);
3559
3560 return 0;
3561}
3562
3563static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm)
3564{
0ad3e64e
AD
3565 struct amdgpu_dm_backlight_caps caps;
3566
3567 amdgpu_dm_update_backlight_caps(dm);
3568 caps = dm->backlight_caps;
620a0d27 3569
0ad3e64e 3570 if (caps.aux_support) {
118b4627 3571 struct dc_link *link = (struct dc_link *)dm->backlight_link[0];
0ad3e64e
AD
3572 u32 avg, peak;
3573 bool rc;
3574
3575 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3576 if (!rc)
3d6c9164 3577 return dm->brightness[0];
0ad3e64e
AD
3578 return convert_brightness_to_user(&caps, avg);
3579 } else {
118b4627 3580 int ret = dc_link_get_backlight_level(dm->backlight_link[0]);
0ad3e64e
AD
3581
3582 if (ret == DC_ERROR_UNEXPECTED)
3d6c9164 3583 return dm->brightness[0];
0ad3e64e
AD
3584 return convert_brightness_to_user(&caps, ret);
3585 }
4562236b
HW
3586}
3587
3d6c9164
AD
3588static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3589{
3590 struct amdgpu_display_manager *dm = bl_get_data(bd);
3591
3592 return amdgpu_dm_backlight_get_level(dm);
3593}
3594
4562236b 3595static const struct backlight_ops amdgpu_dm_backlight_ops = {
bb264220 3596 .options = BL_CORE_SUSPENDRESUME,
4562236b
HW
3597 .get_brightness = amdgpu_dm_backlight_get_brightness,
3598 .update_status = amdgpu_dm_backlight_update_status,
3599};
3600
7578ecda
AD
3601static void
3602amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4562236b
HW
3603{
3604 char bl_name[16];
3605 struct backlight_properties props = { 0 };
3d6c9164 3606 int i;
4562236b 3607
206bbafe 3608 amdgpu_dm_update_backlight_caps(dm);
3d6c9164
AD
3609 for (i = 0; i < dm->num_of_edps; i++)
3610 dm->brightness[i] = AMDGPU_MAX_BL_LEVEL;
206bbafe 3611
4562236b 3612 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
53a53f86 3613 props.brightness = AMDGPU_MAX_BL_LEVEL;
4562236b
HW
3614 props.type = BACKLIGHT_RAW;
3615
3616 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4a580877 3617 adev_to_drm(dm->adev)->primary->index);
4562236b
HW
3618
3619 dm->backlight_dev = backlight_device_register(bl_name,
4a580877
LT
3620 adev_to_drm(dm->adev)->dev,
3621 dm,
3622 &amdgpu_dm_backlight_ops,
3623 &props);
4562236b 3624
74baea42 3625 if (IS_ERR(dm->backlight_dev))
4562236b
HW
3626 DRM_ERROR("DM: Backlight registration failed!\n");
3627 else
f1ad2f5e 3628 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4562236b
HW
3629}
3630
3631#endif
3632
df534fff 3633static int initialize_plane(struct amdgpu_display_manager *dm,
b2fddb13 3634 struct amdgpu_mode_info *mode_info, int plane_id,
cc1fec57
NK
3635 enum drm_plane_type plane_type,
3636 const struct dc_plane_cap *plane_cap)
df534fff 3637{
f180b4bc 3638 struct drm_plane *plane;
df534fff
S
3639 unsigned long possible_crtcs;
3640 int ret = 0;
3641
f180b4bc 3642 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
df534fff
S
3643 if (!plane) {
3644 DRM_ERROR("KMS: Failed to allocate plane\n");
3645 return -ENOMEM;
3646 }
b2fddb13 3647 plane->type = plane_type;
df534fff
S
3648
3649 /*
b2fddb13
NK
3650 * HACK: IGT tests expect that the primary plane for a CRTC
3651 * can only have one possible CRTC. Only expose support for
3652 * any CRTC to planes that are not going to be used as a primary
3653 * plane for a CRTC, such as overlay or underlay planes.
df534fff
S
3654 */
3655 possible_crtcs = 1 << plane_id;
3656 if (plane_id >= dm->dc->caps.max_streams)
3657 possible_crtcs = 0xff;
3658
cc1fec57 3659 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
df534fff
S
3660
3661 if (ret) {
3662 DRM_ERROR("KMS: Failed to initialize plane\n");
54087768 3663 kfree(plane);
df534fff
S
3664 return ret;
3665 }
3666
54087768
NK
3667 if (mode_info)
3668 mode_info->planes[plane_id] = plane;
3669
df534fff
S
3670 return ret;
3671}
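/*
 * Example of the possible_crtcs encoding above: with
 * dc->caps.max_streams == 4, primary planes 0..3 get masks 0x1, 0x2,
 * 0x4 and 0x8 respectively (one fixed CRTC each), while any plane with
 * plane_id >= 4 (overlay/underlay) gets 0xff and may be placed on any
 * CRTC.
 */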
3672
89fc8d4e
HW
3673
3674static void register_backlight_device(struct amdgpu_display_manager *dm,
3675 struct dc_link *link)
3676{
3677#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3678 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3679
3680 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3681 link->type != dc_connection_none) {
1f6010a9
DF
3682 /*
3683 * Even if registration fails, we should continue with
89fc8d4e
HW
3684 * DM initialization because not having a backlight control
3685 * is better than a black screen.
3686 */
118b4627
ML
3687 if (!dm->backlight_dev)
3688 amdgpu_dm_register_backlight_device(dm);
89fc8d4e 3689
118b4627
ML
3690 if (dm->backlight_dev) {
3691 dm->backlight_link[dm->num_of_edps] = link;
3692 dm->num_of_edps++;
3693 }
89fc8d4e
HW
3694 }
3695#endif
3696}
3697
3698
1f6010a9
DF
3699/*
3700 * In this architecture, the association
4562236b
HW
3701 * connector -> encoder -> crtc
3702 * is not really required. The crtc and connector will hold the
3703 * display_index as an abstraction to use with DAL component
3704 *
3705 * Returns 0 on success
3706 */
7578ecda 3707static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4562236b
HW
3708{
3709 struct amdgpu_display_manager *dm = &adev->dm;
df534fff 3710 int32_t i;
c84dec2f 3711 struct amdgpu_dm_connector *aconnector = NULL;
f2a0f5e6 3712 struct amdgpu_encoder *aencoder = NULL;
d4e13b0d 3713 struct amdgpu_mode_info *mode_info = &adev->mode_info;
4562236b 3714 uint32_t link_cnt;
cc1fec57 3715 int32_t primary_planes;
fbbdadf2 3716 enum dc_connection_type new_connection_type = dc_connection_none;
cc1fec57 3717 const struct dc_plane_cap *plane;
4562236b 3718
d58159de
AD
3719 dm->display_indexes_num = dm->dc->caps.max_streams;
3720 /* Update the actual used number of crtc */
3721 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3722
4562236b 3723 link_cnt = dm->dc->caps.max_links;
4562236b
HW
3724 if (amdgpu_dm_mode_config_init(dm->adev)) {
3725 DRM_ERROR("DM: Failed to initialize mode config\n");
59d0f396 3726 return -EINVAL;
4562236b
HW
3727 }
3728
b2fddb13
NK
3729 /* There is one primary plane per CRTC */
3730 primary_planes = dm->dc->caps.max_streams;
54087768 3731 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
efa6a8b7 3732
b2fddb13
NK
3733 /*
3734 * Initialize primary planes, implicit planes for legacy IOCTLS.
3735 * Order is reversed to match iteration order in atomic check.
3736 */
3737 for (i = (primary_planes - 1); i >= 0; i--) {
cc1fec57
NK
3738 plane = &dm->dc->caps.planes[i];
3739
b2fddb13 3740 if (initialize_plane(dm, mode_info, i,
cc1fec57 3741 DRM_PLANE_TYPE_PRIMARY, plane)) {
df534fff 3742 DRM_ERROR("KMS: Failed to initialize primary plane\n");
cd8a2ae8 3743 goto fail;
d4e13b0d 3744 }
df534fff 3745 }
92f3ac40 3746
0d579c7e
NK
3747 /*
3748 * Initialize overlay planes, index starting after primary planes.
3749 * These planes have a higher DRM index than the primary planes since
3750 * they should be considered as having a higher z-order.
3751 * Order is reversed to match iteration order in atomic check.
cc1fec57
NK
3752 *
3753 * Only support DCN for now, and only expose one so we don't encourage
3754 * userspace to use up all the pipes.
0d579c7e 3755 */
cc1fec57
NK
3756 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3757 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3758
3759 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3760 continue;
3761
3762 if (!plane->blends_with_above || !plane->blends_with_below)
3763 continue;
3764
ea36ad34 3765 if (!plane->pixel_format_support.argb8888)
cc1fec57
NK
3766 continue;
3767
54087768 3768 if (initialize_plane(dm, NULL, primary_planes + i,
cc1fec57 3769 DRM_PLANE_TYPE_OVERLAY, plane)) {
0d579c7e 3770 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
cd8a2ae8 3771 goto fail;
d4e13b0d 3772 }
cc1fec57
NK
3773
3774 /* Only create one overlay plane. */
3775 break;
d4e13b0d 3776 }
4562236b 3777
d4e13b0d 3778 for (i = 0; i < dm->dc->caps.max_streams; i++)
f180b4bc 3779 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4562236b 3780 DRM_ERROR("KMS: Failed to initialize crtc\n");
cd8a2ae8 3781 goto fail;
4562236b 3782 }
4562236b 3783
50610b74 3784#if defined(CONFIG_DRM_AMD_DC_DCN)
81927e28
JS
3785 /* Use Outbox interrupt */
3786 switch (adev->asic_type) {
81927e28
JS
3787 case CHIP_SIENNA_CICHLID:
3788 case CHIP_NAVY_FLOUNDER:
1ebcaebd
NK
3789#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
3790 case CHIP_YELLOW_CARP:
3791#endif
81927e28
JS
3792 case CHIP_RENOIR:
3793 if (register_outbox_irq_handlers(dm->adev)) {
3794 DRM_ERROR("DM: Failed to initialize IRQ\n");
3795 goto fail;
3796 }
3797 break;
3798 default:
3799 DRM_DEBUG_KMS("Unsupported ASIC type for outbox: 0x%X\n", adev->asic_type);
3800 }
50610b74 3801#endif
81927e28 3802
4562236b
HW
3803 /* loops over all connectors on the board */
3804 for (i = 0; i < link_cnt; i++) {
89fc8d4e 3805 struct dc_link *link = NULL;
4562236b
HW
3806
3807 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3808 DRM_ERROR(
3809 "KMS: Cannot support more than %d display indexes\n",
3810 AMDGPU_DM_MAX_DISPLAY_INDEX);
3811 continue;
3812 }
3813
3814 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3815 if (!aconnector)
cd8a2ae8 3816 goto fail;
4562236b
HW
3817
3818 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
8440c304 3819 if (!aencoder)
cd8a2ae8 3820 goto fail;
4562236b
HW
3821
3822 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3823 DRM_ERROR("KMS: Failed to initialize encoder\n");
cd8a2ae8 3824 goto fail;
4562236b
HW
3825 }
3826
3827 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3828 DRM_ERROR("KMS: Failed to initialize connector\n");
cd8a2ae8 3829 goto fail;
4562236b
HW
3830 }
3831
89fc8d4e
HW
3832 link = dc_get_link_at_index(dm->dc, i);
3833
fbbdadf2
BL
3834 if (!dc_link_detect_sink(link, &new_connection_type))
3835 DRM_ERROR("KMS: Failed to detect connector\n");
3836
3837 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3838 emulated_link_detect(link);
3839 amdgpu_dm_update_connector_after_detect(aconnector);
3840
3841 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4562236b 3842 amdgpu_dm_update_connector_after_detect(aconnector);
89fc8d4e 3843 register_backlight_device(dm, link);
397a9bc5
RL
3844 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3845 amdgpu_dm_set_psr_caps(link);
89fc8d4e
HW
3846 }
3847
3848
4562236b
HW
3849 }
3850
3851 /* Software is initialized. Now we can register interrupt handlers. */
3852 switch (adev->asic_type) {
55e56389
MR
3853#if defined(CONFIG_DRM_AMD_DC_SI)
3854 case CHIP_TAHITI:
3855 case CHIP_PITCAIRN:
3856 case CHIP_VERDE:
3857 case CHIP_OLAND:
3858 if (dce60_register_irq_handlers(dm->adev)) {
3859 DRM_ERROR("DM: Failed to initialize IRQ\n");
3860 goto fail;
3861 }
3862 break;
3863#endif
4562236b
HW
3864 case CHIP_BONAIRE:
3865 case CHIP_HAWAII:
cd4b356f
AD
3866 case CHIP_KAVERI:
3867 case CHIP_KABINI:
3868 case CHIP_MULLINS:
4562236b
HW
3869 case CHIP_TONGA:
3870 case CHIP_FIJI:
3871 case CHIP_CARRIZO:
3872 case CHIP_STONEY:
3873 case CHIP_POLARIS11:
3874 case CHIP_POLARIS10:
b264d345 3875 case CHIP_POLARIS12:
7737de91 3876 case CHIP_VEGAM:
2c8ad2d5 3877 case CHIP_VEGA10:
2325ff30 3878 case CHIP_VEGA12:
1fe6bf2f 3879 case CHIP_VEGA20:
4562236b
HW
3880 if (dce110_register_irq_handlers(dm->adev)) {
3881 DRM_ERROR("DM: Failed to initialize IRQ\n");
cd8a2ae8 3882 goto fail;
4562236b
HW
3883 }
3884 break;
b86a1aa3 3885#if defined(CONFIG_DRM_AMD_DC_DCN)
ff5ef992 3886 case CHIP_RAVEN:
fbd2afe5 3887 case CHIP_NAVI12:
476e955d 3888 case CHIP_NAVI10:
fce651e3 3889 case CHIP_NAVI14:
30221ad8 3890 case CHIP_RENOIR:
79037324 3891 case CHIP_SIENNA_CICHLID:
a6c5308f 3892 case CHIP_NAVY_FLOUNDER:
2a411205 3893 case CHIP_DIMGREY_CAVEFISH:
656fe9b6 3894 case CHIP_BEIGE_GOBY:
469989ca 3895 case CHIP_VANGOGH:
1ebcaebd
NK
3896#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
3897 case CHIP_YELLOW_CARP:
3898#endif
ff5ef992
AD
3899 if (dcn10_register_irq_handlers(dm->adev)) {
3900 DRM_ERROR("DM: Failed to initialize IRQ\n");
cd8a2ae8 3901 goto fail;
ff5ef992
AD
3902 }
3903 break;
3904#endif
4562236b 3905 default:
e63f8673 3906 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
cd8a2ae8 3907 goto fail;
4562236b
HW
3908 }
3909
4562236b 3910 return 0;
cd8a2ae8 3911fail:
4562236b 3912 kfree(aencoder);
4562236b 3913 kfree(aconnector);
54087768 3914
59d0f396 3915 return -EINVAL;
4562236b
HW
3916}
3917
7578ecda 3918static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4562236b 3919{
eb3dc897 3920 drm_atomic_private_obj_fini(&dm->atomic_obj);
4562236b
HW
3921 return;
3922}
3923
3924/******************************************************************************
3925 * amdgpu_display_funcs functions
3926 *****************************************************************************/
3927
1f6010a9 3928/*
4562236b
HW
3929 * dm_bandwidth_update - program display watermarks
3930 *
3931 * @adev: amdgpu_device pointer
3932 *
3933 * Calculate and program the display watermarks and line buffer allocation.
3934 */
3935static void dm_bandwidth_update(struct amdgpu_device *adev)
3936{
49c07a99 3937 /* TODO: implement later */
4562236b
HW
3938}
3939
39cc5be2 3940static const struct amdgpu_display_funcs dm_display_funcs = {
4562236b
HW
3941 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3942 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
7b42573b
HW
3943 .backlight_set_level = NULL, /* never called for DC */
3944 .backlight_get_level = NULL, /* never called for DC */
4562236b
HW
3945 .hpd_sense = NULL,/* called unconditionally */
3946 .hpd_set_polarity = NULL, /* called unconditionally */
3947 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4562236b
HW
3948 .page_flip_get_scanoutpos =
3949 dm_crtc_get_scanoutpos,/* called unconditionally */
3950 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3951 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4562236b
HW
3952};
3953
3954#if defined(CONFIG_DEBUG_KERNEL_DC)
3955
3ee6b26b
AD
3956static ssize_t s3_debug_store(struct device *device,
3957 struct device_attribute *attr,
3958 const char *buf,
3959 size_t count)
4562236b
HW
3960{
3961 int ret;
3962 int s3_state;
ef1de361 3963 struct drm_device *drm_dev = dev_get_drvdata(device);
1348969a 3964 struct amdgpu_device *adev = drm_to_adev(drm_dev);
4562236b
HW
3965
3966 ret = kstrtoint(buf, 0, &s3_state);
3967
3968 if (ret == 0) {
3969 if (s3_state) {
3970 dm_resume(adev);
4a580877 3971 drm_kms_helper_hotplug_event(adev_to_drm(adev));
4562236b
HW
3972 } else
3973 dm_suspend(adev);
3974 }
3975
3976 return ret == 0 ? count : 0;
3977}
3978
3979DEVICE_ATTR_WO(s3_debug);
3980
3981#endif
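/*
 * With CONFIG_DEBUG_KERNEL_DC set, the attribute above fakes a
 * suspend/resume cycle from userspace (the sysfs path shown is
 * illustrative and depends on the PCI topology):
 *
 *	echo 0 > /sys/bus/pci/devices/<bdf>/s3_debug	# dm_suspend()
 *	echo 1 > /sys/bus/pci/devices/<bdf>/s3_debug	# dm_resume()
 */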
3982
3983static int dm_early_init(void *handle)
3984{
3985 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3986
4562236b 3987 switch (adev->asic_type) {
55e56389
MR
3988#if defined(CONFIG_DRM_AMD_DC_SI)
3989 case CHIP_TAHITI:
3990 case CHIP_PITCAIRN:
3991 case CHIP_VERDE:
3992 adev->mode_info.num_crtc = 6;
3993 adev->mode_info.num_hpd = 6;
3994 adev->mode_info.num_dig = 6;
3995 break;
3996 case CHIP_OLAND:
3997 adev->mode_info.num_crtc = 2;
3998 adev->mode_info.num_hpd = 2;
3999 adev->mode_info.num_dig = 2;
4000 break;
4001#endif
4562236b
HW
4002 case CHIP_BONAIRE:
4003 case CHIP_HAWAII:
4004 adev->mode_info.num_crtc = 6;
4005 adev->mode_info.num_hpd = 6;
4006 adev->mode_info.num_dig = 6;
4562236b 4007 break;
cd4b356f
AD
4008 case CHIP_KAVERI:
4009 adev->mode_info.num_crtc = 4;
4010 adev->mode_info.num_hpd = 6;
4011 adev->mode_info.num_dig = 7;
cd4b356f
AD
4012 break;
4013 case CHIP_KABINI:
4014 case CHIP_MULLINS:
4015 adev->mode_info.num_crtc = 2;
4016 adev->mode_info.num_hpd = 6;
4017 adev->mode_info.num_dig = 6;
cd4b356f 4018 break;
4562236b
HW
4019 case CHIP_FIJI:
4020 case CHIP_TONGA:
4021 adev->mode_info.num_crtc = 6;
4022 adev->mode_info.num_hpd = 6;
4023 adev->mode_info.num_dig = 7;
4562236b
HW
4024 break;
4025 case CHIP_CARRIZO:
4026 adev->mode_info.num_crtc = 3;
4027 adev->mode_info.num_hpd = 6;
4028 adev->mode_info.num_dig = 9;
4562236b
HW
4029 break;
4030 case CHIP_STONEY:
4031 adev->mode_info.num_crtc = 2;
4032 adev->mode_info.num_hpd = 6;
4033 adev->mode_info.num_dig = 9;
4562236b
HW
4034 break;
4035 case CHIP_POLARIS11:
b264d345 4036 case CHIP_POLARIS12:
4562236b
HW
4037 adev->mode_info.num_crtc = 5;
4038 adev->mode_info.num_hpd = 5;
4039 adev->mode_info.num_dig = 5;
4562236b
HW
4040 break;
4041 case CHIP_POLARIS10:
7737de91 4042 case CHIP_VEGAM:
4562236b
HW
4043 adev->mode_info.num_crtc = 6;
4044 adev->mode_info.num_hpd = 6;
4045 adev->mode_info.num_dig = 6;
4562236b 4046 break;
2c8ad2d5 4047 case CHIP_VEGA10:
2325ff30 4048 case CHIP_VEGA12:
1fe6bf2f 4049 case CHIP_VEGA20:
2c8ad2d5
AD
4050 adev->mode_info.num_crtc = 6;
4051 adev->mode_info.num_hpd = 6;
4052 adev->mode_info.num_dig = 6;
4053 break;
b86a1aa3 4054#if defined(CONFIG_DRM_AMD_DC_DCN)
ff5ef992 4055 case CHIP_RAVEN:
20f2ffe5
AD
4056 case CHIP_RENOIR:
4057 case CHIP_VANGOGH:
ff5ef992
AD
4058 adev->mode_info.num_crtc = 4;
4059 adev->mode_info.num_hpd = 4;
4060 adev->mode_info.num_dig = 4;
ff5ef992 4061 break;
476e955d 4062 case CHIP_NAVI10:
fbd2afe5 4063 case CHIP_NAVI12:
79037324 4064 case CHIP_SIENNA_CICHLID:
a6c5308f 4065 case CHIP_NAVY_FLOUNDER:
476e955d
HW
4066 adev->mode_info.num_crtc = 6;
4067 adev->mode_info.num_hpd = 6;
4068 adev->mode_info.num_dig = 6;
4069 break;
1ebcaebd
NK
4070#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
4071 case CHIP_YELLOW_CARP:
4072 adev->mode_info.num_crtc = 4;
4073 adev->mode_info.num_hpd = 4;
4074 adev->mode_info.num_dig = 4;
4075 break;
4076#endif
fce651e3 4077 case CHIP_NAVI14:
2a411205 4078 case CHIP_DIMGREY_CAVEFISH:
fce651e3
BL
4079 adev->mode_info.num_crtc = 5;
4080 adev->mode_info.num_hpd = 5;
4081 adev->mode_info.num_dig = 5;
4082 break;
656fe9b6
AP
4083 case CHIP_BEIGE_GOBY:
4084 adev->mode_info.num_crtc = 2;
4085 adev->mode_info.num_hpd = 2;
4086 adev->mode_info.num_dig = 2;
4087 break;
20f2ffe5 4088#endif
4562236b 4089 default:
e63f8673 4090 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4562236b
HW
4091 return -EINVAL;
4092 }
4093
c8dd5715
MD
4094 amdgpu_dm_set_irq_funcs(adev);
4095
39cc5be2
AD
4096 if (adev->mode_info.funcs == NULL)
4097 adev->mode_info.funcs = &dm_display_funcs;
4098
1f6010a9
DF
4099 /*
4100 * Note: Do NOT change adev->audio_endpt_rreg and
4562236b 4101 * adev->audio_endpt_wreg because they are initialised in
1f6010a9
DF
4102 * amdgpu_device_init()
4103 */
4562236b
HW
4104#if defined(CONFIG_DEBUG_KERNEL_DC)
4105 device_create_file(
4a580877 4106 adev_to_drm(adev)->dev,
4562236b
HW
4107 &dev_attr_s3_debug);
4108#endif
4109
4110 return 0;
4111}
4112
9b690ef3 4113static bool modeset_required(struct drm_crtc_state *crtc_state,
0971c40e
HW
4114 struct dc_stream_state *new_stream,
4115 struct dc_stream_state *old_stream)
9b690ef3 4116{
2afda735 4117 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
e7b07cee
HW
4118}
4119
4120static bool modereset_required(struct drm_crtc_state *crtc_state)
4121{
2afda735 4122 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
e7b07cee
HW
4123}
4124
7578ecda 4125static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
e7b07cee
HW
4126{
4127 drm_encoder_cleanup(encoder);
4128 kfree(encoder);
4129}
4130
4131static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4132 .destroy = amdgpu_dm_encoder_destroy,
4133};
4134
e7b07cee 4135
6300b3bd
MK
4136static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4137 struct drm_framebuffer *fb,
4138 int *min_downscale, int *max_upscale)
4139{
4140 struct amdgpu_device *adev = drm_to_adev(dev);
4141 struct dc *dc = adev->dm.dc;
4142 /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4143 struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4144
4145 switch (fb->format->format) {
4146 case DRM_FORMAT_P010:
4147 case DRM_FORMAT_NV12:
4148 case DRM_FORMAT_NV21:
4149 *max_upscale = plane_cap->max_upscale_factor.nv12;
4150 *min_downscale = plane_cap->max_downscale_factor.nv12;
4151 break;
4152
4153 case DRM_FORMAT_XRGB16161616F:
4154 case DRM_FORMAT_ARGB16161616F:
4155 case DRM_FORMAT_XBGR16161616F:
4156 case DRM_FORMAT_ABGR16161616F:
4157 *max_upscale = plane_cap->max_upscale_factor.fp16;
4158 *min_downscale = plane_cap->max_downscale_factor.fp16;
4159 break;
4160
4161 default:
4162 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4163 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4164 break;
4165 }
4166
4167 /*
4168 * A factor of 1 in the plane_cap means scaling is not allowed, i.e. only
4169 * a scaling factor of 1.0 is accepted, which is 1000 in these units.
4170 */
4171 if (*max_upscale == 1)
4172 *max_upscale = 1000;
4173
4174 if (*min_downscale == 1)
4175 *min_downscale = 1000;
4176}
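/*
 * Example: with min_downscale = 250 and max_upscale = 16000 (the
 * fallback values fill_dc_scaling_info() uses when no framebuffer is
 * attached), a 1920-pixel-wide source may be scaled to anywhere
 * between 1920 * 250 / 1000 = 480 and 1920 * 16000 / 1000 = 30720
 * pixels.
 */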
4177
4178
695af5f9
NK
4179static int fill_dc_scaling_info(const struct drm_plane_state *state,
4180 struct dc_scaling_info *scaling_info)
e7b07cee 4181{
6300b3bd 4182 int scale_w, scale_h, min_downscale, max_upscale;
e7b07cee 4183
695af5f9 4184 memset(scaling_info, 0, sizeof(*scaling_info));
e7b07cee 4185
695af5f9
NK
4186 /* Source is fixed 16.16 but we ignore mantissa for now... */
4187 scaling_info->src_rect.x = state->src_x >> 16;
4188 scaling_info->src_rect.y = state->src_y >> 16;
e7b07cee 4189
d89f6048
HW
4190 /*
4191 * For reasons we don't (yet) fully understand a non-zero
4192 * src_y coordinate into an NV12 buffer can cause a
4193 * system hang. To avoid hangs (and maybe be overly cautious)
4194 * let's reject both non-zero src_x and src_y.
4195 *
4196 * We currently know of only one use-case to reproduce a
4197 * scenario with non-zero src_x and src_y for NV12, which
4198 * is to gesture the YouTube Android app into full screen
4199 * on ChromeOS.
4200 */
4201 if (state->fb &&
4202 state->fb->format->format == DRM_FORMAT_NV12 &&
4203 (scaling_info->src_rect.x != 0 ||
4204 scaling_info->src_rect.y != 0))
4205 return -EINVAL;
4206
695af5f9
NK
4207 scaling_info->src_rect.width = state->src_w >> 16;
4208 if (scaling_info->src_rect.width == 0)
4209 return -EINVAL;
4210
4211 scaling_info->src_rect.height = state->src_h >> 16;
4212 if (scaling_info->src_rect.height == 0)
4213 return -EINVAL;
4214
4215 scaling_info->dst_rect.x = state->crtc_x;
4216 scaling_info->dst_rect.y = state->crtc_y;
e7b07cee
HW
4217
4218 if (state->crtc_w == 0)
695af5f9 4219 return -EINVAL;
e7b07cee 4220
695af5f9 4221 scaling_info->dst_rect.width = state->crtc_w;
e7b07cee
HW
4222
4223 if (state->crtc_h == 0)
695af5f9 4224 return -EINVAL;
e7b07cee 4225
695af5f9 4226 scaling_info->dst_rect.height = state->crtc_h;
e7b07cee 4227
695af5f9
NK
4228 /* DRM doesn't specify clipping on destination output. */
4229 scaling_info->clip_rect = scaling_info->dst_rect;
e7b07cee 4230
6300b3bd
MK
4231 /* Validate scaling per-format with DC plane caps */
4232 if (state->plane && state->plane->dev && state->fb) {
4233 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4234 &min_downscale, &max_upscale);
4235 } else {
4236 min_downscale = 250;
4237 max_upscale = 16000;
4238 }
4239
6491f0c0
NK
4240 scale_w = scaling_info->dst_rect.width * 1000 /
4241 scaling_info->src_rect.width;
e7b07cee 4242
6300b3bd 4243 if (scale_w < min_downscale || scale_w > max_upscale)
6491f0c0
NK
4244 return -EINVAL;
4245
4246 scale_h = scaling_info->dst_rect.height * 1000 /
4247 scaling_info->src_rect.height;
4248
6300b3bd 4249 if (scale_h < min_downscale || scale_h > max_upscale)
6491f0c0
NK
4250 return -EINVAL;
4251
695af5f9
NK
4252 /*
4253 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4254 * assume reasonable defaults based on the format.
4255 */
e7b07cee 4256
695af5f9 4257 return 0;
4562236b 4258}
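/*
 * The src_* coordinates above are 16.16 fixed point, so the >> 16
 * conversions keep only the integer part: a src_w of 0x07808000
 * (1920.5) becomes 1920, for example.
 */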
695af5f9 4259
a3241991
BN
4260static void
4261fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4262 uint64_t tiling_flags)
e7b07cee 4263{
a3241991
BN
4264 /* Fill GFX8 params */
4265 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4266 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
707477b0 4267
a3241991
BN
4268 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4269 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4270 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4271 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4272 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
b830ebc9 4273
a3241991
BN
4274 /* XXX fix me for VI */
4275 tiling_info->gfx8.num_banks = num_banks;
4276 tiling_info->gfx8.array_mode =
4277 DC_ARRAY_2D_TILED_THIN1;
4278 tiling_info->gfx8.tile_split = tile_split;
4279 tiling_info->gfx8.bank_width = bankw;
4280 tiling_info->gfx8.bank_height = bankh;
4281 tiling_info->gfx8.tile_aspect = mtaspect;
4282 tiling_info->gfx8.tile_mode =
4283 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4284 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4285 == DC_ARRAY_1D_TILED_THIN1) {
4286 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
e7b07cee
HW
4287 }
4288
a3241991
BN
4289 tiling_info->gfx8.pipe_config =
4290 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
e7b07cee
HW
4291}
4292
a3241991
BN
4293static void
4294fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4295 union dc_tiling_info *tiling_info)
4296{
4297 tiling_info->gfx9.num_pipes =
4298 adev->gfx.config.gb_addr_config_fields.num_pipes;
4299 tiling_info->gfx9.num_banks =
4300 adev->gfx.config.gb_addr_config_fields.num_banks;
4301 tiling_info->gfx9.pipe_interleave =
4302 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4303 tiling_info->gfx9.num_shader_engines =
4304 adev->gfx.config.gb_addr_config_fields.num_se;
4305 tiling_info->gfx9.max_compressed_frags =
4306 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4307 tiling_info->gfx9.num_rb_per_se =
4308 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4309 tiling_info->gfx9.shaderEnable = 1;
a3241991
BN
4310 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4311 adev->asic_type == CHIP_NAVY_FLOUNDER ||
4312 adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
656fe9b6 4313 adev->asic_type == CHIP_BEIGE_GOBY ||
1ebcaebd
NK
4314#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
4315 adev->asic_type == CHIP_YELLOW_CARP ||
4316#endif
a3241991
BN
4317 adev->asic_type == CHIP_VANGOGH)
4318 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
7df7e505
NK
4319}
4320
695af5f9 4321static int
a3241991
BN
4322validate_dcc(struct amdgpu_device *adev,
4323 const enum surface_pixel_format format,
4324 const enum dc_rotation_angle rotation,
4325 const union dc_tiling_info *tiling_info,
4326 const struct dc_plane_dcc_param *dcc,
4327 const struct dc_plane_address *address,
4328 const struct plane_size *plane_size)
7df7e505
NK
4329{
4330 struct dc *dc = adev->dm.dc;
8daa1218
NC
4331 struct dc_dcc_surface_param input;
4332 struct dc_surface_dcc_cap output;
7df7e505 4333
8daa1218
NC
4334 memset(&input, 0, sizeof(input));
4335 memset(&output, 0, sizeof(output));
4336
a3241991 4337 if (!dcc->enable)
87b7ebc2
RS
4338 return 0;
4339
a3241991
BN
4340 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4341 !dc->cap_funcs.get_dcc_compression_cap)
09e5665a 4342 return -EINVAL;
7df7e505 4343
695af5f9 4344 input.format = format;
12e2b2d4
DL
4345 input.surface_size.width = plane_size->surface_size.width;
4346 input.surface_size.height = plane_size->surface_size.height;
695af5f9 4347 input.swizzle_mode = tiling_info->gfx9.swizzle;
7df7e505 4348
695af5f9 4349 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
7df7e505 4350 input.scan = SCAN_DIRECTION_HORIZONTAL;
695af5f9 4351 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
7df7e505
NK
4352 input.scan = SCAN_DIRECTION_VERTICAL;
4353
4354 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
09e5665a 4355 return -EINVAL;
7df7e505
NK
4356
4357 if (!output.capable)
09e5665a 4358 return -EINVAL;
7df7e505 4359
a3241991
BN
4360 if (dcc->independent_64b_blks == 0 &&
4361 output.grph.rgb.independent_64b_blks != 0)
09e5665a 4362 return -EINVAL;
7df7e505 4363
a3241991
BN
4364 return 0;
4365}
4366
37384b3f
BN
4367static bool
4368modifier_has_dcc(uint64_t modifier)
4369{
4370 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4371}
4372
4373static unsigned
4374modifier_gfx9_swizzle_mode(uint64_t modifier)
4375{
4376 if (modifier == DRM_FORMAT_MOD_LINEAR)
4377 return 0;
4378
4379 return AMD_FMT_MOD_GET(TILE, modifier);
4380}
4381
dfbbfe3c
BN
4382static const struct drm_format_info *
4383amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4384{
816853f9 4385 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
dfbbfe3c
BN
4386}
4387
37384b3f
BN
4388static void
4389fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4390 union dc_tiling_info *tiling_info,
4391 uint64_t modifier)
4392{
4393 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4394 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4395 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4396 unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4397
4398 fill_gfx9_tiling_info_from_device(adev, tiling_info);
4399
4400 if (!IS_AMD_FMT_MOD(modifier))
4401 return;
4402
4403 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4404 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4405
4406 if (adev->family >= AMDGPU_FAMILY_NV) {
4407 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4408 } else {
4409 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4410
4411 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4412 }
4413}
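/*
 * Decode example for the modifier fields above: a gfx10 modifier with
 * PIPE_XOR_BITS = 3 and PACKERS = 2 gives num_pipes = 1 << min(4, 3) =
 * 8, num_shader_engines = 1 << (3 - 3) = 1 and, on AMDGPU_FAMILY_NV
 * parts, num_pkrs = 1 << 2 = 4.
 */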
4414
faa37f54
BN
4415enum dm_micro_swizzle {
4416 MICRO_SWIZZLE_Z = 0,
4417 MICRO_SWIZZLE_S = 1,
4418 MICRO_SWIZZLE_D = 2,
4419 MICRO_SWIZZLE_R = 3
4420};
4421
4422static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4423 uint32_t format,
4424 uint64_t modifier)
4425{
4426 struct amdgpu_device *adev = drm_to_adev(plane->dev);
4427 const struct drm_format_info *info = drm_format_info(format);
fe180178 4428 int i;
faa37f54
BN
4429
4430 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4431
4432 if (!info)
4433 return false;
4434
4435 /*
fe180178
QZ
4436 * We always have to allow these modifiers:
4437 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4438 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
faa37f54 4439 */
fe180178
QZ
4440 if (modifier == DRM_FORMAT_MOD_LINEAR ||
4441 modifier == DRM_FORMAT_MOD_INVALID) {
faa37f54 4442 return true;
fe180178 4443 }
faa37f54 4444
fe180178
QZ
4445 /* Check that the modifier is on the list of the plane's supported modifiers. */
4446 for (i = 0; i < plane->modifier_count; i++) {
4447 if (modifier == plane->modifiers[i])
4448 break;
4449 }
4450 if (i == plane->modifier_count)
faa37f54
BN
4451 return false;
4452
4453 /*
4454 * For D swizzle the canonical modifier depends on the bpp, so check
4455 * it here.
4456 */
4457 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4458 adev->family >= AMDGPU_FAMILY_NV) {
4459 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4460 return false;
4461 }
4462
4463 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4464 info->cpp[0] < 8)
4465 return false;
4466
4467 if (modifier_has_dcc(modifier)) {
4468 /* Per radeonsi comments 16/64 bpp are more complicated. */
4469 if (info->cpp[0] != 4)
4470 return false;
951796f2
SS
4471 /* We support multi-planar formats, but not when combined with
4472 * additional DCC metadata planes. */
4473 if (info->num_planes > 1)
4474 return false;
faa37f54
BN
4475 }
4476
4477 return true;
4478}
4479
4480static void
4481add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4482{
4483 if (!*mods)
4484 return;
4485
4486 if (*cap - *size < 1) {
4487 uint64_t new_cap = *cap * 2;
4488 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4489
4490 if (!new_mods) {
4491 kfree(*mods);
4492 *mods = NULL;
4493 return;
4494 }
4495
4496 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4497 kfree(*mods);
4498 *mods = new_mods;
4499 *cap = new_cap;
4500 }
4501
4502 (*mods)[*size] = mod;
4503 *size += 1;
4504}
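/*
 * add_modifier() is a push_back with doubling growth: starting from
 * the capacity of 128 allocated in get_plane_modifiers(), the array is
 * reallocated at twice the size whenever it fills up, and *mods is set
 * to NULL on allocation failure so callers can detect OOM with a
 * single check at the end instead of checking every append.
 */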
4505
4506static void
4507add_gfx9_modifiers(const struct amdgpu_device *adev,
4508 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4509{
4510 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4511 int pipe_xor_bits = min(8, pipes +
4512 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4513 int bank_xor_bits = min(8 - pipe_xor_bits,
4514 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4515 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4516 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4517
4518
4519 if (adev->family == AMDGPU_FAMILY_RV) {
4520 /* Raven2 and later */
4521 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4522
4523 /*
4524 * No _D DCC swizzles yet because we only allow 32bpp, which
4525 * doesn't support _D on DCN
4526 */
4527
4528 if (has_constant_encode) {
4529 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4530 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4531 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4532 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4533 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4534 AMD_FMT_MOD_SET(DCC, 1) |
4535 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4536 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4537 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4538 }
4539
4540 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4541 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4542 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4543 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4544 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4545 AMD_FMT_MOD_SET(DCC, 1) |
4546 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4547 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4548 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4549
4550 if (has_constant_encode) {
4551 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4552 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4553 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4554 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4555 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4556 AMD_FMT_MOD_SET(DCC, 1) |
4557 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4558 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4559 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4560
4561 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4562 AMD_FMT_MOD_SET(RB, rb) |
4563 AMD_FMT_MOD_SET(PIPE, pipes));
4564 }
4565
4566 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4567 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4568 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4569 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4570 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4571 AMD_FMT_MOD_SET(DCC, 1) |
4572 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4573 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4574 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4575 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4576 AMD_FMT_MOD_SET(RB, rb) |
4577 AMD_FMT_MOD_SET(PIPE, pipes));
4578 }
4579
4580 /*
4581 * Only supported for 64bpp on Raven, will be filtered on format in
4582 * dm_plane_format_mod_supported.
4583 */
4584 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4585 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4586 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4587 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4588 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4589
4590 if (adev->family == AMDGPU_FAMILY_RV) {
4591 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4592 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4593 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4594 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4595 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4596 }
4597
4598 /*
4599 * Only supported for 64bpp on Raven, will be filtered on format in
4600 * dm_plane_format_mod_supported.
4601 */
4602 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4603 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4604 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4605
4606 if (adev->family == AMDGPU_FAMILY_RV) {
4607 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4608 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4609 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4610 }
4611}
4612
4613static void
4614add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4615 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4616{
4617 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4618
4619 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4620 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4621 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4622 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4623 AMD_FMT_MOD_SET(DCC, 1) |
4624 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4625 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4626 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4627
4628 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4629 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4630 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4631 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4632 AMD_FMT_MOD_SET(DCC, 1) |
4633 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4634 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4635 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4636 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4637
4638 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4639 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4640 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4641 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4642
4643 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4644 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4645 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4646 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4647
4648
4649 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4650 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4651 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4652 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4653
4654 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4655 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4656 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4657}
4658
4659static void
4660add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4661 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4662{
4663 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4664 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4665
4666 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4667 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4668 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4669 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4670 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4671 AMD_FMT_MOD_SET(DCC, 1) |
4672 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4673 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4674 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
2b608182 4675 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
faa37f54
BN
4676
4677 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4678 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4679 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4680 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4681 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4682 AMD_FMT_MOD_SET(DCC, 1) |
4683 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4684 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4685 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4686 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
2b608182 4687 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
faa37f54
BN
4688
4689 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4690 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4691 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4692 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4693 AMD_FMT_MOD_SET(PACKERS, pkrs));
4694
4695 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4696 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4697 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4698 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4699 AMD_FMT_MOD_SET(PACKERS, pkrs));
4700
4701 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4702 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4703 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4704 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4705
4706 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4707 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4708 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4709}
4710
4711static int
4712get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4713{
4714 uint64_t size = 0, capacity = 128;
4715 *mods = NULL;
4716
4717 /* We have not hooked up any pre-GFX9 modifiers. */
4718 if (adev->family < AMDGPU_FAMILY_AI)
4719 return 0;
4720
4721 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4722
4723 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4724 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4725 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4726 return *mods ? 0 : -ENOMEM;
4727 }
4728
4729 switch (adev->family) {
4730 case AMDGPU_FAMILY_AI:
4731 case AMDGPU_FAMILY_RV:
4732 add_gfx9_modifiers(adev, mods, &size, &capacity);
4733 break;
4734 case AMDGPU_FAMILY_NV:
4735 case AMDGPU_FAMILY_VGH:
1ebcaebd 4736 case AMDGPU_FAMILY_YC:
faa37f54
BN
4737 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4738 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4739 else
4740 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4741 break;
4742 }
4743
4744 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4745
4746 /* INVALID marks the end of the list. */
4747 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4748
4749 if (!*mods)
4750 return -ENOMEM;
4751
4752 return 0;
4753}
4754
37384b3f
BN
4755static int
4756fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4757 const struct amdgpu_framebuffer *afb,
4758 const enum surface_pixel_format format,
4759 const enum dc_rotation_angle rotation,
4760 const struct plane_size *plane_size,
4761 union dc_tiling_info *tiling_info,
4762 struct dc_plane_dcc_param *dcc,
4763 struct dc_plane_address *address,
4764 const bool force_disable_dcc)
4765{
4766 const uint64_t modifier = afb->base.modifier;
4767 int ret;
4768
4769 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4770 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4771
4772 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4773 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4774
4775 dcc->enable = 1;
4776 dcc->meta_pitch = afb->base.pitches[1];
4777 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4778
4779 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4780 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4781 }
4782
4783 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4784 if (ret)
4785 return ret;
7df7e505 4786
09e5665a
NK
4787 return 0;
4788}
4789
4790static int
320932bf 4791fill_plane_buffer_attributes(struct amdgpu_device *adev,
09e5665a 4792 const struct amdgpu_framebuffer *afb,
695af5f9
NK
4793 const enum surface_pixel_format format,
4794 const enum dc_rotation_angle rotation,
4795 const uint64_t tiling_flags,
09e5665a 4796 union dc_tiling_info *tiling_info,
12e2b2d4 4797 struct plane_size *plane_size,
09e5665a 4798 struct dc_plane_dcc_param *dcc,
87b7ebc2 4799 struct dc_plane_address *address,
5888f07a 4800 bool tmz_surface,
87b7ebc2 4801 bool force_disable_dcc)
09e5665a 4802{
320932bf 4803 const struct drm_framebuffer *fb = &afb->base;
09e5665a
NK
4804 int ret;
4805
4806 memset(tiling_info, 0, sizeof(*tiling_info));
320932bf 4807 memset(plane_size, 0, sizeof(*plane_size));
09e5665a 4808 memset(dcc, 0, sizeof(*dcc));
e0634e8d
NK
4809 memset(address, 0, sizeof(*address));
4810
5888f07a
HW
4811 address->tmz_surface = tmz_surface;
4812
695af5f9 4813 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
be7b9b32
BN
4814 uint64_t addr = afb->address + fb->offsets[0];
4815
12e2b2d4
DL
4816 plane_size->surface_size.x = 0;
4817 plane_size->surface_size.y = 0;
4818 plane_size->surface_size.width = fb->width;
4819 plane_size->surface_size.height = fb->height;
4820 plane_size->surface_pitch =
320932bf
NK
4821 fb->pitches[0] / fb->format->cpp[0];
4822
e0634e8d 4823 address->type = PLN_ADDR_TYPE_GRAPHICS;
be7b9b32
BN
4824 address->grph.addr.low_part = lower_32_bits(addr);
4825 address->grph.addr.high_part = upper_32_bits(addr);
1894478a 4826 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
be7b9b32 4827 uint64_t luma_addr = afb->address + fb->offsets[0];
1791e54f 4828 uint64_t chroma_addr = afb->address + fb->offsets[1];
e0634e8d 4829
12e2b2d4
DL
4830 plane_size->surface_size.x = 0;
4831 plane_size->surface_size.y = 0;
4832 plane_size->surface_size.width = fb->width;
4833 plane_size->surface_size.height = fb->height;
4834 plane_size->surface_pitch =
320932bf
NK
4835 fb->pitches[0] / fb->format->cpp[0];
4836
12e2b2d4
DL
4837 plane_size->chroma_size.x = 0;
4838 plane_size->chroma_size.y = 0;
320932bf 4839 /* TODO: set these based on surface format */
12e2b2d4
DL
4840 plane_size->chroma_size.width = fb->width / 2;
4841 plane_size->chroma_size.height = fb->height / 2;
320932bf 4842
12e2b2d4 4843 plane_size->chroma_pitch =
320932bf
NK
4844 fb->pitches[1] / fb->format->cpp[1];
4845
e0634e8d
NK
4846 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4847 address->video_progressive.luma_addr.low_part =
be7b9b32 4848 lower_32_bits(luma_addr);
e0634e8d 4849 address->video_progressive.luma_addr.high_part =
be7b9b32 4850 upper_32_bits(luma_addr);
e0634e8d
NK
4851 address->video_progressive.chroma_addr.low_part =
4852 lower_32_bits(chroma_addr);
4853 address->video_progressive.chroma_addr.high_part =
4854 upper_32_bits(chroma_addr);
4855 }
09e5665a 4856
a3241991 4857 if (adev->family >= AMDGPU_FAMILY_AI) {
9a33e881
BN
4858 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4859 rotation, plane_size,
4860 tiling_info, dcc,
4861 address,
4862 force_disable_dcc);
09e5665a
NK
4863 if (ret)
4864 return ret;
a3241991
BN
4865 } else {
4866 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
09e5665a
NK
4867 }
4868
4869 return 0;
7df7e505
NK
4870}
4871
d74004b6 4872static void
695af5f9 4873fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
d74004b6
NK
4874 bool *per_pixel_alpha, bool *global_alpha,
4875 int *global_alpha_value)
4876{
4877 *per_pixel_alpha = false;
4878 *global_alpha = false;
4879 *global_alpha_value = 0xff;
4880
4881 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4882 return;
4883
4884 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4885 static const uint32_t alpha_formats[] = {
4886 DRM_FORMAT_ARGB8888,
4887 DRM_FORMAT_RGBA8888,
4888 DRM_FORMAT_ABGR8888,
4889 };
4890 uint32_t format = plane_state->fb->format->format;
4891 unsigned int i;
4892
4893 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4894 if (format == alpha_formats[i]) {
4895 *per_pixel_alpha = true;
4896 break;
4897 }
4898 }
4899 }
4900
4901 if (plane_state->alpha < 0xffff) {
4902 *global_alpha = true;
4903 *global_alpha_value = plane_state->alpha >> 8;
4904 }
4905}

static int
fill_plane_color_attributes(const struct drm_plane_state *plane_state,
			    const enum surface_pixel_format format,
			    enum dc_color_space *color_space)
{
	bool full_range;

	*color_space = COLOR_SPACE_SRGB;

	/* DRM color properties only affect non-RGB formats. */
	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
		return 0;

	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);

	switch (plane_state->color_encoding) {
	case DRM_COLOR_YCBCR_BT601:
		if (full_range)
			*color_space = COLOR_SPACE_YCBCR601;
		else
			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
		break;

	case DRM_COLOR_YCBCR_BT709:
		if (full_range)
			*color_space = COLOR_SPACE_YCBCR709;
		else
			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
		break;

	case DRM_COLOR_YCBCR_BT2020:
		if (full_range)
			*color_space = COLOR_SPACE_2020_YCBCR;
		else
			return -EINVAL;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
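
/*
 * For example (illustrative): an NV12 buffer tagged with the common video
 * decoder combination of BT.709 encoding and limited range maps to
 * COLOR_SPACE_YCBCR709_LIMITED above. Only full-range BT.2020 has a DC
 * equivalent here, so limited-range BT.2020 is rejected with -EINVAL.
 */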

static int
fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
			    const struct drm_plane_state *plane_state,
			    const uint64_t tiling_flags,
			    struct dc_plane_info *plane_info,
			    struct dc_plane_address *address,
			    bool tmz_surface,
			    bool force_disable_dcc)
{
	const struct drm_framebuffer *fb = plane_state->fb;
	const struct amdgpu_framebuffer *afb =
		to_amdgpu_framebuffer(plane_state->fb);
	int ret;

	memset(plane_info, 0, sizeof(*plane_info));

	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		plane_info->format =
			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
		break;
	case DRM_FORMAT_RGB565:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
		break;
	case DRM_FORMAT_NV21:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
		break;
	case DRM_FORMAT_NV12:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
		break;
	case DRM_FORMAT_P010:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
		break;
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
		break;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
		break;
	case DRM_FORMAT_XRGB16161616:
	case DRM_FORMAT_ARGB16161616:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
		break;
	case DRM_FORMAT_XBGR16161616:
	case DRM_FORMAT_ABGR16161616:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
		break;
	default:
		DRM_ERROR(
			"Unsupported screen format %p4cc\n",
			&fb->format->format);
		return -EINVAL;
	}

	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
	case DRM_MODE_ROTATE_0:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	case DRM_MODE_ROTATE_90:
		plane_info->rotation = ROTATION_ANGLE_90;
		break;
	case DRM_MODE_ROTATE_180:
		plane_info->rotation = ROTATION_ANGLE_180;
		break;
	case DRM_MODE_ROTATE_270:
		plane_info->rotation = ROTATION_ANGLE_270;
		break;
	default:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	}

	plane_info->visible = true;
	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;

	plane_info->layer_index = 0;

	ret = fill_plane_color_attributes(plane_state, plane_info->format,
					  &plane_info->color_space);
	if (ret)
		return ret;

	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
					   plane_info->rotation, tiling_flags,
					   &plane_info->tiling_info,
					   &plane_info->plane_size,
					   &plane_info->dcc, address, tmz_surface,
					   force_disable_dcc);
	if (ret)
		return ret;

	fill_blending_from_plane_state(
		plane_state, &plane_info->per_pixel_alpha,
		&plane_info->global_alpha, &plane_info->global_alpha_value);

	return 0;
}

static int fill_dc_plane_attributes(struct amdgpu_device *adev,
				    struct dc_plane_state *dc_plane_state,
				    struct drm_plane_state *plane_state,
				    struct drm_crtc_state *crtc_state)
{
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
	struct dc_scaling_info scaling_info;
	struct dc_plane_info plane_info;
	int ret;
	bool force_disable_dcc = false;

	ret = fill_dc_scaling_info(plane_state, &scaling_info);
	if (ret)
		return ret;

	dc_plane_state->src_rect = scaling_info.src_rect;
	dc_plane_state->dst_rect = scaling_info.dst_rect;
	dc_plane_state->clip_rect = scaling_info.clip_rect;
	dc_plane_state->scaling_quality = scaling_info.scaling_quality;

	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
	ret = fill_dc_plane_info_and_addr(adev, plane_state,
					  afb->tiling_flags,
					  &plane_info,
					  &dc_plane_state->address,
					  afb->tmz_surface,
					  force_disable_dcc);
	if (ret)
		return ret;

	dc_plane_state->format = plane_info.format;
	dc_plane_state->color_space = plane_info.color_space;
	dc_plane_state->plane_size = plane_info.plane_size;
	dc_plane_state->rotation = plane_info.rotation;
	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
	dc_plane_state->stereo_format = plane_info.stereo_format;
	dc_plane_state->tiling_info = plane_info.tiling_info;
	dc_plane_state->visible = plane_info.visible;
	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
	dc_plane_state->global_alpha = plane_info.global_alpha;
	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
	dc_plane_state->dcc = plane_info.dcc;
	dc_plane_state->layer_index = plane_info.layer_index; /* Always returns 0 */
	dc_plane_state->flip_int_enabled = true;

	/*
	 * Always set input transfer function, since plane state is refreshed
	 * every time.
	 */
	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
	if (ret)
		return ret;

	return 0;
}

static void update_stream_scaling_settings(const struct drm_display_mode *mode,
					   const struct dm_connector_state *dm_state,
					   struct dc_stream_state *stream)
{
	enum amdgpu_rmx_type rmx_type;

	struct rect src = { 0 }; /* viewport in composition space */
	struct rect dst = { 0 }; /* stream addressable area */

	/* no mode. nothing to be done */
	if (!mode)
		return;

	/* Full screen scaling by default */
	src.width = mode->hdisplay;
	src.height = mode->vdisplay;
	dst.width = stream->timing.h_addressable;
	dst.height = stream->timing.v_addressable;

	if (dm_state) {
		rmx_type = dm_state->scaling;
		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
			if (src.width * dst.height <
					src.height * dst.width) {
				/* height needs less upscaling/more downscaling */
				dst.width = src.width *
						dst.height / src.height;
			} else {
				/* width needs less upscaling/more downscaling */
				dst.height = src.height *
						dst.width / src.width;
			}
		} else if (rmx_type == RMX_CENTER) {
			dst = src;
		}

		dst.x = (stream->timing.h_addressable - dst.width) / 2;
		dst.y = (stream->timing.v_addressable - dst.height) / 2;

		if (dm_state->underscan_enable) {
			dst.x += dm_state->underscan_hborder / 2;
			dst.y += dm_state->underscan_vborder / 2;
			dst.width -= dm_state->underscan_hborder;
			dst.height -= dm_state->underscan_vborder;
		}
	}

	stream->src = src;
	stream->dst = dst;

	DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
		      dst.x, dst.y, dst.width, dst.height);

}
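
/*
 * Worked example for the aspect-ratio path above (illustrative numbers):
 * a 1920x1080 mode on a 1920x1200 panel with RMX_ASPECT compares
 * 1920 * 1200 = 2,304,000 against 1080 * 1920 = 2,073,600; since the first
 * is not smaller, the else branch runs and dst.height becomes
 * 1080 * 1920 / 1920 = 1080. The image is then centered with
 * dst.y = (1200 - 1080) / 2 = 60 lines of letterboxing on each side.
 */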

static enum dc_color_depth
convert_color_depth_from_display_info(const struct drm_connector *connector,
				      bool is_y420, int requested_bpc)
{
	uint8_t bpc;

	if (is_y420) {
		bpc = 8;

		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
			bpc = 16;
		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
			bpc = 12;
		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
			bpc = 10;
	} else {
		bpc = (uint8_t)connector->display_info.bpc;
		/* Assume 8 bpc by default if no bpc is specified. */
		bpc = bpc ? bpc : 8;
	}

	if (requested_bpc > 0) {
		/*
		 * Cap display bpc based on the user requested value.
		 *
		 * The value for state->max_bpc may not be correctly updated
		 * depending on when the connector gets added to the state
		 * or if this was called outside of atomic check, so it
		 * can't be used directly.
		 */
		bpc = min_t(u8, bpc, requested_bpc);

		/* Round down to the nearest even number. */
		bpc = bpc - (bpc & 1);
	}

	switch (bpc) {
	case 0:
		/*
		 * Temporary Work around, DRM doesn't parse color depth for
		 * EDID revision before 1.4
		 * TODO: Fix edid parsing
		 */
		return COLOR_DEPTH_888;
	case 6:
		return COLOR_DEPTH_666;
	case 8:
		return COLOR_DEPTH_888;
	case 10:
		return COLOR_DEPTH_101010;
	case 12:
		return COLOR_DEPTH_121212;
	case 14:
		return COLOR_DEPTH_141414;
	case 16:
		return COLOR_DEPTH_161616;
	default:
		return COLOR_DEPTH_UNDEFINED;
	}
}
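
/*
 * For example (illustrative): a panel reporting 12 bpc in its EDID with the
 * "max bpc" connector property set to 8 yields min(12, 8) = 8, i.e.
 * COLOR_DEPTH_888; an odd request such as 11 is capped to min(12, 11) = 11
 * and then rounded down to the even value 10, giving COLOR_DEPTH_101010.
 */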

static enum dc_aspect_ratio
get_aspect_ratio(const struct drm_display_mode *mode_in)
{
	/* 1-1 mapping, since both enums follow the HDMI spec. */
	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
}

static enum dc_color_space
get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
{
	enum dc_color_space color_space = COLOR_SPACE_SRGB;

	switch (dc_crtc_timing->pixel_encoding) {
	case PIXEL_ENCODING_YCBCR422:
	case PIXEL_ENCODING_YCBCR444:
	case PIXEL_ENCODING_YCBCR420:
	{
		/*
		 * 27030khz is the separation point between HDTV and SDTV
		 * according to HDMI spec, we use YCbCr709 and YCbCr601
		 * respectively
		 */
		if (dc_crtc_timing->pix_clk_100hz > 270300) {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR709_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR709;
		} else {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR601_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR601;
		}

	}
	break;
	case PIXEL_ENCODING_RGB:
		color_space = COLOR_SPACE_SRGB;
		break;

	default:
		WARN_ON(1);
		break;
	}

	return color_space;
}

static bool adjust_colour_depth_from_display_info(
	struct dc_crtc_timing *timing_out,
	const struct drm_display_info *info)
{
	enum dc_color_depth depth = timing_out->display_color_depth;
	int normalized_clk;

	do {
		normalized_clk = timing_out->pix_clk_100hz / 10;
		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
			normalized_clk /= 2;
		/* Adjusting pix clock following on HDMI spec based on colour depth */
		switch (depth) {
		case COLOR_DEPTH_888:
			break;
		case COLOR_DEPTH_101010:
			normalized_clk = (normalized_clk * 30) / 24;
			break;
		case COLOR_DEPTH_121212:
			normalized_clk = (normalized_clk * 36) / 24;
			break;
		case COLOR_DEPTH_161616:
			normalized_clk = (normalized_clk * 48) / 24;
			break;
		default:
			/* The above depths are the only ones valid for HDMI. */
			return false;
		}
		if (normalized_clk <= info->max_tmds_clock) {
			timing_out->display_color_depth = depth;
			return true;
		}
	} while (--depth > COLOR_DEPTH_666);
	return false;
}
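
/*
 * Worked example for the loop above (illustrative numbers): 4k60 RGB has a
 * pixel clock of 594,000 kHz. Against a 600,000 kHz max TMDS clock,
 * 12 bpc needs 594,000 * 36 / 24 = 891,000 kHz and 10 bpc needs
 * 594,000 * 30 / 24 = 742,500 kHz, both too high, so the loop settles on
 * 8 bpc. With YCbCr 4:2:0 the normalized clock halves to 297,000 kHz and
 * even 12 bpc (445,500 kHz) fits, which is why the caller below retries
 * with 4:2:0 when this function fails.
 */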

static void fill_stream_properties_from_drm_display_mode(
	struct dc_stream_state *stream,
	const struct drm_display_mode *mode_in,
	const struct drm_connector *connector,
	const struct drm_connector_state *connector_state,
	const struct dc_stream_state *old_stream,
	int requested_bpc)
{
	struct dc_crtc_timing *timing_out = &stream->timing;
	const struct drm_display_info *info = &connector->display_info;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct hdmi_vendor_infoframe hv_frame;
	struct hdmi_avi_infoframe avi_frame;

	memset(&hv_frame, 0, sizeof(hv_frame));
	memset(&avi_frame, 0, sizeof(avi_frame));

	timing_out->h_border_left = 0;
	timing_out->h_border_right = 0;
	timing_out->v_border_top = 0;
	timing_out->v_border_bottom = 0;
	/* TODO: un-hardcode */
	if (drm_mode_is_420_only(info, mode_in)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if (drm_mode_is_420_also(info, mode_in)
			&& aconnector->force_yuv420_output)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
	else
		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;

	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
	timing_out->display_color_depth = convert_color_depth_from_display_info(
		connector,
		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
		requested_bpc);
	timing_out->scan_type = SCANNING_TYPE_NODATA;
	timing_out->hdmi_vic = 0;

	if (old_stream) {
		timing_out->vic = old_stream->timing.vic;
		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
	} else {
		timing_out->vic = drm_match_cea_mode(mode_in);
		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
	}

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
		timing_out->vic = avi_frame.video_code;
		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
		timing_out->hdmi_vic = hv_frame.vic;
	}

	if (is_freesync_video_mode(mode_in, aconnector)) {
		timing_out->h_addressable = mode_in->hdisplay;
		timing_out->h_total = mode_in->htotal;
		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
		timing_out->v_total = mode_in->vtotal;
		timing_out->v_addressable = mode_in->vdisplay;
		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
		timing_out->pix_clk_100hz = mode_in->clock * 10;
	} else {
		timing_out->h_addressable = mode_in->crtc_hdisplay;
		timing_out->h_total = mode_in->crtc_htotal;
		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
		timing_out->v_total = mode_in->crtc_vtotal;
		timing_out->v_addressable = mode_in->crtc_vdisplay;
		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
	}

	timing_out->aspect_ratio = get_aspect_ratio(mode_in);

	stream->output_color_space = get_output_color_space(timing_out);

	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
		    drm_mode_is_420_also(info, mode_in) &&
		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
			adjust_colour_depth_from_display_info(timing_out, info);
		}
	}
}

static void fill_audio_info(struct audio_info *audio_info,
			    const struct drm_connector *drm_connector,
			    const struct dc_sink *dc_sink)
{
	int i = 0;
	int cea_revision = 0;
	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;

	audio_info->manufacture_id = edid_caps->manufacturer_id;
	audio_info->product_id = edid_caps->product_id;

	cea_revision = drm_connector->display_info.cea_rev;

	strscpy(audio_info->display_name,
		edid_caps->display_name,
		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);

	if (cea_revision >= 3) {
		audio_info->mode_count = edid_caps->audio_mode_count;

		for (i = 0; i < audio_info->mode_count; ++i) {
			audio_info->modes[i].format_code =
					(enum audio_format_code)
					(edid_caps->audio_modes[i].format_code);
			audio_info->modes[i].channel_count =
					edid_caps->audio_modes[i].channel_count;
			audio_info->modes[i].sample_rates.all =
					edid_caps->audio_modes[i].sample_rate;
			audio_info->modes[i].sample_size =
					edid_caps->audio_modes[i].sample_size;
		}
	}

	audio_info->flags.all = edid_caps->speaker_flags;

	/* TODO: We only check for the progressive mode, check for interlace mode too */
	if (drm_connector->latency_present[0]) {
		audio_info->video_latency = drm_connector->video_latency[0];
		audio_info->audio_latency = drm_connector->audio_latency[0];
	}

	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */

}

static void
copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
				      struct drm_display_mode *dst_mode)
{
	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
	dst_mode->crtc_clock = src_mode->crtc_clock;
	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
	dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
	dst_mode->crtc_htotal = src_mode->crtc_htotal;
	dst_mode->crtc_hskew = src_mode->crtc_hskew;
	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
}

static void
decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
					const struct drm_display_mode *native_mode,
					bool scale_enabled)
{
	if (scale_enabled) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else if (native_mode->clock == drm_mode->clock &&
			native_mode->htotal == drm_mode->htotal &&
			native_mode->vtotal == drm_mode->vtotal) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else {
		/* no scaling nor amdgpu inserted, no need to patch */
	}
}

static struct dc_sink *
create_fake_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct dc_sink *sink = NULL;

	sink_init_data.link = aconnector->dc_link;
	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DRM_ERROR("Failed to create sink!\n");
		return NULL;
	}
	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;

	return sink;
}

static void set_multisync_trigger_params(
		struct dc_stream_state *stream)
{
	struct dc_stream_state *master = NULL;

	if (stream->triggered_crtc_reset.enabled) {
		master = stream->triggered_crtc_reset.event_source;
		stream->triggered_crtc_reset.event =
			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
	}
}

static void set_master_stream(struct dc_stream_state *stream_set[],
			      int stream_count)
{
	int j, highest_rfr = 0, master_stream = 0;

	for (j = 0; j < stream_count; j++) {
		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
			int refresh_rate = 0;

			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
			if (refresh_rate > highest_rfr) {
				highest_rfr = refresh_rate;
				master_stream = j;
			}
		}
	}
	for (j = 0; j < stream_count; j++) {
		if (stream_set[j])
			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
	}
}

static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
{
	int i = 0;
	struct dc_stream_state *stream;

	if (context->stream_count < 2)
		return;
	for (i = 0; i < context->stream_count ; i++) {
		if (!context->streams[i])
			continue;
		/*
		 * TODO: add a function to read AMD VSDB bits and set
		 * crtc_sync_master.multi_sync_enabled flag
		 * For now it's set to false
		 */
	}

	set_master_stream(context->streams, context->stream_count);

	for (i = 0; i < context->stream_count ; i++) {
		stream = context->streams[i];

		if (!stream)
			continue;

		set_multisync_trigger_params(stream);
	}
}
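
/*
 * Illustrative refresh-rate math for set_master_stream() above: a 1080p60
 * stream has pix_clk_100hz = 1,485,000 (148.5 MHz) and h_total * v_total =
 * 2200 * 1125 = 2,475,000 pixels per frame, so
 * (1,485,000 * 100) / 2,475,000 = 60 Hz. The stream with the highest such
 * rate becomes the trigger event source for every synchronized CRTC.
 */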

#if defined(CONFIG_DRM_AMD_DC_DCN)
static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
			    struct dc_sink *sink, struct dc_stream_state *stream,
			    struct dsc_dec_dpcd_caps *dsc_caps)
{
	stream->timing.flags.DSC = 0;

	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
				      dsc_caps);
	}
}

static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
					struct dc_sink *sink, struct dc_stream_state *stream,
					struct dsc_dec_dpcd_caps *dsc_caps)
{
	struct drm_connector *drm_connector = &aconnector->base;
	uint32_t link_bandwidth_kbps;

	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
						     dc_link_get_link_cap(aconnector->dc_link));
	/* Set DSC policy according to dsc_clock_en */
	dc_dsc_policy_set_enable_dsc_when_not_needed(
		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);

	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {

		if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
					  dsc_caps,
					  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
					  0,
					  link_bandwidth_kbps,
					  &stream->timing,
					  &stream->timing.dsc_cfg)) {
			stream->timing.flags.DSC = 1;
			DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
		}
	}

	/* Overwrite the stream flag if DSC is enabled through debugfs */
	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
		stream->timing.flags.DSC = 1;

	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;

	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;

	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
}
#endif
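
/*
 * Note (illustrative): the debugfs overrides above layer on top of the
 * computed config. For instance, forcing DSC via dsc_force_enable sets
 * stream->timing.flags.DSC even when dc_dsc_compute_config() decided the
 * link had enough bandwidth without compression, and any non-zero slice
 * count or bits-per-pixel override then replaces the corresponding field
 * of the computed dsc_cfg.
 */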

static struct drm_display_mode *
get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
			      bool use_probed_modes)
{
	struct drm_display_mode *m, *m_pref = NULL;
	u16 current_refresh, highest_refresh;
	struct list_head *list_head = use_probed_modes ?
		&aconnector->base.probed_modes :
		&aconnector->base.modes;

	if (aconnector->freesync_vid_base.clock != 0)
		return &aconnector->freesync_vid_base;

	/* Find the preferred mode */
	list_for_each_entry (m, list_head, head) {
		if (m->type & DRM_MODE_TYPE_PREFERRED) {
			m_pref = m;
			break;
		}
	}

	if (!m_pref) {
		/* Probably an EDID with no preferred mode. Fallback to first entry */
		m_pref = list_first_entry_or_null(
			&aconnector->base.modes, struct drm_display_mode, head);
		if (!m_pref) {
			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
			return NULL;
		}
	}

	highest_refresh = drm_mode_vrefresh(m_pref);

	/*
	 * Find the mode with highest refresh rate with same resolution.
	 * For some monitors, preferred mode is not the mode with highest
	 * supported refresh rate.
	 */
	list_for_each_entry (m, list_head, head) {
		current_refresh = drm_mode_vrefresh(m);

		if (m->hdisplay == m_pref->hdisplay &&
		    m->vdisplay == m_pref->vdisplay &&
		    highest_refresh < current_refresh) {
			highest_refresh = current_refresh;
			m_pref = m;
		}
	}

	aconnector->freesync_vid_base = *m_pref;
	return m_pref;
}
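
/*
 * For example (illustrative): a 2560x1440 monitor whose EDID marks the
 * 60 Hz mode as preferred but also lists a 144 Hz mode at the same
 * resolution ends up with the 144 Hz timing cached in freesync_vid_base,
 * since the second loop keeps the highest refresh rate found at the
 * preferred resolution.
 */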

static bool is_freesync_video_mode(const struct drm_display_mode *mode,
				   struct amdgpu_dm_connector *aconnector)
{
	struct drm_display_mode *high_mode;
	int timing_diff;

	high_mode = get_highest_refresh_rate_mode(aconnector, false);
	if (!high_mode || !mode)
		return false;

	timing_diff = high_mode->vtotal - mode->vtotal;

	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
	    high_mode->hdisplay != mode->hdisplay ||
	    high_mode->vdisplay != mode->vdisplay ||
	    high_mode->hsync_start != mode->hsync_start ||
	    high_mode->hsync_end != mode->hsync_end ||
	    high_mode->htotal != mode->htotal ||
	    high_mode->hskew != mode->hskew ||
	    high_mode->vscan != mode->vscan ||
	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
	    high_mode->vsync_end - mode->vsync_end != timing_diff)
		return false;
	else
		return true;
}
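
/*
 * In other words (illustrative): a mode counts as a freesync video mode if
 * it matches the cached base mode in everything except vtotal, i.e. only
 * the vertical front porch is stretched. A 144 Hz base timing whose vtotal
 * is inflated by a constant number of lines to land at 60 Hz passes this
 * test, so it can be reached by adjusting the front porch instead of doing
 * a full modeset.
 */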

static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
		       const struct drm_display_mode *drm_mode,
		       const struct dm_connector_state *dm_state,
		       const struct dc_stream_state *old_stream,
		       int requested_bpc)
{
	struct drm_display_mode *preferred_mode = NULL;
	struct drm_connector *drm_connector;
	const struct drm_connector_state *con_state =
		dm_state ? &dm_state->base : NULL;
	struct dc_stream_state *stream = NULL;
	struct drm_display_mode mode = *drm_mode;
	struct drm_display_mode saved_mode;
	struct drm_display_mode *freesync_mode = NULL;
	bool native_mode_found = false;
	bool recalculate_timing = false;
	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
	int mode_refresh;
	int preferred_refresh = 0;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	struct dsc_dec_dpcd_caps dsc_caps;
#endif
	struct dc_sink *sink = NULL;

	memset(&saved_mode, 0, sizeof(saved_mode));

	if (aconnector == NULL) {
		DRM_ERROR("aconnector is NULL!\n");
		return stream;
	}

	drm_connector = &aconnector->base;

	if (!aconnector->dc_sink) {
		sink = create_fake_sink(aconnector);
		if (!sink)
			return stream;
	} else {
		sink = aconnector->dc_sink;
		dc_sink_retain(sink);
	}

	stream = dc_create_stream_for_sink(sink);

	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto finish;
	}

	stream->dm_stream_context = aconnector;

	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;

	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
		/* Search for preferred mode */
		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
			native_mode_found = true;
			break;
		}
	}
	if (!native_mode_found)
		preferred_mode = list_first_entry_or_null(
				&aconnector->base.modes,
				struct drm_display_mode,
				head);

	mode_refresh = drm_mode_vrefresh(&mode);

	if (preferred_mode == NULL) {
		/*
		 * This may not be an error, the use case is when we have no
		 * usermode calls to reset and set mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous mode
		 * and the modelist may not be filled in in time.
		 */
		DRM_DEBUG_DRIVER("No preferred mode found\n");
	} else {
		recalculate_timing = amdgpu_freesync_vid_mode &&
				 is_freesync_video_mode(&mode, aconnector);
		if (recalculate_timing) {
			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
			saved_mode = mode;
			mode = *freesync_mode;
		} else {
			decide_crtc_timing_for_drm_display_mode(
				&mode, preferred_mode, scale);

			preferred_refresh = drm_mode_vrefresh(preferred_mode);
		}
	}

	if (recalculate_timing)
		drm_mode_set_crtcinfo(&saved_mode, 0);
	else if (!dm_state)
		drm_mode_set_crtcinfo(&mode, 0);

	/*
	 * If scaling is enabled and refresh rate didn't change
	 * we copy the vic and polarities of the old timings
	 */
	if (!scale || mode_refresh != preferred_refresh)
		fill_stream_properties_from_drm_display_mode(
			stream, &mode, &aconnector->base, con_state, NULL,
			requested_bpc);
	else
		fill_stream_properties_from_drm_display_mode(
			stream, &mode, &aconnector->base, con_state, old_stream,
			requested_bpc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
	/* SST DSC determination policy */
	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
	if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
		apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
#endif

	update_stream_scaling_settings(&mode, dm_state, stream);

	fill_audio_info(
		&stream->audio_info,
		drm_connector,
		sink);

	update_stream_signal(stream, sink);

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);

	if (stream->link->psr_settings.psr_feature_enabled) {
		/*
		 * Decide whether the stream supports VSC SDP colorimetry
		 * before building the VSC infopacket.
		 */
		stream->use_vsc_sdp_for_colorimetry = false;
		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
			stream->use_vsc_sdp_for_colorimetry =
				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
		} else {
			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
				stream->use_vsc_sdp_for_colorimetry = true;
		}
		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
	}
finish:
	dc_sink_release(sink);

	return stream;
}

static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}

static void dm_crtc_destroy_state(struct drm_crtc *crtc,
				  struct drm_crtc_state *state)
{
	struct dm_crtc_state *cur = to_dm_crtc_state(state);

	/* TODO: destroy dc_stream objects once the stream object is flattened */
	if (cur->stream)
		dc_stream_release(cur->stream);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(state);
}

static void dm_crtc_reset_state(struct drm_crtc *crtc)
{
	struct dm_crtc_state *state;

	if (crtc->state)
		dm_crtc_destroy_state(crtc, crtc->state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (WARN_ON(!state))
		return;

	__drm_atomic_helper_crtc_reset(crtc, &state->base);
}

static struct drm_crtc_state *
dm_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct dm_crtc_state *state, *cur;

	cur = to_dm_crtc_state(crtc->state);

	if (WARN_ON(!crtc->state))
		return NULL;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);

	if (cur->stream) {
		state->stream = cur->stream;
		dc_stream_retain(state->stream);
	}

	state->active_planes = cur->active_planes;
	state->vrr_infopacket = cur->vrr_infopacket;
	state->abm_level = cur->abm_level;
	state->vrr_supported = cur->vrr_supported;
	state->freesync_config = cur->freesync_config;
	state->cm_has_degamma = cur->cm_has_degamma;
	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
	/* TODO: duplicate dc_stream once the stream object is flattened */

	return &state->base;
}

#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
{
	crtc_debugfs_init(crtc);

	return 0;
}
#endif

static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
	int rc;

	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;

	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;

	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
		      acrtc->crtc_id, enable ? "en" : "dis", rc);
	return rc;
}

static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
#if defined(CONFIG_DRM_AMD_DC_DCN)
	struct amdgpu_display_manager *dm = &adev->dm;
	unsigned long flags;
#endif
	int rc = 0;

	if (enable) {
		/* vblank irq on -> Only need vupdate irq in vrr mode */
		if (amdgpu_dm_vrr_active(acrtc_state))
			rc = dm_set_vupdate_irq(crtc, true);
	} else {
		/* vblank irq off -> vupdate irq off */
		rc = dm_set_vupdate_irq(crtc, false);
	}

	if (rc)
		return rc;

	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;

	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
		return -EBUSY;

	if (amdgpu_in_reset(adev))
		return 0;

#if defined(CONFIG_DRM_AMD_DC_DCN)
	spin_lock_irqsave(&dm->vblank_lock, flags);
	dm->vblank_workqueue->dm = dm;
	dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
	dm->vblank_workqueue->enable = enable;
	spin_unlock_irqrestore(&dm->vblank_lock, flags);
	schedule_work(&dm->vblank_workqueue->mall_work);
#endif

	return 0;
}

static int dm_enable_vblank(struct drm_crtc *crtc)
{
	return dm_set_vblank(crtc, true);
}

static void dm_disable_vblank(struct drm_crtc *crtc)
{
	dm_set_vblank(crtc, false);
}
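
/*
 * Note (illustrative, not normative): the VUPDATE interrupt is only armed
 * alongside vblank when variable refresh is active because with VRR the
 * front porch length, and therefore the vblank timing, is not fixed;
 * VUPDATE fires when the registers latch, letting the driver keep frame
 * timing accurate where a fixed-timing mode could rely on vblank alone.
 */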

/* Only the options currently available to the driver are implemented */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
	.reset = dm_crtc_reset_state,
	.destroy = amdgpu_dm_crtc_destroy,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = dm_crtc_duplicate_state,
	.atomic_destroy_state = dm_crtc_destroy_state,
	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
	.enable_vblank = dm_enable_vblank,
	.disable_vblank = dm_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	.late_register = amdgpu_dm_crtc_late_register,
#endif
};

static enum drm_connector_status
amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
{
	bool connected;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	/*
	 * Notes:
	 * 1. This interface is NOT called in context of HPD irq.
	 * 2. This interface *is called* in context of user-mode ioctl. Which
	 * makes it a bad place for *any* MST-related activity.
	 */

	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
	    !aconnector->fake_enable)
		connected = (aconnector->dc_sink != NULL);
	else
		connected = (aconnector->base.force == DRM_FORCE_ON);

	update_subconnector_property(aconnector);

	return (connected ? connector_status_connected :
			connector_status_disconnected);
}

int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
					    struct drm_connector_state *connector_state,
					    struct drm_property *property,
					    uint64_t val)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_connector_state *dm_old_state =
		to_dm_connector_state(connector->state);
	struct dm_connector_state *dm_new_state =
		to_dm_connector_state(connector_state);

	int ret = -EINVAL;

	if (property == dev->mode_config.scaling_mode_property) {
		enum amdgpu_rmx_type rmx_type;

		switch (val) {
		case DRM_MODE_SCALE_CENTER:
			rmx_type = RMX_CENTER;
			break;
		case DRM_MODE_SCALE_ASPECT:
			rmx_type = RMX_ASPECT;
			break;
		case DRM_MODE_SCALE_FULLSCREEN:
			rmx_type = RMX_FULL;
			break;
		case DRM_MODE_SCALE_NONE:
		default:
			rmx_type = RMX_OFF;
			break;
		}

		if (dm_old_state->scaling == rmx_type)
			return 0;

		dm_new_state->scaling = rmx_type;
		ret = 0;
	} else if (property == adev->mode_info.underscan_hborder_property) {
		dm_new_state->underscan_hborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_vborder_property) {
		dm_new_state->underscan_vborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_property) {
		dm_new_state->underscan_enable = val;
		ret = 0;
	} else if (property == adev->mode_info.abm_level_property) {
		dm_new_state->abm_level = val;
		ret = 0;
	}

	return ret;
}

int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
					    const struct drm_connector_state *state,
					    struct drm_property *property,
					    uint64_t *val)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_connector_state *dm_state =
		to_dm_connector_state(state);
	int ret = -EINVAL;

	if (property == dev->mode_config.scaling_mode_property) {
		switch (dm_state->scaling) {
		case RMX_CENTER:
			*val = DRM_MODE_SCALE_CENTER;
			break;
		case RMX_ASPECT:
			*val = DRM_MODE_SCALE_ASPECT;
			break;
		case RMX_FULL:
			*val = DRM_MODE_SCALE_FULLSCREEN;
			break;
		case RMX_OFF:
		default:
			*val = DRM_MODE_SCALE_NONE;
			break;
		}
		ret = 0;
	} else if (property == adev->mode_info.underscan_hborder_property) {
		*val = dm_state->underscan_hborder;
		ret = 0;
	} else if (property == adev->mode_info.underscan_vborder_property) {
		*val = dm_state->underscan_vborder;
		ret = 0;
	} else if (property == adev->mode_info.underscan_property) {
		*val = dm_state->underscan_enable;
		ret = 0;
	} else if (property == adev->mode_info.abm_level_property) {
		*val = dm_state->abm_level;
		ret = 0;
	}

	return ret;
}

static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);

	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
}

static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	const struct dc_link *link = aconnector->dc_link;
	struct amdgpu_device *adev = drm_to_adev(connector->dev);
	struct amdgpu_display_manager *dm = &adev->dm;

	/*
	 * Call only if mst_mgr was initialized before, since it's not done
	 * for all connector types.
	 */
	if (aconnector->mst_mgr.dev)
		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none &&
	    dm->backlight_dev) {
		backlight_device_unregister(dm->backlight_dev);
		dm->backlight_dev = NULL;
	}
#endif

	if (aconnector->dc_em_sink)
		dc_sink_release(aconnector->dc_em_sink);
	aconnector->dc_em_sink = NULL;
	if (aconnector->dc_sink)
		dc_sink_release(aconnector->dc_sink);
	aconnector->dc_sink = NULL;

	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	if (aconnector->i2c) {
		i2c_del_adapter(&aconnector->i2c->base);
		kfree(aconnector->i2c);
	}
	kfree(aconnector->dm_dp_aux.aux.name);

	kfree(connector);
}

void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	if (connector->state)
		__drm_atomic_helper_connector_destroy_state(connector->state);

	kfree(state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (state) {
		state->scaling = RMX_OFF;
		state->underscan_enable = false;
		state->underscan_hborder = 0;
		state->underscan_vborder = 0;
		state->base.max_requested_bpc = 8;
		state->vcpi_slots = 0;
		state->pbn = 0;
		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
			state->abm_level = amdgpu_dm_abm_level;

		__drm_atomic_helper_connector_reset(connector, &state->base);
	}
}

struct drm_connector_state *
amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	struct dm_connector_state *new_state =
			kmemdup(state, sizeof(*state), GFP_KERNEL);

	if (!new_state)
		return NULL;

	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);

	new_state->freesync_capable = state->freesync_capable;
	new_state->abm_level = state->abm_level;
	new_state->scaling = state->scaling;
	new_state->underscan_enable = state->underscan_enable;
	new_state->underscan_hborder = state->underscan_hborder;
	new_state->underscan_vborder = state->underscan_vborder;
	new_state->vcpi_slots = state->vcpi_slots;
	new_state->pbn = state->pbn;
	return &new_state->base;
}

static int
amdgpu_dm_connector_late_register(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	int r;

	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
		if (r)
			return r;
	}

#if defined(CONFIG_DEBUG_FS)
	connector_debugfs_init(amdgpu_dm_connector);
#endif

	return 0;
}

static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
	.reset = amdgpu_dm_connector_funcs_reset,
	.detect = amdgpu_dm_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = amdgpu_dm_connector_destroy,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
	.late_register = amdgpu_dm_connector_late_register,
	.early_unregister = amdgpu_dm_connector_unregister
};

static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}

static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data init_params = {
			.link = aconnector->dc_link,
			.sink_signal = SIGNAL_TYPE_VIRTUAL
	};
	struct edid *edid;

	if (!aconnector->base.edid_blob_ptr) {
		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
			  aconnector->base.name);

		aconnector->base.force = DRM_FORCE_OFF;
		aconnector->base.override_edid = false;
		return;
	}

	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;

	aconnector->edid = edid;

	aconnector->dc_em_sink = dc_link_add_remote_sink(
		aconnector->dc_link,
		(uint8_t *)edid,
		(edid->extensions + 1) * EDID_LENGTH,
		&init_params);

	if (aconnector->base.force == DRM_FORCE_ON) {
		aconnector->dc_sink = aconnector->dc_link->local_sink ?
			aconnector->dc_link->local_sink :
			aconnector->dc_em_sink;
		dc_sink_retain(aconnector->dc_sink);
	}
}

static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = (struct dc_link *)aconnector->dc_link;

	/*
	 * In case of a headless boot with force on for a DP managed connector,
	 * these settings have to be != 0 to get an initial modeset.
	 */
	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
	}

	aconnector->base.override_edid = true;
	create_eml_sink(aconnector);
}

static struct dc_stream_state *
create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
				const struct drm_display_mode *drm_mode,
				const struct dm_connector_state *dm_state,
				const struct dc_stream_state *old_stream)
{
	struct drm_connector *connector = &aconnector->base;
	struct amdgpu_device *adev = drm_to_adev(connector->dev);
	struct dc_stream_state *stream;
	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
	enum dc_status dc_result = DC_OK;

	do {
		stream = create_stream_for_sink(aconnector, drm_mode,
						dm_state, old_stream,
						requested_bpc);
		if (stream == NULL) {
			DRM_ERROR("Failed to create stream for sink!\n");
			break;
		}

		dc_result = dc_validate_stream(adev->dm.dc, stream);

		if (dc_result != DC_OK) {
			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
				      drm_mode->hdisplay,
				      drm_mode->vdisplay,
				      drm_mode->clock,
				      dc_result,
				      dc_status_to_str(dc_result));

			dc_stream_release(stream);
			stream = NULL;
			requested_bpc -= 2; /* lower bpc to retry validation */
		}

	} while (stream == NULL && requested_bpc >= 6);

	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");

		aconnector->force_yuv420_output = true;
		stream = create_validate_stream_for_sink(aconnector, drm_mode,
						dm_state, old_stream);
		aconnector->force_yuv420_output = false;
	}

	return stream;
}
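
/*
 * Fallback ladder (illustrative): with max_requested_bpc = 10 the loop
 * tries 10, then 8, then 6 bpc, releasing the rejected stream each time;
 * if the last failure was DC_FAIL_ENC_VALIDATE the whole ladder is retried
 * once more with YCbCr 4:2:0 forced, which roughly halves the required
 * link bandwidth.
 */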

enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
				   struct drm_display_mode *mode)
{
	int result = MODE_ERROR;
	struct dc_sink *dc_sink;
	/* TODO: Unhardcode stream count */
	struct dc_stream_state *stream;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
	    (mode->flags & DRM_MODE_FLAG_DBLSCAN))
		return result;

	/*
	 * Only run this the first time mode_valid is called to initialize
	 * EDID mgmt
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
	    !aconnector->dc_em_sink)
		handle_edid_mgmt(aconnector);

	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;

	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
	    aconnector->base.force != DRM_FORCE_ON) {
		DRM_ERROR("dc_sink is NULL!\n");
		goto fail;
	}

	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
	if (stream) {
		dc_stream_release(stream);
		result = MODE_OK;
	}

fail:
	/* TODO: error handling */
	return result;
}

static int fill_hdr_info_packet(const struct drm_connector_state *state,
				struct dc_info_packet *out)
{
	struct hdmi_drm_infoframe frame;
	unsigned char buf[30]; /* 26 + 4 */
	ssize_t len;
	int ret, i;

	memset(out, 0, sizeof(*out));

	if (!state->hdr_output_metadata)
		return 0;

	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
	if (ret)
		return ret;

	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
	if (len < 0)
		return (int)len;

	/* Static metadata is a fixed 26 bytes + 4 byte header. */
	if (len != 30)
		return -EINVAL;

	/* Prepare the infopacket for DC. */
	switch (state->connector->connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		out->hb0 = 0x87; /* type */
		out->hb1 = 0x01; /* version */
		out->hb2 = 0x1A; /* length */
		out->sb[0] = buf[3]; /* checksum */
		i = 1;
		break;

	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		out->hb0 = 0x00; /* sdp id, zero */
		out->hb1 = 0x87; /* type */
		out->hb2 = 0x1D; /* payload len - 1 */
		out->hb3 = (0x13 << 2); /* sdp version */
		out->sb[0] = 0x01; /* version */
		out->sb[1] = 0x1A; /* length */
		i = 2;
		break;

	default:
		return -EINVAL;
	}

	memcpy(&out->sb[i], &buf[4], 26);
	out->valid = true;

	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
		       sizeof(out->sb), false);

	return 0;
}
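
/*
 * Buffer layout (illustrative): hdmi_drm_infoframe_pack_only() emits
 * buf[0..3] as the infoframe header (type 0x87, version, length, checksum)
 * followed by the 26-byte HDR static metadata payload in buf[4..29]. The
 * switch above only rebuilds the header for the target transport (HDMI
 * infoframe vs DP secondary data packet) and copies the payload through
 * unchanged.
 */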

static int
amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
				 struct drm_atomic_state *state)
{
	struct drm_connector_state *new_con_state =
		drm_atomic_get_new_connector_state(state, conn);
	struct drm_connector_state *old_con_state =
		drm_atomic_get_old_connector_state(state, conn);
	struct drm_crtc *crtc = new_con_state->crtc;
	struct drm_crtc_state *new_crtc_state;
	int ret;

	trace_amdgpu_dm_connector_atomic_check(new_con_state);

	if (!crtc)
		return 0;

	if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
		struct dc_info_packet hdr_infopacket;

		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
		if (ret)
			return ret;

		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(new_crtc_state))
			return PTR_ERR(new_crtc_state);

		/*
		 * DC considers the stream backends changed if the
		 * static metadata changes. Forcing the modeset also
		 * gives a simple way for userspace to switch from
		 * 8bpc to 10bpc when setting the metadata to enter
		 * or exit HDR.
		 *
		 * Changing the static metadata after it's been
		 * set is permissible, however. So only force a
		 * modeset if we're entering or exiting HDR.
		 */
		new_crtc_state->mode_changed =
			!old_con_state->hdr_output_metadata ||
			!new_con_state->hdr_output_metadata;
	}

	return 0;
}
6565
e7b07cee
HW
6566static const struct drm_connector_helper_funcs
6567amdgpu_dm_connector_helper_funcs = {
6568 /*
1f6010a9 6569 * If hotplugging a second bigger display in FB Con mode, bigger resolution
b830ebc9 6570 * modes will be filtered by drm_mode_validate_size(), and those modes
1f6010a9 6571 * are missing after user start lightdm. So we need to renew modes list.
b830ebc9
HW
6572 * in get_modes call back, not just return the modes count
6573 */
e7b07cee
HW
6574 .get_modes = get_modes,
6575 .mode_valid = amdgpu_dm_connector_mode_valid,
88694af9 6576 .atomic_check = amdgpu_dm_connector_atomic_check,
e7b07cee
HW
6577};
6578
6579static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6580{
6581}
6582
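/*
 * Count the non-cursor planes that will be enabled on the CRTC once
 * this atomic state is committed; cursor planes are "fake" and are
 * excluded.
 */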
static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
{
	struct drm_atomic_state *state = new_crtc_state->state;
	struct drm_plane *plane;
	int num_active = 0;

	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
		struct drm_plane_state *new_plane_state;

		/* Cursor planes are "fake". */
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		new_plane_state = drm_atomic_get_new_plane_state(state, plane);

		if (!new_plane_state) {
			/*
			 * The plane is enabled on the CRTC and hasn't changed
			 * state. This means that it previously passed
			 * validation and is therefore enabled.
			 */
			num_active += 1;
			continue;
		}

		/* We need a framebuffer to be considered enabled. */
		num_active += (new_plane_state->fb != NULL);
	}

	return num_active;
}

static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
					 struct drm_crtc_state *new_crtc_state)
{
	struct dm_crtc_state *dm_new_crtc_state =
		to_dm_crtc_state(new_crtc_state);

	dm_new_crtc_state->active_planes = 0;

	if (!dm_new_crtc_state->stream)
		return;

	dm_new_crtc_state->active_planes =
		count_crtc_active_planes(new_crtc_state);
}

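/*
 * CRTC atomic check: refresh the active plane count, require the
 * primary plane whenever the CRTC is enabled, and run DC stream
 * validation on the attached stream.
 */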
static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
				       struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
	struct dc *dc = adev->dm.dc;
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
	int ret = -EINVAL;

	trace_amdgpu_dm_crtc_atomic_check(crtc_state);

	dm_update_crtc_active_planes(crtc, crtc_state);

	if (WARN_ON(unlikely(!dm_crtc_state->stream &&
		     modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
		return ret;
	}

	/*
	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
	 * planes are disabled, which is not supported by the hardware. And there is legacy
	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
	 */
	if (crtc_state->enable &&
	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
		return -EINVAL;
	}

	/* In some use cases, like reset, no stream is attached */
	if (!dm_crtc_state->stream)
		return 0;

	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
		return 0;

	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
	return ret;
}

static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
				      const struct drm_display_mode *mode,
				      struct drm_display_mode *adjusted_mode)
{
	return true;
}

static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
	.disable = dm_crtc_helper_disable,
	.atomic_check = dm_crtc_helper_atomic_check,
	.mode_fixup = dm_crtc_helper_mode_fixup,
	.get_scanout_position = amdgpu_crtc_get_scanout_position,
};

static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{

}

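/* Translate a DC color depth into bits per component; 0 if unknown. */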
static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
{
	switch (display_color_depth) {
	case COLOR_DEPTH_666:
		return 6;
	case COLOR_DEPTH_888:
		return 8;
	case COLOR_DEPTH_101010:
		return 10;
	case COLOR_DEPTH_121212:
		return 12;
	case COLOR_DEPTH_141414:
		return 14;
	case COLOR_DEPTH_161616:
		return 16;
	default:
		break;
	}
	return 0;
}

static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
					  struct drm_crtc_state *crtc_state,
					  struct drm_connector_state *conn_state)
{
	struct drm_atomic_state *state = crtc_state->state;
	struct drm_connector *connector = conn_state->connector;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;
	enum dc_color_depth color_depth;
	int clock, bpp = 0;
	bool is_y420 = false;

	if (!aconnector->port || !aconnector->dc_sink)
		return 0;

	mst_port = aconnector->port;
	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
		return 0;

	if (!state->duplicated) {
		int max_bpc = conn_state->max_requested_bpc;
		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
			  aconnector->force_yuv420_output;
		color_depth = convert_color_depth_from_display_info(connector,
								    is_y420,
								    max_bpc);
		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
		clock = adjusted_mode->clock;
		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
	}
	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
									   mst_mgr,
									   mst_port,
									   dm_new_connector_state->pbn,
									   dm_mst_get_pbn_divider(aconnector->dc_link));
	if (dm_new_connector_state->vcpi_slots < 0) {
		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
		return dm_new_connector_state->vcpi_slots;
	}
	return 0;
}

const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
	.disable = dm_encoder_helper_disable,
	.atomic_check = dm_encoder_helper_atomic_check
};

#if defined(CONFIG_DRM_AMD_DC_DCN)
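/*
 * For each MST connector in the atomic state, enable or disable DSC on
 * its port and recompute the PBN/VCPI slot allocation from the DSC
 * target bitrate.
 */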
static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
					    struct dc_state *dc_state)
{
	struct dc_stream_state *stream = NULL;
	struct drm_connector *connector;
	struct drm_connector_state *new_con_state;
	struct amdgpu_dm_connector *aconnector;
	struct dm_connector_state *dm_conn_state;
	int i, j, clock, bpp;
	int vcpi, pbn_div, pbn = 0;

	for_each_new_connector_in_state(state, connector, new_con_state, i) {

		aconnector = to_amdgpu_dm_connector(connector);

		if (!aconnector->port)
			continue;

		if (!new_con_state || !new_con_state->crtc)
			continue;

		dm_conn_state = to_dm_connector_state(new_con_state);

		for (j = 0; j < dc_state->stream_count; j++) {
			stream = dc_state->streams[j];
			if (!stream)
				continue;

			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
				break;

			stream = NULL;
		}

		if (!stream)
			continue;

		if (stream->timing.flags.DSC != 1) {
			drm_dp_mst_atomic_enable_dsc(state,
						     aconnector->port,
						     dm_conn_state->pbn,
						     0,
						     false);
			continue;
		}

		pbn_div = dm_mst_get_pbn_divider(stream->link);
		bpp = stream->timing.dsc_cfg.bits_per_pixel;
		clock = stream->timing.pix_clk_100hz / 10;
		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
		vcpi = drm_dp_mst_atomic_enable_dsc(state,
						    aconnector->port,
						    pbn, pbn_div,
						    true);
		if (vcpi < 0)
			return vcpi;

		dm_conn_state->pbn = pbn;
		dm_conn_state->vcpi_slots = vcpi;
	}
	return 0;
}
#endif

static void dm_drm_plane_reset(struct drm_plane *plane)
{
	struct dm_plane_state *amdgpu_state = NULL;

	if (plane->state)
		plane->funcs->atomic_destroy_state(plane, plane->state);

	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
	WARN_ON(amdgpu_state == NULL);

	if (amdgpu_state)
		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
}

static struct drm_plane_state *
dm_drm_plane_duplicate_state(struct drm_plane *plane)
{
	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;

	old_dm_plane_state = to_dm_plane_state(plane->state);
	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
	if (!dm_plane_state)
		return NULL;

	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);

	if (old_dm_plane_state->dc_state) {
		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
		dc_plane_state_retain(dm_plane_state->dc_state);
	}

	return &dm_plane_state->base;
}

static void dm_drm_plane_destroy_state(struct drm_plane *plane,
				       struct drm_plane_state *state)
{
	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);

	if (dm_plane_state->dc_state)
		dc_plane_state_release(dm_plane_state->dc_state);

	drm_atomic_helper_plane_destroy_state(plane, state);
}

static const struct drm_plane_funcs dm_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = drm_primary_helper_destroy,
	.reset = dm_drm_plane_reset,
	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
	.atomic_destroy_state = dm_drm_plane_destroy_state,
	.format_mod_supported = dm_plane_format_mod_supported,
};

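/*
 * prepare_fb: reserve and pin the framebuffer BO (VRAM, or any domain
 * supported for scanout for non-cursor planes), bind it into GART and
 * record the GPU address for DC. For newly created planes the DC
 * buffer attributes are also filled in here, since the address is not
 * known at atomic check time.
 */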
static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
				      struct drm_plane_state *new_state)
{
	struct amdgpu_framebuffer *afb;
	struct drm_gem_object *obj;
	struct amdgpu_device *adev;
	struct amdgpu_bo *rbo;
	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
	struct list_head list;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	uint32_t domain;
	int r;

	if (!new_state->fb) {
		DRM_DEBUG_KMS("No FB bound\n");
		return 0;
	}

	afb = to_amdgpu_framebuffer(new_state->fb);
	obj = new_state->fb->obj[0];
	rbo = gem_to_amdgpu_bo(obj);
	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
	INIT_LIST_HEAD(&list);

	tv.bo = &rbo->tbo;
	tv.num_shared = 1;
	list_add(&tv.head, &list);

	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
	if (r) {
		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
		return r;
	}

	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		domain = amdgpu_display_supported_domains(adev, rbo->flags);
	else
		domain = AMDGPU_GEM_DOMAIN_VRAM;

	r = amdgpu_bo_pin(rbo, domain);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
	if (unlikely(r != 0)) {
		amdgpu_bo_unpin(rbo);
		ttm_eu_backoff_reservation(&ticket, &list);
		DRM_ERROR("%p bind failed\n", rbo);
		return r;
	}

	ttm_eu_backoff_reservation(&ticket, &list);

	afb->address = amdgpu_bo_gpu_offset(rbo);

	amdgpu_bo_ref(rbo);

	/*
	 * We don't do surface updates on planes that have been newly created,
	 * but we also don't have the afb->address during atomic check.
	 *
	 * Fill in buffer attributes depending on the address here, but only on
	 * newly created planes since they're not being used by DC yet and this
	 * won't modify global state.
	 */
	dm_plane_state_old = to_dm_plane_state(plane->state);
	dm_plane_state_new = to_dm_plane_state(new_state);

	if (dm_plane_state_new->dc_state &&
	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
		struct dc_plane_state *plane_state =
			dm_plane_state_new->dc_state;
		bool force_disable_dcc = !plane_state->dcc.enable;

		fill_plane_buffer_attributes(
			adev, afb, plane_state->format, plane_state->rotation,
			afb->tiling_flags,
			&plane_state->tiling_info, &plane_state->plane_size,
			&plane_state->dcc, &plane_state->address,
			afb->tmz_surface, force_disable_dcc);
	}

	return 0;
}

static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
				       struct drm_plane_state *old_state)
{
	struct amdgpu_bo *rbo;
	int r;

	if (!old_state->fb)
		return;

	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
	r = amdgpu_bo_reserve(rbo, false);
	if (unlikely(r)) {
		DRM_ERROR("failed to reserve rbo before unpin\n");
		return;
	}

	amdgpu_bo_unpin(rbo);
	amdgpu_bo_unreserve(rbo);
	amdgpu_bo_unref(&rbo);
}

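/*
 * Validate the plane's viewport against the CRTC mode and derive the
 * min/max scaling factors from the DC plane caps before handing off to
 * drm_atomic_helper_check_plane_state().
 */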
static int dm_plane_helper_check_state(struct drm_plane_state *state,
				       struct drm_crtc_state *new_crtc_state)
{
	struct drm_framebuffer *fb = state->fb;
	int min_downscale, max_upscale;
	int min_scale = 0;
	int max_scale = INT_MAX;

	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
	if (fb && state->crtc) {
		/* Validate viewport to cover the case when only the position changes */
		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
			int viewport_width = state->crtc_w;
			int viewport_height = state->crtc_h;

			if (state->crtc_x < 0)
				viewport_width += state->crtc_x;
			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;

			if (state->crtc_y < 0)
				viewport_height += state->crtc_y;
			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;

			if (viewport_width < 0 || viewport_height < 0) {
				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
				return -EINVAL;
			} else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
				return -EINVAL;
			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
				return -EINVAL;
			}

		}

		/* Get min/max allowed scaling factors from plane caps. */
		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
					     &min_downscale, &max_upscale);
		/*
		 * Convert to drm convention: 16.16 fixed point, instead of dc's
		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
		 */
		min_scale = (1000 << 16) / max_upscale;
		max_scale = (1000 << 16) / min_downscale;
	}

	return drm_atomic_helper_check_plane_state(
		state, new_crtc_state, min_scale, max_scale, true, true);
}

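/*
 * Plane atomic check: run the viewport/scaling checks above, then let
 * DC validate the resulting plane state.
 */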
static int dm_plane_atomic_check(struct drm_plane *plane,
				 struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
										 plane);
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	struct dc *dc = adev->dm.dc;
	struct dm_plane_state *dm_plane_state;
	struct dc_scaling_info scaling_info;
	struct drm_crtc_state *new_crtc_state;
	int ret;

	trace_amdgpu_dm_plane_atomic_check(new_plane_state);

	dm_plane_state = to_dm_plane_state(new_plane_state);

	if (!dm_plane_state->dc_state)
		return 0;

	new_crtc_state =
		drm_atomic_get_new_crtc_state(state,
					      new_plane_state->crtc);
	if (!new_crtc_state)
		return -EINVAL;

	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
	if (ret)
		return ret;

	ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
	if (ret)
		return ret;

	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
		return 0;

	return -EINVAL;
}

static int dm_plane_atomic_async_check(struct drm_plane *plane,
				       struct drm_atomic_state *state)
{
	/* Only support async updates on cursor planes. */
	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		return -EINVAL;

	return 0;
}

static void dm_plane_atomic_async_update(struct drm_plane *plane,
					 struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	struct drm_plane_state *old_state =
		drm_atomic_get_old_plane_state(state, plane);

	trace_amdgpu_dm_atomic_update_cursor(new_state);

	swap(plane->state->fb, new_state->fb);

	plane->state->src_x = new_state->src_x;
	plane->state->src_y = new_state->src_y;
	plane->state->src_w = new_state->src_w;
	plane->state->src_h = new_state->src_h;
	plane->state->crtc_x = new_state->crtc_x;
	plane->state->crtc_y = new_state->crtc_y;
	plane->state->crtc_w = new_state->crtc_w;
	plane->state->crtc_h = new_state->crtc_h;

	handle_cursor_update(plane, old_state);
}

static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
	.prepare_fb = dm_plane_helper_prepare_fb,
	.cleanup_fb = dm_plane_helper_cleanup_fb,
	.atomic_check = dm_plane_atomic_check,
	.atomic_async_check = dm_plane_atomic_async_check,
	.atomic_async_update = dm_plane_atomic_async_update
};

/*
 * TODO: these are currently initialized to rgb formats only.
 * For future use cases we should either initialize them dynamically based on
 * plane capabilities, or initialize this array to all formats, so the internal
 * drm check will succeed, and let DC implement the proper check
 */
static const uint32_t rgb_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_XRGB16161616,
	DRM_FORMAT_XBGR16161616,
	DRM_FORMAT_ARGB16161616,
	DRM_FORMAT_ABGR16161616,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_RGB565,
};

static const uint32_t overlay_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_RGB565
};

static const u32 cursor_formats[] = {
	DRM_FORMAT_ARGB8888
};

static int get_plane_formats(const struct drm_plane *plane,
			     const struct dc_plane_cap *plane_cap,
			     uint32_t *formats, int max_formats)
{
	int i, num_formats = 0;

	/*
	 * TODO: Query support for each group of formats directly from
	 * DC plane caps. This will require adding more formats to the
	 * caps list.
	 */

	switch (plane->type) {
	case DRM_PLANE_TYPE_PRIMARY:
		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = rgb_formats[i];
		}

		if (plane_cap && plane_cap->pixel_format_support.nv12)
			formats[num_formats++] = DRM_FORMAT_NV12;
		if (plane_cap && plane_cap->pixel_format_support.p010)
			formats[num_formats++] = DRM_FORMAT_P010;
		if (plane_cap && plane_cap->pixel_format_support.fp16) {
			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
		}
		break;

	case DRM_PLANE_TYPE_OVERLAY:
		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = overlay_formats[i];
		}
		break;

	case DRM_PLANE_TYPE_CURSOR:
		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = cursor_formats[i];
		}
		break;
	}

	return num_formats;
}

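/*
 * Create and initialize a DRM plane: register it with the core along
 * with the formats and modifiers it supports, then attach blending,
 * color-space and rotation properties based on the DC plane caps.
 */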
static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap)
{
	uint32_t formats[32];
	int num_formats;
	int res = -EPERM;
	unsigned int supported_rotations;
	uint64_t *modifiers = NULL;

	num_formats = get_plane_formats(plane, plane_cap, formats,
					ARRAY_SIZE(formats));

	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
	if (res)
		return res;

	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
				       &dm_plane_funcs, formats, num_formats,
				       modifiers, plane->type, NULL);
	kfree(modifiers);
	if (res)
		return res;

	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
	    plane_cap && plane_cap->per_pixel_alpha) {
		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
					  BIT(DRM_MODE_BLEND_PREMULTI);

		drm_plane_create_alpha_property(plane);
		drm_plane_create_blend_mode_property(plane, blend_caps);
	}

	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
	    plane_cap &&
	    (plane_cap->pixel_format_support.nv12 ||
	     plane_cap->pixel_format_support.p010)) {
		/* This only affects YUV formats. */
		drm_plane_create_color_properties(
			plane,
			BIT(DRM_COLOR_YCBCR_BT601) |
			BIT(DRM_COLOR_YCBCR_BT709) |
			BIT(DRM_COLOR_YCBCR_BT2020),
			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
	}

	supported_rotations =
		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;

	if (dm->adev->asic_type >= CHIP_BONAIRE &&
	    plane->type != DRM_PLANE_TYPE_CURSOR)
		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
						   supported_rotations);

	drm_plane_helper_add(plane, &dm_plane_helper_funcs);

	/* Create (reset) the plane state */
	if (plane->funcs->reset)
		plane->funcs->reset(plane);

	return 0;
}

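/*
 * Create a CRTC together with its dedicated cursor plane, and enable
 * DRM color management on it.
 */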
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t crtc_index)
{
	struct amdgpu_crtc *acrtc = NULL;
	struct drm_plane *cursor_plane;

	int res = -ENOMEM;

	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
	if (!cursor_plane)
		goto fail;

	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);

	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
	if (!acrtc)
		goto fail;

	res = drm_crtc_init_with_planes(
			dm->ddev,
			&acrtc->base,
			plane,
			cursor_plane,
			&amdgpu_dm_crtc_funcs, NULL);

	if (res)
		goto fail;

	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);

	/* Create (reset) the plane state */
	if (acrtc->base.funcs->reset)
		acrtc->base.funcs->reset(&acrtc->base);

	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;

	acrtc->crtc_id = crtc_index;
	acrtc->base.enabled = false;
	acrtc->otg_inst = -1;

	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
				   true, MAX_COLOR_LUT_ENTRIES);
	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);

	return 0;

fail:
	kfree(acrtc);
	kfree(cursor_plane);
	return res;
}


static int to_drm_connector_type(enum signal_type st)
{
	switch (st) {
	case SIGNAL_TYPE_HDMI_TYPE_A:
		return DRM_MODE_CONNECTOR_HDMIA;
	case SIGNAL_TYPE_EDP:
		return DRM_MODE_CONNECTOR_eDP;
	case SIGNAL_TYPE_LVDS:
		return DRM_MODE_CONNECTOR_LVDS;
	case SIGNAL_TYPE_RGB:
		return DRM_MODE_CONNECTOR_VGA;
	case SIGNAL_TYPE_DISPLAY_PORT:
	case SIGNAL_TYPE_DISPLAY_PORT_MST:
		return DRM_MODE_CONNECTOR_DisplayPort;
	case SIGNAL_TYPE_DVI_DUAL_LINK:
	case SIGNAL_TYPE_DVI_SINGLE_LINK:
		return DRM_MODE_CONNECTOR_DVID;
	case SIGNAL_TYPE_VIRTUAL:
		return DRM_MODE_CONNECTOR_VIRTUAL;

	default:
		return DRM_MODE_CONNECTOR_Unknown;
	}
}

static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
{
	struct drm_encoder *encoder;

	/* There is only one encoder per connector */
	drm_connector_for_each_possible_encoder(connector, encoder)
		return encoder;

	return NULL;
}

static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
{
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (encoder == NULL)
		return;

	amdgpu_encoder = to_amdgpu_encoder(encoder);

	amdgpu_encoder->native_mode.clock = 0;

	if (!list_empty(&connector->probed_modes)) {
		struct drm_display_mode *preferred_mode = NULL;

		list_for_each_entry(preferred_mode,
				    &connector->probed_modes,
				    head) {
			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
				amdgpu_encoder->native_mode = *preferred_mode;

			break;
		}

	}
}

static struct drm_display_mode *
amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
			     char *name,
			     int hdisplay, int vdisplay)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;

	mode = drm_mode_duplicate(dev, native_mode);

	if (mode == NULL)
		return NULL;

	mode->hdisplay = hdisplay;
	mode->vdisplay = vdisplay;
	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);

	return mode;

}

7433
7434static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
3ee6b26b 7435 struct drm_connector *connector)
e7b07cee
HW
7436{
7437 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7438 struct drm_display_mode *mode = NULL;
7439 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
c84dec2f
HW
7440 struct amdgpu_dm_connector *amdgpu_dm_connector =
7441 to_amdgpu_dm_connector(connector);
e7b07cee
HW
7442 int i;
7443 int n;
7444 struct mode_size {
7445 char name[DRM_DISPLAY_MODE_LEN];
7446 int w;
7447 int h;
b830ebc9 7448 } common_modes[] = {
e7b07cee
HW
7449 { "640x480", 640, 480},
7450 { "800x600", 800, 600},
7451 { "1024x768", 1024, 768},
7452 { "1280x720", 1280, 720},
7453 { "1280x800", 1280, 800},
7454 {"1280x1024", 1280, 1024},
7455 { "1440x900", 1440, 900},
7456 {"1680x1050", 1680, 1050},
7457 {"1600x1200", 1600, 1200},
7458 {"1920x1080", 1920, 1080},
7459 {"1920x1200", 1920, 1200}
7460 };
7461
b830ebc9 7462 n = ARRAY_SIZE(common_modes);
e7b07cee
HW
7463
7464 for (i = 0; i < n; i++) {
7465 struct drm_display_mode *curmode = NULL;
7466 bool mode_existed = false;
7467
7468 if (common_modes[i].w > native_mode->hdisplay ||
b830ebc9
HW
7469 common_modes[i].h > native_mode->vdisplay ||
7470 (common_modes[i].w == native_mode->hdisplay &&
7471 common_modes[i].h == native_mode->vdisplay))
7472 continue;
e7b07cee
HW
7473
7474 list_for_each_entry(curmode, &connector->probed_modes, head) {
7475 if (common_modes[i].w == curmode->hdisplay &&
b830ebc9 7476 common_modes[i].h == curmode->vdisplay) {
e7b07cee
HW
7477 mode_existed = true;
7478 break;
7479 }
7480 }
7481
7482 if (mode_existed)
7483 continue;
7484
7485 mode = amdgpu_dm_create_common_mode(encoder,
7486 common_modes[i].name, common_modes[i].w,
7487 common_modes[i].h);
7488 drm_mode_probed_add(connector, mode);
c84dec2f 7489 amdgpu_dm_connector->num_modes++;
e7b07cee
HW
7490 }
7491}
7492
3ee6b26b
AD
7493static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7494 struct edid *edid)
e7b07cee 7495{
c84dec2f
HW
7496 struct amdgpu_dm_connector *amdgpu_dm_connector =
7497 to_amdgpu_dm_connector(connector);
e7b07cee
HW
7498
7499 if (edid) {
7500 /* empty probed_modes */
7501 INIT_LIST_HEAD(&connector->probed_modes);
c84dec2f 7502 amdgpu_dm_connector->num_modes =
e7b07cee
HW
7503 drm_add_edid_modes(connector, edid);
7504
f1e5e913
YMM
7505 /* sorting the probed modes before calling function
7506 * amdgpu_dm_get_native_mode() since EDID can have
7507 * more than one preferred mode. The modes that are
7508 * later in the probed mode list could be of higher
7509 * and preferred resolution. For example, 3840x2160
7510 * resolution in base EDID preferred timing and 4096x2160
7511 * preferred resolution in DID extension block later.
7512 */
7513 drm_mode_sort(&connector->probed_modes);
e7b07cee 7514 amdgpu_dm_get_native_mode(connector);
f9b4f20c
SW
7515
7516 /* Freesync capabilities are reset by calling
7517 * drm_add_edid_modes() and need to be
7518 * restored here.
7519 */
7520 amdgpu_dm_update_freesync_caps(connector, edid);
a8d8d3dc 7521 } else {
c84dec2f 7522 amdgpu_dm_connector->num_modes = 0;
a8d8d3dc 7523 }
e7b07cee
HW
7524}
7525
static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
			      struct drm_display_mode *mode)
{
	struct drm_display_mode *m;

	list_for_each_entry(m, &aconnector->base.probed_modes, head) {
		if (drm_mode_equal(m, mode))
			return true;
	}

	return false;
}

7538
7539static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7540{
7541 const struct drm_display_mode *m;
7542 struct drm_display_mode *new_mode;
7543 uint i;
7544 uint32_t new_modes_count = 0;
7545
7546 /* Standard FPS values
7547 *
7548 * 23.976 - TV/NTSC
7549 * 24 - Cinema
7550 * 25 - TV/PAL
7551 * 29.97 - TV/NTSC
7552 * 30 - TV/NTSC
7553 * 48 - Cinema HFR
7554 * 50 - TV/PAL
7555 * 60 - Commonly used
7556 * 48,72,96 - Multiples of 24
7557 */
7558 const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000,
7559 48000, 50000, 60000, 72000, 96000 };
7560
7561 /*
7562 * Find mode with highest refresh rate with the same resolution
7563 * as the preferred mode. Some monitors report a preferred mode
7564 * with lower resolution than the highest refresh rate supported.
7565 */
7566
7567 m = get_highest_refresh_rate_mode(aconnector, true);
7568 if (!m)
7569 return 0;
7570
7571 for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7572 uint64_t target_vtotal, target_vtotal_diff;
7573 uint64_t num, den;
7574
7575 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7576 continue;
7577
7578 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7579 common_rates[i] > aconnector->max_vfreq * 1000)
7580 continue;
7581
7582 num = (unsigned long long)m->clock * 1000 * 1000;
7583 den = common_rates[i] * (unsigned long long)m->htotal;
7584 target_vtotal = div_u64(num, den);
7585 target_vtotal_diff = target_vtotal - m->vtotal;
7586
7587 /* Check for illegal modes */
7588 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7589 m->vsync_end + target_vtotal_diff < m->vsync_start ||
7590 m->vtotal + target_vtotal_diff < m->vsync_end)
7591 continue;
7592
7593 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7594 if (!new_mode)
7595 goto out;
7596
7597 new_mode->vtotal += (u16)target_vtotal_diff;
7598 new_mode->vsync_start += (u16)target_vtotal_diff;
7599 new_mode->vsync_end += (u16)target_vtotal_diff;
7600 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7601 new_mode->type |= DRM_MODE_TYPE_DRIVER;
7602
7603 if (!is_duplicate_mode(aconnector, new_mode)) {
7604 drm_mode_probed_add(&aconnector->base, new_mode);
7605 new_modes_count += 1;
7606 } else
7607 drm_mode_destroy(aconnector->base.dev, new_mode);
7608 }
7609 out:
7610 return new_modes_count;
7611}
7612
7613static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7614 struct edid *edid)
7615{
7616 struct amdgpu_dm_connector *amdgpu_dm_connector =
7617 to_amdgpu_dm_connector(connector);
7618
7619 if (!(amdgpu_freesync_vid_mode && edid))
7620 return;
fe8858bb 7621
a85ba005
NC
7622 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7623 amdgpu_dm_connector->num_modes +=
7624 add_fs_modes(amdgpu_dm_connector);
7625}
7626
static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	struct drm_encoder *encoder;
	struct edid *edid = amdgpu_dm_connector->edid;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (!drm_edid_is_valid(edid)) {
		amdgpu_dm_connector->num_modes =
			drm_add_modes_noedid(connector, 640, 480);
	} else {
		amdgpu_dm_connector_ddc_get_modes(connector, edid);
		amdgpu_dm_connector_add_common_modes(encoder, connector);
		amdgpu_dm_connector_add_freesync_modes(connector, edid);
	}
	amdgpu_dm_fbc_init(connector);

	return amdgpu_dm_connector->num_modes;
}

void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
				     struct amdgpu_dm_connector *aconnector,
				     int connector_type,
				     struct dc_link *link,
				     int link_index)
{
	struct amdgpu_device *adev = drm_to_adev(dm->ddev);

	/*
	 * Some of the properties below require access to state, like bpc.
	 * Allocate some default initial connector state with our reset helper.
	 */
	if (aconnector->base.funcs->reset)
		aconnector->base.funcs->reset(&aconnector->base);

	aconnector->connector_id = link_index;
	aconnector->dc_link = link;
	aconnector->base.interlace_allowed = false;
	aconnector->base.doublescan_allowed = false;
	aconnector->base.stereo_allowed = false;
	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
	aconnector->audio_inst = -1;
	mutex_init(&aconnector->hpd_lock);

	/*
	 * Configure HPD hot-plug support. connector->polled defaults to 0,
	 * which means HPD hot plug is not supported.
	 */
	switch (connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.dp_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DVID:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	default:
		break;
	}

	drm_object_attach_property(&aconnector->base.base,
				dm->ddev->mode_config.scaling_mode_property,
				DRM_MODE_SCALE_NONE);

	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_property,
				UNDERSCAN_OFF);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_hborder_property,
				0);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_vborder_property,
				0);

	if (!aconnector->mst_port)
		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);

	/* This defaults to the max in the range, but we want 8bpc for non-eDP. */
	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;

	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
		drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.abm_level_property, 0);
	}

	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector_type == DRM_MODE_CONNECTOR_eDP) {
		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);

		if (!aconnector->mst_port)
			drm_connector_attach_vrr_capable_property(&aconnector->base);

#ifdef CONFIG_DRM_AMD_DC_HDCP
		if (adev->dm.hdcp_workqueue)
			drm_connector_attach_content_protection_property(&aconnector->base, true);
#endif
	}
}

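/*
 * Relay i2c transfers from the kernel i2c layer to the DC DDC service
 * backing this connector.
 */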
static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
			      struct i2c_msg *msgs, int num)
{
	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
	struct ddc_service *ddc_service = i2c->ddc_service;
	struct i2c_command cmd;
	int i;
	int result = -EIO;

	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);

	if (!cmd.payloads)
		return result;

	cmd.number_of_payloads = num;
	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
	cmd.speed = 100;

	for (i = 0; i < num; i++) {
		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
		cmd.payloads[i].address = msgs[i].addr;
		cmd.payloads[i].length = msgs[i].len;
		cmd.payloads[i].data = msgs[i].buf;
	}

	if (dc_submit_i2c(
			ddc_service->ctx->dc,
			ddc_service->ddc_pin->hw_info.ddc_channel,
			&cmd))
		result = num;

	kfree(cmd.payloads);
	return result;
}

static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
	.master_xfer = amdgpu_dm_i2c_xfer,
	.functionality = amdgpu_dm_i2c_func,
};

static struct amdgpu_i2c_adapter *
create_i2c(struct ddc_service *ddc_service,
	   int link_index,
	   int *res)
{
	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
	struct amdgpu_i2c_adapter *i2c;

	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
	if (!i2c)
		return NULL;
	i2c->base.owner = THIS_MODULE;
	i2c->base.class = I2C_CLASS_DDC;
	i2c->base.dev.parent = &adev->pdev->dev;
	i2c->base.algo = &amdgpu_dm_i2c_algo;
	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
	i2c_set_adapdata(&i2c->base, i2c);
	i2c->ddc_service = ddc_service;
	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;

	return i2c;
}


/*
 * Note: this function assumes that dc_link_detect() was called for the
 * dc_link which will be represented by this aconnector.
 */
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *aconnector,
				    uint32_t link_index,
				    struct amdgpu_encoder *aencoder)
{
	int res = 0;
	int connector_type;
	struct dc *dc = dm->dc;
	struct dc_link *link = dc_get_link_at_index(dc, link_index);
	struct amdgpu_i2c_adapter *i2c;

	link->priv = aconnector;

	DRM_DEBUG_DRIVER("%s()\n", __func__);

	i2c = create_i2c(link->ddc, link->link_index, &res);
	if (!i2c) {
		DRM_ERROR("Failed to create i2c adapter data\n");
		return -ENOMEM;
	}

	aconnector->i2c = i2c;
	res = i2c_add_adapter(&i2c->base);

	if (res) {
		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
		goto out_free;
	}

	connector_type = to_drm_connector_type(link->connector_signal);

	res = drm_connector_init_with_ddc(
			dm->ddev,
			&aconnector->base,
			&amdgpu_dm_connector_funcs,
			connector_type,
			&i2c->base);

	if (res) {
		DRM_ERROR("connector_init failed\n");
		aconnector->connector_id = -1;
		goto out_free;
	}

	drm_connector_helper_add(
			&aconnector->base,
			&amdgpu_dm_connector_helper_funcs);

	amdgpu_dm_connector_init_helper(
		dm,
		aconnector,
		connector_type,
		link,
		link_index);

	drm_connector_attach_encoder(
		&aconnector->base, &aencoder->base);

	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
		|| connector_type == DRM_MODE_CONNECTOR_eDP)
		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);

out_free:
	if (res) {
		kfree(i2c);
		aconnector->i2c = NULL;
	}
	return res;
}

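/* Build the possible-CRTCs mask for an encoder from the CRTC count. */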
int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
{
	switch (adev->mode_info.num_crtc) {
	case 1:
		return 0x1;
	case 2:
		return 0x3;
	case 3:
		return 0x7;
	case 4:
		return 0xf;
	case 5:
		return 0x1f;
	case 6:
	default:
		return 0x3f;
	}
}

static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	int res = drm_encoder_init(dev,
				   &aencoder->base,
				   &amdgpu_dm_encoder_funcs,
				   DRM_MODE_ENCODER_TMDS,
				   NULL);

	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);

	if (!res)
		aencoder->encoder_id = link_index;
	else
		aencoder->encoder_id = -1;

	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);

	return res;
}

static void manage_dm_interrupts(struct amdgpu_device *adev,
				 struct amdgpu_crtc *acrtc,
				 bool enable)
{
	/*
	 * We have no guarantee that the frontend index maps to the same
	 * backend index - some even map to more than one.
	 *
	 * TODO: Use a different interrupt or check DC itself for the mapping.
	 */
	int irq_type =
		amdgpu_display_crtc_idx_to_irq_type(
			adev,
			acrtc->crtc_id);

	if (enable) {
		drm_crtc_vblank_on(&acrtc->base);
		amdgpu_irq_get(
			adev,
			&adev->pageflip_irq,
			irq_type);
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
		amdgpu_irq_get(
			adev,
			&adev->vline0_irq,
			irq_type);
#endif
	} else {
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
		amdgpu_irq_put(
			adev,
			&adev->vline0_irq,
			irq_type);
#endif
		amdgpu_irq_put(
			adev,
			&adev->pageflip_irq,
			irq_type);
		drm_crtc_vblank_off(&acrtc->base);
	}
}

static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
				      struct amdgpu_crtc *acrtc)
{
	int irq_type =
		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);

	/*
	 * This reads the current state for the IRQ and forces a reapply of
	 * the setting to hardware.
	 */
	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
}

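/*
 * Report whether the scaling mode or underscan settings differ between
 * the old and new connector states.
 */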
static bool
is_scaling_state_different(const struct dm_connector_state *dm_state,
			   const struct dm_connector_state *old_dm_state)
{
	if (dm_state->scaling != old_dm_state->scaling)
		return true;
	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
			return true;
	} else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
			return true;
	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
		return true;
	return false;
}

#ifdef CONFIG_DRM_AMD_DC_HDCP
static bool is_content_protection_different(struct drm_connector_state *state,
					    const struct drm_connector_state *old_state,
					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);

	/* Handle: Type0/1 change */
	if (old_state->hdcp_content_type != state->hdcp_content_type &&
	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return true;
	}

	/* CP is being re-enabled, ignore this.
	 *
	 * Handles: ENABLED -> DESIRED
	 */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
		return false;
	}

	/* S3 resume case, since the old state will always be 0 (UNDESIRED) and
	 * the restored state will be ENABLED.
	 *
	 * Handles: UNDESIRED -> ENABLED
	 */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;

	/* Check if something is connected and enabled; otherwise we would start
	 * hdcp with nothing connected/enabled (hot-plug, headless S3, dpms).
	 *
	 * Handles: DESIRED -> DESIRED (Special case)
	 */
	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
		dm_con_state->update_hdcp = false;
		return true;
	}

	/*
	 * Handles: UNDESIRED -> UNDESIRED
	 *          DESIRED -> DESIRED
	 *          ENABLED -> ENABLED
	 */
	if (old_state->content_protection == state->content_protection)
		return false;

	/*
	 * Handles: UNDESIRED -> DESIRED
	 *          DESIRED -> UNDESIRED
	 *          ENABLED -> UNDESIRED
	 */
	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
		return true;

	/*
	 * Handles: DESIRED -> ENABLED
	 */
	return false;
}

#endif
static void remove_stream(struct amdgpu_device *adev,
			  struct amdgpu_crtc *acrtc,
			  struct dc_stream_state *stream)
{
	/* this is the update mode case */

	acrtc->otg_inst = -1;
	acrtc->enabled = false;
}

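/*
 * Compute the DC cursor position for a cursor plane, clamping negative
 * screen coordinates to zero via the cursor hotspot.
 */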
static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
			       struct dc_cursor_position *position)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	int x, y;
	int xorigin = 0, yorigin = 0;

	if (!crtc || !plane->state->fb)
		return 0;

	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
			  __func__,
			  plane->state->crtc_w,
			  plane->state->crtc_h);
		return -EINVAL;
	}

	x = plane->state->crtc_x;
	y = plane->state->crtc_y;

	if (x <= -amdgpu_crtc->max_cursor_width ||
	    y <= -amdgpu_crtc->max_cursor_height)
		return 0;

	if (x < 0) {
		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
		x = 0;
	}
	if (y < 0) {
		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
		y = 0;
	}
	position->enable = true;
	position->translate_by_source = true;
	position->x = x;
	position->y = y;
	position->x_hotspot = xorigin;
	position->y_hotspot = yorigin;

	return 0;
}

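/*
 * Program the DC cursor attributes and position for a cursor plane
 * update, or disable the cursor when it has moved fully off screen.
 */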
static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state)
{
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	uint64_t address = afb ? afb->address : 0;
	struct dc_cursor_position position = {0};
	struct dc_cursor_attributes attributes;
	int ret;

	if (!plane->state->fb && !old_plane_state->fb)
		return;

	DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
		      __func__,
		      amdgpu_crtc->crtc_id,
		      plane->state->crtc_w,
		      plane->state->crtc_h);

	ret = get_cursor_position(plane, crtc, &position);
	if (ret)
		return;

	if (!position.enable) {
		/* turn off cursor */
		if (crtc_state && crtc_state->stream) {
			mutex_lock(&adev->dm.dc_lock);
			dc_stream_set_cursor_position(crtc_state->stream,
						      &position);
			mutex_unlock(&adev->dm.dc_lock);
		}
		return;
	}

	amdgpu_crtc->cursor_width = plane->state->crtc_w;
	amdgpu_crtc->cursor_height = plane->state->crtc_h;

	memset(&attributes, 0, sizeof(attributes));
	attributes.address.high_part = upper_32_bits(address);
	attributes.address.low_part = lower_32_bits(address);
	attributes.width = plane->state->crtc_w;
	attributes.height = plane->state->crtc_h;
	attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
	attributes.rotation_angle = 0;
	attributes.attribute_flags.value = 0;

	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];

	if (crtc_state->stream) {
		mutex_lock(&adev->dm.dc_lock);
		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
						     &attributes))
			DRM_ERROR("DC failed to set cursor attributes\n");

		if (!dc_stream_set_cursor_position(crtc_state->stream,
						   &position))
			DRM_ERROR("DC failed to set cursor position\n");
		mutex_unlock(&adev->dm.dc_lock);
	}
}

static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
{

	assert_spin_locked(&acrtc->base.dev->event_lock);
	WARN_ON(acrtc->event);

	acrtc->event = acrtc->base.state->event;

	/* Set the flip status */
	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

	/* Mark this event as consumed */
	acrtc->base.state->event = NULL;

	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
		     acrtc->crtc_id);
}

bb47de73
NK
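/*
 * Recompute VRR state and the FreeSync infopacket for a stream on flip:
 * runs the freesync module pre-flip handling, rebuilds the VRR
 * infopacket, and mirrors the results into both the CRTC state and the
 * CRTC's dm_irq_params so interrupt handlers see consistent values.
 */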
static void update_freesync_state_on_stream(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state,
	struct dc_stream_state *new_stream,
	struct dc_plane_state *surface,
	u32 flip_timestamp_in_us)
{
	struct mod_vrr_params vrr_params;
	struct dc_info_packet vrr_infopacket = {0};
	struct amdgpu_device *adev = dm->adev;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
	unsigned long flags;
	bool pack_sdp_v1_3 = false;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */

	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;

	if (surface) {
		mod_freesync_handle_preflip(
			dm->freesync_module,
			surface,
			new_stream,
			flip_timestamp_in_us,
			&vrr_params);

		if (adev->family < AMDGPU_FAMILY_AI &&
		    amdgpu_dm_vrr_active(new_crtc_state)) {
			mod_freesync_handle_v_update(dm->freesync_module,
						     new_stream, &vrr_params);

			/* Need to call this before the frame ends. */
			dc_stream_adjust_vmin_vmax(dm->dc,
						   new_crtc_state->stream,
						   &vrr_params.adjust);
		}
	}

	mod_freesync_build_vrr_infopacket(
		dm->freesync_module,
		new_stream,
		&vrr_params,
		PACKET_TYPE_VRR,
		TRANSFER_FUNC_UNKNOWN,
		&vrr_infopacket,
		pack_sdp_v1_3);

	new_crtc_state->freesync_timing_changed |=
		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
			&vrr_params.adjust,
			sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->freesync_vrr_info_changed |=
		(memcmp(&new_crtc_state->vrr_infopacket,
			&vrr_infopacket,
			sizeof(vrr_infopacket)) != 0);

	acrtc->dm_irq_params.vrr_params = vrr_params;
	new_crtc_state->vrr_infopacket = vrr_infopacket;

	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
	new_stream->vrr_infopacket = vrr_infopacket;

	if (new_crtc_state->freesync_vrr_info_changed)
		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
			      new_crtc_state->base.crtc->base.id,
			      (int)new_crtc_state->base.vrr_enabled,
			      (int)vrr_params.state);

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

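/*
 * Recompute the FreeSync configuration and VRR parameters for a stream
 * after a state change and copy them into the CRTC's dm_irq_params so
 * the DM IRQ handlers can access them without touching DRM state.
 */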
static void update_stream_irq_parameters(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state)
{
	struct dc_stream_state *new_stream = new_crtc_state->stream;
	struct mod_vrr_params vrr_params;
	struct mod_freesync_config config = new_crtc_state->freesync_config;
	struct amdgpu_device *adev = dm->adev;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
	unsigned long flags;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */
	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;

	if (new_crtc_state->vrr_supported &&
	    config.min_refresh_in_uhz &&
	    config.max_refresh_in_uhz) {
		/*
		 * if freesync compatible mode was set, config.state will be set
		 * in atomic check
		 */
		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
		} else {
			config.state = new_crtc_state->base.vrr_enabled ?
						VRR_STATE_ACTIVE_VARIABLE :
						VRR_STATE_INACTIVE;
		}
	} else {
		config.state = VRR_STATE_UNSUPPORTED;
	}

	mod_freesync_build_vrr_params(dm->freesync_module,
				      new_stream,
				      &config, &vrr_params);

	new_crtc_state->freesync_timing_changed |=
		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->freesync_config = config;
	/* Copy state for access from DM IRQ handler */
	acrtc->dm_irq_params.freesync_config = config;
	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
	acrtc->dm_irq_params.vrr_params = vrr_params;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

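/*
 * Handle VRR on<->off transitions: while VRR is active we hold a vblank
 * reference and keep the vupdate interrupt enabled so vblank/pflip
 * timestamps stay valid inside the variable front porch; both are
 * dropped again when VRR is switched off.
 */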
static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
					    struct dm_crtc_state *new_state)
{
	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);

	if (!old_vrr_active && new_vrr_active) {
		/* Transition VRR inactive -> active:
		 * While VRR is active, we must not disable vblank irq, as a
		 * reenable after disable would compute bogus vblank/pflip
		 * timestamps if it likely happened inside display front-porch.
		 *
		 * We also need vupdate irq for the actual core vblank handling
		 * at end of vblank.
		 */
		dm_set_vupdate_irq(new_state->base.crtc, true);
		drm_crtc_vblank_get(new_state->base.crtc);
		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
				 __func__, new_state->base.crtc->base.id);
	} else if (old_vrr_active && !new_vrr_active) {
		/* Transition VRR active -> inactive:
		 * Allow vblank irq disable again for fixed refresh rate.
		 */
		dm_set_vupdate_irq(new_state->base.crtc, false);
		drm_crtc_vblank_put(new_state->base.crtc);
		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
				 __func__, new_state->base.crtc->base.id);
	}
}

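/*
 * Commit cursor updates for every cursor plane in the atomic state.
 * Cursor planes are skipped in amdgpu_dm_commit_planes() and handled
 * here instead.
 */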
static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
{
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state;
	int i;

	/*
	 * TODO: Make this per-stream so we don't issue redundant updates for
	 * commits with multiple streams.
	 */
	for_each_old_plane_in_state(state, plane, old_plane_state, i)
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			handle_cursor_update(plane, old_plane_state);
}

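/*
 * Build and commit all plane updates for one CRTC as a single DC update
 * bundle: fills scaling/plane info and flip addresses for each plane,
 * waits for fences and for the target vblank, arms the pageflip event,
 * applies the PSR policy, and hands everything to DC in one
 * dc_commit_updates_for_stream() call.
 */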
static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
				    struct dc_state *dc_state,
				    struct drm_device *dev,
				    struct amdgpu_display_manager *dm,
				    struct drm_crtc *pcrtc,
				    bool wait_for_vblank)
{
	uint32_t i;
	uint64_t timestamp_ns;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
	struct drm_crtc_state *new_pcrtc_state =
		drm_atomic_get_new_crtc_state(state, pcrtc);
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
	struct dm_crtc_state *dm_old_crtc_state =
		to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
	int planes_count = 0, vpos, hpos;
	long r;
	unsigned long flags;
	struct amdgpu_bo *abo;
	uint32_t target_vblank, last_flip_vblank;
	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
	bool pflip_present = false;
	struct {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} *bundle;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		dm_error("Failed to allocate update bundle\n");
		goto cleanup;
	}

	/*
	 * Disable the cursor first if we're disabling all the planes.
	 * It'll remain on the screen after the planes are re-enabled
	 * if we don't.
	 */
	if (acrtc_state->active_planes == 0)
		amdgpu_dm_commit_cursors(state);

	/* update planes when needed */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
		struct drm_crtc *crtc = new_plane_state->crtc;
		struct drm_crtc_state *new_crtc_state;
		struct drm_framebuffer *fb = new_plane_state->fb;
		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
		bool plane_needs_flip;
		struct dc_plane_state *dc_plane;
		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);

		/* Cursor plane is handled after stream updates */
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (!fb || !crtc || pcrtc != crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
		if (!new_crtc_state->active)
			continue;

		dc_plane = dm_new_plane_state->dc_state;

		bundle->surface_updates[planes_count].surface = dc_plane;
		if (new_pcrtc_state->color_mgmt_changed) {
			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
		}

		fill_dc_scaling_info(new_plane_state,
				     &bundle->scaling_infos[planes_count]);

		bundle->surface_updates[planes_count].scaling_info =
			&bundle->scaling_infos[planes_count];

		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;

		pflip_present = pflip_present || plane_needs_flip;

		if (!plane_needs_flip) {
			planes_count += 1;
			continue;
		}

		abo = gem_to_amdgpu_bo(fb->obj[0]);

		/*
		 * Wait for all fences on this FB. Do limited wait to avoid
		 * deadlock during GPU reset when this fence will not signal
		 * but we hold reservation lock for the BO.
		 */
		r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
					  msecs_to_jiffies(5000));
		if (unlikely(r <= 0))
			DRM_ERROR("Waiting for fences timed out!");

		fill_dc_plane_info_and_addr(
			dm->adev, new_plane_state,
			afb->tiling_flags,
			&bundle->plane_infos[planes_count],
			&bundle->flip_addrs[planes_count].address,
			afb->tmz_surface, false);

		DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
				 new_plane_state->plane->index,
				 bundle->plane_infos[planes_count].dcc.enable);

		bundle->surface_updates[planes_count].plane_info =
			&bundle->plane_infos[planes_count];

		/*
		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroring.
		 */
		bundle->flip_addrs[planes_count].flip_immediate =
			crtc->state->async_flip &&
			acrtc_state->update_type == UPDATE_TYPE_FAST;

		timestamp_ns = ktime_get_ns();
		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
		bundle->surface_updates[planes_count].surface = dc_plane;

		if (!bundle->surface_updates[planes_count].surface) {
			DRM_ERROR("No surface for CRTC: id=%d\n",
				  acrtc_attach->crtc_id);
			continue;
		}

		if (plane == pcrtc->primary)
			update_freesync_state_on_stream(
				dm,
				acrtc_state,
				acrtc_state->stream,
				dc_plane,
				bundle->flip_addrs[planes_count].flip_timestamp_in_us);

		DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
				 __func__,
				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);

		planes_count += 1;
	}

	if (pflip_present) {
		if (!vrr_active) {
			/* Use old throttling in non-vrr fixed refresh rate mode
			 * to keep flip scheduling based on target vblank counts
			 * working in a backwards compatible way, e.g., for
			 * clients using the GLX_OML_sync_control extension or
			 * DRI3/Present extension with defined target_msc.
			 */
			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
		} else {
			/* For variable refresh rate mode only:
			 * Get vblank of last completed flip to avoid > 1 vrr
			 * flips per video frame by use of throttling, but allow
			 * flip programming anywhere in the possibly large
			 * variable vrr vblank interval for fine-grained flip
			 * timing control and more opportunity to avoid stutter
			 * on late submission of flips.
			 */
			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		target_vblank = last_flip_vblank + wait_for_vblank;

		/*
		 * Wait until we're out of the vertical blank period before the one
		 * targeted by the flip
		 */
		while ((acrtc_attach->enabled &&
			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
							    0, &vpos, &hpos, NULL,
							    NULL, &pcrtc->hwmode)
			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
			(int)(target_vblank -
			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
			usleep_range(1000, 1100);
		}

		/**
		 * Prepare the flip event for the pageflip interrupt to handle.
		 *
		 * This only works in the case where we've already turned on the
		 * appropriate hardware blocks (eg. HUBP) so in the transition case
		 * from 0 -> n planes we have to skip a hardware generated event
		 * and rely on sending it from software.
		 */
		if (acrtc_attach->base.state->event &&
		    acrtc_state->active_planes > 0) {
			drm_crtc_vblank_get(pcrtc);

			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);

			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
			prepare_flip_isr(acrtc_attach);

			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		if (acrtc_state->stream) {
			if (acrtc_state->freesync_vrr_info_changed)
				bundle->stream_update.vrr_infopacket =
					&acrtc_state->stream->vrr_infopacket;
		}
	}

	/* Update the planes if changed or disable if we don't have any. */
	if ((planes_count || acrtc_state->active_planes == 0) &&
	    acrtc_state->stream) {
		bundle->stream_update.stream = acrtc_state->stream;
		if (new_pcrtc_state->mode_changed) {
			bundle->stream_update.src = acrtc_state->stream->src;
			bundle->stream_update.dst = acrtc_state->stream->dst;
		}

		if (new_pcrtc_state->color_mgmt_changed) {
			/*
			 * TODO: This isn't fully correct since we've actually
			 * already modified the stream in place.
			 */
			bundle->stream_update.gamut_remap =
				&acrtc_state->stream->gamut_remap_matrix;
			bundle->stream_update.output_csc_transform =
				&acrtc_state->stream->csc_color_matrix;
			bundle->stream_update.out_transfer_func =
				acrtc_state->stream->out_transfer_func;
		}

		acrtc_state->stream->abm_level = acrtc_state->abm_level;
		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
			bundle->stream_update.abm_level = &acrtc_state->abm_level;

		/*
		 * If FreeSync state on the stream has changed then we need to
		 * re-adjust the min/max bounds now that DC doesn't handle this
		 * as part of commit.
		 */
		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			dc_stream_adjust_vmin_vmax(
				dm->dc, acrtc_state->stream,
				&acrtc_attach->dm_irq_params.vrr_params.adjust);
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}
		mutex_lock(&dm->dc_lock);
		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
		    acrtc_state->stream->link->psr_settings.psr_allow_active)
			amdgpu_dm_psr_disable(acrtc_state->stream);

		dc_commit_updates_for_stream(dm->dc,
					     bundle->surface_updates,
					     planes_count,
					     acrtc_state->stream,
					     &bundle->stream_update,
					     dc_state);

		/**
		 * Enable or disable the interrupts on the backend.
		 *
		 * Most pipes are put into power gating when unused.
		 *
		 * When power gating is enabled on a pipe we lose the
		 * interrupt enablement state when power gating is disabled.
		 *
		 * So we need to update the IRQ control state in hardware
		 * whenever the pipe turns on (since it could be previously
		 * power gated) or off (since some pipes can't be power gated
		 * on some ASICs).
		 */
		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
			dm_update_pflip_irq_state(drm_to_adev(dev),
						  acrtc_attach);

		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
		    acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
		    !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
			amdgpu_dm_link_setup_psr(acrtc_state->stream);
		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
			 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
			 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
			amdgpu_dm_psr_enable(acrtc_state->stream);
		}

		mutex_unlock(&dm->dc_lock);
	}

	/*
	 * Update cursor state *after* programming all the planes.
	 * This avoids redundant programming in the case where we're going
	 * to be disabling a single plane - those pipes are being disabled.
	 */
	if (acrtc_state->active_planes)
		amdgpu_dm_commit_cursors(state);

cleanup:
	kfree(bundle);
}

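/*
 * Propagate audio instance changes to the audio component: connectors
 * that lost or changed their CRTC get their instance cleared, and newly
 * enabled streams publish the instance reported by dc_stream_get_status().
 */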
static void amdgpu_dm_commit_audio(struct drm_device *dev,
				   struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *new_dm_crtc_state;
	const struct dc_stream_status *status;
	int i, inst;

	/* Notify device removals. */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		if (old_con_state->crtc != new_con_state->crtc) {
			/* CRTC changes require notification. */
			goto notify;
		}

		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(
			state, new_con_state->crtc);

		if (!new_crtc_state)
			continue;

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

	notify:
		aconnector = to_amdgpu_dm_connector(connector);

		mutex_lock(&adev->dm.audio_lock);
		inst = aconnector->audio_inst;
		aconnector->audio_inst = -1;
		mutex_unlock(&adev->dm.audio_lock);

		amdgpu_dm_audio_eld_notify(adev, inst);
	}

	/* Notify audio device additions. */
	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(
			state, new_con_state->crtc);

		if (!new_crtc_state)
			continue;

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (!new_dm_crtc_state->stream)
			continue;

		status = dc_stream_get_status(new_dm_crtc_state->stream);
		if (!status)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);

		mutex_lock(&adev->dm.audio_lock);
		inst = status->audio_inst;
		aconnector->audio_inst = inst;
		mutex_unlock(&adev->dm.audio_lock);

		amdgpu_dm_audio_eld_notify(adev, inst);
	}
}

/*
 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
 * @crtc_state: the DRM CRTC state
 * @stream_state: the DC stream state.
 *
 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
 */
static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
						struct dc_stream_state *stream_state)
{
	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
}

/**
 * amdgpu_dm_atomic_commit_tail() - AMDGPU DM's commit tail implementation.
 * @state: The atomic state to commit
 *
 * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failure here implies a hardware failure, since
 * atomic check should have filtered anything non-kosher.
 */
static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dm_atomic_state *dm_state;
	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
	uint32_t i, j;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	unsigned long flags;
	bool wait_for_vblank = true;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	int crtc_disable_count = 0;
	bool mode_set_reset_required = false;

	trace_amdgpu_dm_atomic_commit_tail_begin(state);

	drm_atomic_helper_update_legacy_modeset_state(dev, state);

	dm_state = dm_atomic_get_new_state(state);
	if (dm_state && dm_state->context) {
		dc_state = dm_state->context;
	} else {
		/* No state changes, retain current state. */
		dc_state_temp = dc_create_state(dm->dc);
		ASSERT(dc_state_temp);
		dc_state = dc_state_temp;
		dc_resource_state_copy_construct_current(dm->dc, dc_state);
	}

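	/*
	 * Disable DM interrupts and release the old stream for every CRTC
	 * that is being turned off or is undergoing a full modeset, before
	 * any DC programming happens below.
	 */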
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (old_crtc_state->active &&
		    (!new_crtc_state->active ||
		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
			manage_dm_interrupts(adev, acrtc, false);
			dc_stream_release(dm_old_crtc_state->stream);
		}
	}

	drm_atomic_helper_calc_timestamping_constants(state);

8849
e7b07cee 8850 /* update changed items */
0bc9706d 8851 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
e7b07cee 8852 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 8853
54d76575
LSL
8854 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8855 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
e7b07cee 8856
4711c033 8857 DRM_DEBUG_ATOMIC(
e7b07cee
HW
8858 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8859 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8860 "connectors_changed:%d\n",
8861 acrtc->crtc_id,
0bc9706d
LSL
8862 new_crtc_state->enable,
8863 new_crtc_state->active,
8864 new_crtc_state->planes_changed,
8865 new_crtc_state->mode_changed,
8866 new_crtc_state->active_changed,
8867 new_crtc_state->connectors_changed);
e7b07cee 8868
5c68c652
VL
8869 /* Disable cursor if disabling crtc */
8870 if (old_crtc_state->active && !new_crtc_state->active) {
8871 struct dc_cursor_position position;
8872
8873 memset(&position, 0, sizeof(position));
8874 mutex_lock(&dm->dc_lock);
8875 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8876 mutex_unlock(&dm->dc_lock);
8877 }
8878
27b3f4fc
LSL
8879 /* Copy all transient state flags into dc state */
8880 if (dm_new_crtc_state->stream) {
8881 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8882 dm_new_crtc_state->stream);
8883 }
8884
e7b07cee
HW
8885 /* handles headless hotplug case, updating new_state and
8886 * aconnector as needed
8887 */
8888
54d76575 8889 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
e7b07cee 8890
4711c033 8891 DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 8892
54d76575 8893 if (!dm_new_crtc_state->stream) {
e7b07cee 8894 /*
b830ebc9
HW
8895 * this could happen because of issues with
8896 * userspace notifications delivery.
8897 * In this case userspace tries to set mode on
1f6010a9
DF
8898 * display which is disconnected in fact.
8899 * dc_sink is NULL in this case on aconnector.
b830ebc9
HW
8900 * We expect reset mode will come soon.
8901 *
8902 * This can also happen when unplug is done
8903 * during resume sequence ended
8904 *
8905 * In this case, we want to pretend we still
8906 * have a sink to keep the pipe running so that
8907 * hw state is consistent with the sw state
8908 */
f1ad2f5e 8909 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
e7b07cee
HW
8910 __func__, acrtc->base.base.id);
8911 continue;
8912 }
8913
54d76575
LSL
8914 if (dm_old_crtc_state->stream)
8915 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
e7b07cee 8916
97028037
LP
8917 pm_runtime_get_noresume(dev->dev);
8918
e7b07cee 8919 acrtc->enabled = true;
0bc9706d
LSL
8920 acrtc->hw_mode = new_crtc_state->mode;
8921 crtc->hwmode = new_crtc_state->mode;
6ee90e88 8922 mode_set_reset_required = true;
0bc9706d 8923 } else if (modereset_required(new_crtc_state)) {
4711c033 8924 DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 8925 /* i.e. reset mode */
6ee90e88 8926 if (dm_old_crtc_state->stream)
54d76575 8927 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
a85ba005 8928
6ee90e88 8929 mode_set_reset_required = true;
e7b07cee
HW
8930 }
8931 } /* for_each_crtc_in_state() */
8932
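	/*
	 * Commit the constructed DC state to hardware in one shot. PSR must
	 * be off across a mode set/reset, and idle optimizations may be
	 * re-enabled once no CRTC needs vblank interrupts.
	 */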
	if (dc_state) {
		/* if there is a mode set or reset, disable eDP PSR */
		if (mode_set_reset_required)
			amdgpu_dm_psr_disable_all(dm);

		dm_enable_per_frame_crtc_master_sync(dc_state);
		mutex_lock(&dm->dc_lock);
		WARN_ON(!dc_commit_state(dm->dc, dc_state));
#if defined(CONFIG_DRM_AMD_DC_DCN)
		/* Allow idle optimization when vblank count is 0 for display off */
		if (dm->active_vblank_irq_count == 0)
			dc_allow_idle_optimizations(dm->dc, true);
#endif
		mutex_unlock(&dm->dc_lock);
	}

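	/*
	 * Cache the primary OTG instance for each CRTC that has a stream,
	 * presumably so later IRQ handling can map an OTG back to its CRTC.
	 */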
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream != NULL) {
			const struct dc_stream_status *status =
				dc_stream_get_status(dm_new_crtc_state->stream);

			if (!status)
				status = dc_stream_get_status_from_state(dc_state,
									 dm_new_crtc_state->stream);
			if (!status)
				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
			else
				acrtc->otg_inst = status->primary_otg_inst;
		}
	}
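	/*
	 * HDCP: reset a display's HDCP state if its stream was removed while
	 * content protection was enabled; otherwise reprogram HDCP whenever
	 * the desired content protection state changed.
	 */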
#ifdef CONFIG_DRM_AMD_DC_HDCP
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

		new_crtc_state = NULL;

		if (acrtc)
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
			dm_new_con_state->update_hdcp = true;
			continue;
		}

		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
			hdcp_update_display(
				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
				new_con_state->hdcp_content_type,
				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
	}
#endif

	/* Handle connector state changes */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct dc_surface_update dummy_updates[MAX_SURFACES];
		struct dc_stream_update stream_update;
		struct dc_info_packet hdr_packet;
		struct dc_stream_status *status = NULL;
		bool abm_changed, hdr_changed, scaling_changed;

		memset(&dummy_updates, 0, sizeof(dummy_updates));
		memset(&stream_update, 0, sizeof(stream_update));

		if (acrtc) {
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
		}

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		scaling_changed = is_scaling_state_different(dm_new_con_state,
							     dm_old_con_state);

		abm_changed = dm_new_crtc_state->abm_level !=
			      dm_old_crtc_state->abm_level;

		hdr_changed =
			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);

		if (!scaling_changed && !abm_changed && !hdr_changed)
			continue;

		stream_update.stream = dm_new_crtc_state->stream;
		if (scaling_changed) {
			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
					dm_new_con_state, dm_new_crtc_state->stream);

			stream_update.src = dm_new_crtc_state->stream->src;
			stream_update.dst = dm_new_crtc_state->stream->dst;
		}

		if (abm_changed) {
			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;

			stream_update.abm_level = &dm_new_crtc_state->abm_level;
		}

		if (hdr_changed) {
			fill_hdr_info_packet(new_con_state, &hdr_packet);
			stream_update.hdr_static_metadata = &hdr_packet;
		}

		status = dc_stream_get_status(dm_new_crtc_state->stream);

		if (WARN_ON(!status))
			continue;

		WARN_ON(!status->plane_count);

		/*
		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
		 * Here we create an empty update on each plane.
		 * To fix this, DC should permit updating only stream properties.
		 */
		for (j = 0; j < status->plane_count; j++)
			dummy_updates[j].surface = status->plane_states[0];

		mutex_lock(&dm->dc_lock);
		dc_commit_updates_for_stream(dm->dc,
					     dummy_updates,
					     status->plane_count,
					     dm_new_crtc_state->stream,
					     &stream_update,
					     dc_state);
		mutex_unlock(&dm->dc_lock);
	}

	/* Count number of newly disabled CRTCs for dropping PM refs later. */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		if (old_crtc_state->active && !new_crtc_state->active)
			crtc_disable_count++;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		/* For freesync config update on crtc state and params for irq */
		update_stream_irq_parameters(dm, dm_new_crtc_state);

		/* Handle vrr on->off / off->on transitions */
		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
						dm_new_crtc_state);
	}

	/**
	 * Enable interrupts for CRTCs that are newly enabled or went through
	 * a modeset. It was intentionally deferred until after the front end
	 * state was modified to wait until the OTG was on and so the IRQ
	 * handlers didn't access stale or invalid state.
	 */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
#ifdef CONFIG_DEBUG_FS
		bool configure_crc = false;
		enum amdgpu_dm_pipe_crc_source cur_crc_src;
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
		struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
#endif
		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
		cur_crc_src = acrtc->dm_irq_params.crc_src;
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
#endif
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (new_crtc_state->active &&
		    (!old_crtc_state->active ||
		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
			dc_stream_retain(dm_new_crtc_state->stream);
			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
			manage_dm_interrupts(adev, acrtc, true);

#ifdef CONFIG_DEBUG_FS
			/**
			 * Frontend may have changed so reapply the CRC capture
			 * settings for the stream.
			 */
			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
				configure_crc = true;
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
				if (amdgpu_dm_crc_window_is_activated(crtc)) {
					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
					acrtc->dm_irq_params.crc_window.update_win = true;
					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
					spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
					crc_rd_wrk->crtc = crtc;
					spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
				}
#endif
			}

			if (configure_crc)
				if (amdgpu_dm_crtc_configure_crc_source(
					crtc, dm_new_crtc_state, cur_crc_src))
					DRM_DEBUG_DRIVER("Failed to configure crc source");
#endif
		}
	}

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
		if (new_crtc_state->async_flip)
			wait_for_vblank = false;

	/* update planes when needed per CRTC */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream)
			amdgpu_dm_commit_planes(state, dc_state, dev,
						dm, crtc, wait_for_vblank);
	}

	/* Update audio instances for each connector. */
	amdgpu_dm_commit_audio(dev, state);

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
	/* restore the backlight level */
	if (dm->backlight_dev)
		amdgpu_dm_backlight_set_level(dm, dm->brightness[0]);
#endif
	/*
	 * send vblank event on all events not handled in flip and
	 * mark consumed event for drm_atomic_helper_commit_hw_done
	 */
	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {

		if (new_crtc_state->event)
			drm_send_event_locked(dev, &new_crtc_state->event->base);

		new_crtc_state->event = NULL;
	}
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	/* Signal HW programming completion */
	drm_atomic_helper_commit_hw_done(state);

	if (wait_for_vblank)
		drm_atomic_helper_wait_for_flip_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	/* return the stolen vga memory back to VRAM */
	if (!adev->mman.keep_stolen_vga_memory)
		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);

	/*
	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
	 * so we can put the GPU into runtime suspend if we're not driving any
	 * displays anymore
	 */
	for (i = 0; i < crtc_disable_count; i++)
		pm_runtime_put_autosuspend(dev->dev);
	pm_runtime_mark_last_busy(dev->dev);

	if (dc_state_temp)
		dc_release_state(dc_state_temp);
}

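/*
 * Construct and commit a minimal atomic state (connector, its CRTC and
 * primary plane) with mode_changed forced, to make the driver re-program
 * the previous display configuration.
 */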
static int dm_force_atomic_commit(struct drm_connector *connector)
{
	int ret = 0;
	struct drm_device *ddev = connector->dev;
	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
	struct drm_plane *plane = disconnected_acrtc->base.primary;
	struct drm_connector_state *conn_state;
	struct drm_crtc_state *crtc_state;
	struct drm_plane_state *plane_state;

	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ddev->mode_config.acquire_ctx;

	/* Construct an atomic state to restore previous display setting */

	/*
	 * Attach connectors to drm_atomic_state
	 */
	conn_state = drm_atomic_get_connector_state(state, connector);

	ret = PTR_ERR_OR_ZERO(conn_state);
	if (ret)
		goto out;

	/* Attach crtc to drm_atomic_state*/
	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);

	ret = PTR_ERR_OR_ZERO(crtc_state);
	if (ret)
		goto out;

	/* force a restore */
	crtc_state->mode_changed = true;

	/* Attach plane to drm_atomic_state */
	plane_state = drm_atomic_get_plane_state(state, plane);

	ret = PTR_ERR_OR_ZERO(plane_state);
	if (ret)
		goto out;

	/* Call commit internally with the state we just constructed */
	ret = drm_atomic_commit(state);

out:
	drm_atomic_state_put(state);
	if (ret)
		DRM_ERROR("Restoring old state failed with %i\n", ret);

	return ret;
}

/*
 * This function handles all cases when a set mode does not come upon
 * hotplug. This includes when a display is unplugged then plugged back
 * into the same port and when running without usermode desktop manager
 * support.
 */
void dm_restore_drm_connector_state(struct drm_device *dev,
				    struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct amdgpu_crtc *disconnected_acrtc;
	struct dm_crtc_state *acrtc_state;

	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
		return;

	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
	if (!disconnected_acrtc)
		return;

	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
	if (!acrtc_state->stream)
		return;

	/*
	 * If the previous sink is not released and different from the current,
	 * we deduce we are in a state where we cannot rely on a usermode call
	 * to turn on the display, so we do it here
	 */
	if (acrtc_state->stream->sink != aconnector->dc_sink)
		dm_force_atomic_commit(&aconnector->base);
}

/*
 * Grabs all modesetting locks to serialize against any blocking commits,
 * and waits for completion of all non-blocking commits.
 */
static int do_aquire_global_lock(struct drm_device *dev,
				 struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_commit *commit;
	long ret;

	/*
	 * Adding all modeset locks to acquire_ctx will
	 * ensure that when the framework releases it, the
	 * extra locks we are grabbing here will get released too
	 */
	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
	if (ret)
		return ret;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
						  struct drm_crtc_commit, commit_entry);
		if (commit)
			drm_crtc_commit_get(commit);
		spin_unlock(&crtc->commit_lock);

		if (!commit)
			continue;

		/*
		 * Make sure all pending HW programming completed and
		 * page flips done
		 */
		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);

		if (ret > 0)
			ret = wait_for_completion_interruptible_timeout(
					&commit->flip_done, 10*HZ);

		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
				  crtc->base.id, crtc->name);

		drm_crtc_commit_put(commit);
	}

	return ret < 0 ? ret : 0;
}

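/*
 * Derive the FreeSync configuration for a CRTC from the connector's
 * reported min/max refresh range and the requested mode; selects fixed,
 * variable or inactive VRR state accordingly.
 */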
static void get_freesync_config_for_crtc(
	struct dm_crtc_state *new_crtc_state,
	struct dm_connector_state *new_con_state)
{
	struct mod_freesync_config config = {0};
	struct amdgpu_dm_connector *aconnector =
		to_amdgpu_dm_connector(new_con_state->base.connector);
	struct drm_display_mode *mode = &new_crtc_state->base.mode;
	int vrefresh = drm_mode_vrefresh(mode);
	bool fs_vid_mode = false;

	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
					vrefresh >= aconnector->min_vfreq &&
					vrefresh <= aconnector->max_vfreq;

	if (new_crtc_state->vrr_supported) {
		new_crtc_state->stream->ignore_msa_timing_param = true;
		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;

		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
		config.vsif_supported = true;
		config.btr = true;

		if (fs_vid_mode) {
			config.state = VRR_STATE_ACTIVE_FIXED;
			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
			goto out;
		} else if (new_crtc_state->base.vrr_enabled) {
			config.state = VRR_STATE_ACTIVE_VARIABLE;
		} else {
			config.state = VRR_STATE_INACTIVE;
		}
	}
out:
	new_crtc_state->freesync_config = config;
}

static void reset_freesync_config_for_crtc(
	struct dm_crtc_state *new_crtc_state)
{
	new_crtc_state->vrr_supported = false;

	memset(&new_crtc_state->vrr_infopacket, 0,
	       sizeof(new_crtc_state->vrr_infopacket));
}

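/*
 * Returns true when the two modes differ only in their vertical
 * parameters (vtotal and vsync position) while the vsync width and all
 * horizontal timing stay the same; i.e. only the vertical front porch
 * changed, which is what FreeSync fixed-refresh video modes rely on.
 */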
static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state)
{
	struct drm_display_mode old_mode, new_mode;

	if (!old_crtc_state || !new_crtc_state)
		return false;

	old_mode = old_crtc_state->mode;
	new_mode = new_crtc_state->mode;

	if (old_mode.clock == new_mode.clock &&
	    old_mode.hdisplay == new_mode.hdisplay &&
	    old_mode.vdisplay == new_mode.vdisplay &&
	    old_mode.htotal == new_mode.htotal &&
	    old_mode.vtotal != new_mode.vtotal &&
	    old_mode.hsync_start == new_mode.hsync_start &&
	    old_mode.vsync_start != new_mode.vsync_start &&
	    old_mode.hsync_end == new_mode.hsync_end &&
	    old_mode.vsync_end != new_mode.vsync_end &&
	    old_mode.hskew == new_mode.hskew &&
	    old_mode.vscan == new_mode.vscan &&
	    (old_mode.vsync_end - old_mode.vsync_start) ==
	    (new_mode.vsync_end - new_mode.vsync_start))
		return true;

	return false;
}

static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
	uint64_t num, den, res;
	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;

	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;

	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
	den = (unsigned long long)new_crtc_state->mode.htotal *
	      (unsigned long long)new_crtc_state->mode.vtotal;

	res = div_u64(num, den);
	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
}

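/*
 * Validate and apply CRTC changes during atomic check: creates (or
 * removes) the DC stream backing the CRTC, updates scaling, ABM, color
 * management and FreeSync settings, and flags when full DC validation
 * of the new topology is needed.
 */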
4b9674e5
LL
9446static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9447 struct drm_atomic_state *state,
9448 struct drm_crtc *crtc,
9449 struct drm_crtc_state *old_crtc_state,
9450 struct drm_crtc_state *new_crtc_state,
9451 bool enable,
9452 bool *lock_and_validation_needed)
e7b07cee 9453{
eb3dc897 9454 struct dm_atomic_state *dm_state = NULL;
54d76575 9455 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9635b754 9456 struct dc_stream_state *new_stream;
62f55537 9457 int ret = 0;
d4d4a645 9458
1f6010a9
DF
9459 /*
9460 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9461 * update changed items
9462 */
4b9674e5
LL
9463 struct amdgpu_crtc *acrtc = NULL;
9464 struct amdgpu_dm_connector *aconnector = NULL;
9465 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9466 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
e7b07cee 9467
4b9674e5 9468 new_stream = NULL;
9635b754 9469
4b9674e5
LL
9470 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9471 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9472 acrtc = to_amdgpu_crtc(crtc);
4b9674e5 9473 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
19f89e23 9474
4b9674e5
LL
9475 /* TODO This hack should go away */
9476 if (aconnector && enable) {
9477 /* Make sure fake sink is created in plug-in scenario */
9478 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9479 &aconnector->base);
9480 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9481 &aconnector->base);
19f89e23 9482
4b9674e5
LL
9483 if (IS_ERR(drm_new_conn_state)) {
9484 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9485 goto fail;
9486 }
19f89e23 9487
4b9674e5
LL
9488 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9489 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
19f89e23 9490
02d35a67
JFZ
9491 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9492 goto skip_modeset;
9493
cbd14ae7
SW
9494 new_stream = create_validate_stream_for_sink(aconnector,
9495 &new_crtc_state->mode,
9496 dm_new_conn_state,
9497 dm_old_crtc_state->stream);
19f89e23 9498
4b9674e5
LL
9499 /*
9500 * we can have no stream on ACTION_SET if a display
9501 * was disconnected during S3, in this case it is not an
9502 * error, the OS will be updated after detection, and
9503 * will do the right thing on next atomic commit
9504 */
19f89e23 9505
4b9674e5
LL
9506 if (!new_stream) {
9507 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9508 __func__, acrtc->base.base.id);
9509 ret = -ENOMEM;
9510 goto fail;
9511 }
e7b07cee 9512
3d4e52d0
VL
9513 /*
9514 * TODO: Check VSDB bits to decide whether this should
9515 * be enabled or not.
9516 */
9517 new_stream->triggered_crtc_reset.enabled =
9518 dm->force_timing_sync;
9519
4b9674e5 9520 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
98e6436d 9521
88694af9
NK
9522 ret = fill_hdr_info_packet(drm_new_conn_state,
9523 &new_stream->hdr_static_metadata);
9524 if (ret)
9525 goto fail;
9526
7e930949
NK
9527 /*
9528 * If we already removed the old stream from the context
9529 * (and set the new stream to NULL) then we can't reuse
9530 * the old stream even if the stream and scaling are unchanged.
9531 * We'll hit the BUG_ON and black screen.
9532 *
9533 * TODO: Refactor this function to allow this check to work
9534 * in all conditions.
9535 */
a85ba005
NC
9536 if (amdgpu_freesync_vid_mode &&
9537 dm_new_crtc_state->stream &&
9538 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9539 goto skip_modeset;
9540
7e930949
NK
9541 if (dm_new_crtc_state->stream &&
9542 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
4b9674e5
LL
9543 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9544 new_crtc_state->mode_changed = false;
9545 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
9546 new_crtc_state->mode_changed);
62f55537 9547 }
4b9674e5 9548 }
b830ebc9 9549
02d35a67 9550 /* mode_changed flag may get updated above, need to check again */
4b9674e5
LL
9551 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9552 goto skip_modeset;
e7b07cee 9553
4711c033 9554 DRM_DEBUG_ATOMIC(
4b9674e5
LL
9555 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9556 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
9557 "connectors_changed:%d\n",
9558 acrtc->crtc_id,
9559 new_crtc_state->enable,
9560 new_crtc_state->active,
9561 new_crtc_state->planes_changed,
9562 new_crtc_state->mode_changed,
9563 new_crtc_state->active_changed,
9564 new_crtc_state->connectors_changed);
62f55537 9565
4b9674e5
LL
9566 /* Remove stream for any changed/disabled CRTC */
9567 if (!enable) {
62f55537 9568
4b9674e5
LL
9569 if (!dm_old_crtc_state->stream)
9570 goto skip_modeset;
eb3dc897 9571
a85ba005
NC
9572 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9573 is_timing_unchanged_for_freesync(new_crtc_state,
9574 old_crtc_state)) {
9575 new_crtc_state->mode_changed = false;
9576 DRM_DEBUG_DRIVER(
9577 "Mode change not required for front porch change, "
9578 "setting mode_changed to %d",
9579 new_crtc_state->mode_changed);
9580
9581 set_freesync_fixed_config(dm_new_crtc_state);
9582
9583 goto skip_modeset;
9584 } else if (amdgpu_freesync_vid_mode && aconnector &&
9585 is_freesync_video_mode(&new_crtc_state->mode,
9586 aconnector)) {
9587 set_freesync_fixed_config(dm_new_crtc_state);
9588 }
9589
4b9674e5
LL
9590 ret = dm_atomic_get_state(state, &dm_state);
9591 if (ret)
9592 goto fail;
e7b07cee 9593
4b9674e5
LL
9594 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9595 crtc->base.id);
62f55537 9596
4b9674e5
LL
9597 /* i.e. reset mode */
9598 if (dc_remove_stream_from_ctx(
9599 dm->dc,
9600 dm_state->context,
9601 dm_old_crtc_state->stream) != DC_OK) {
9602 ret = -EINVAL;
9603 goto fail;
9604 }
62f55537 9605
4b9674e5
LL
9606 dc_stream_release(dm_old_crtc_state->stream);
9607 dm_new_crtc_state->stream = NULL;
bb47de73 9608
4b9674e5 9609 reset_freesync_config_for_crtc(dm_new_crtc_state);
62f55537 9610
4b9674e5 9611 *lock_and_validation_needed = true;
62f55537 9612
4b9674e5
LL
9613 } else {/* Add stream for any updated/enabled CRTC */
9614 /*
9615 * Quick fix to prevent NULL pointer on new_stream when
9616 * added MST connectors not found in existing crtc_state in the chained mode
9617 * TODO: need to dig out the root cause of that
9618 */
9619 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9620 goto skip_modeset;
62f55537 9621
4b9674e5
LL
9622 if (modereset_required(new_crtc_state))
9623 goto skip_modeset;
62f55537 9624
4b9674e5
LL
9625 if (modeset_required(new_crtc_state, new_stream,
9626 dm_old_crtc_state->stream)) {
62f55537 9627
4b9674e5 9628 WARN_ON(dm_new_crtc_state->stream);
eb3dc897 9629
4b9674e5
LL
9630 ret = dm_atomic_get_state(state, &dm_state);
9631 if (ret)
9632 goto fail;
27b3f4fc 9633
4b9674e5 9634 dm_new_crtc_state->stream = new_stream;
62f55537 9635
4b9674e5 9636 dc_stream_retain(new_stream);
1dc90497 9637
4711c033
LT
9638 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9639 crtc->base.id);
1dc90497 9640
4b9674e5
LL
9641 if (dc_add_stream_to_ctx(
9642 dm->dc,
9643 dm_state->context,
9644 dm_new_crtc_state->stream) != DC_OK) {
9645 ret = -EINVAL;
9646 goto fail;
9b690ef3
BL
9647 }
9648
4b9674e5
LL
9649 *lock_and_validation_needed = true;
9650 }
9651 }
e277adc5 9652
4b9674e5
LL
9653skip_modeset:
9654 /* Release extra reference */
9655 if (new_stream)
9656 dc_stream_release(new_stream);
e277adc5 9657
4b9674e5
LL
9658 /*
9659 * We want to do dc stream updates that do not require a
9660 * full modeset below.
9661 */
2afda735 9662 if (!(enable && aconnector && new_crtc_state->active))
4b9674e5
LL
9663 return 0;
9664 /*
9665 * Given the above conditions, the dc state cannot be NULL because:
9666 * 1. the CRTC is being enabled (its stream was just added to the
9667 * dc context, or is already in it),
9668 * 2. it has a valid connector attached, and
9669 * 3. it is currently active and enabled,
9670 * => so the dc stream state currently exists.
9671 */
9672 BUG_ON(dm_new_crtc_state->stream == NULL);
a9e8d275 9673
4b9674e5 9674 /* Scaling or underscan settings */
c521fc31
RL
9675 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
9676 drm_atomic_crtc_needs_modeset(new_crtc_state))
4b9674e5
LL
9677 update_stream_scaling_settings(
9678 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
98e6436d 9679
b05e2c5e
DF
9680 /* ABM settings */
9681 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9682
4b9674e5
LL
9683 /*
9684 * Color management settings. We also update color properties
9685 * when a modeset is needed, to ensure it gets reprogrammed.
9686 */
9687 if (dm_new_crtc_state->base.color_mgmt_changed ||
9688 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
cf020d49 9689 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
4b9674e5
LL
9690 if (ret)
9691 goto fail;
62f55537 9692 }
e7b07cee 9693
4b9674e5
LL
9694 /* Update Freesync settings. */
9695 get_freesync_config_for_crtc(dm_new_crtc_state,
9696 dm_new_conn_state);
9697
62f55537 9698 return ret;
9635b754
DS
9699
9700fail:
9701 if (new_stream)
9702 dc_stream_release(new_stream);
9703 return ret;
62f55537 9704}
9b690ef3 9705
f6ff2a08
NK
9706static bool should_reset_plane(struct drm_atomic_state *state,
9707 struct drm_plane *plane,
9708 struct drm_plane_state *old_plane_state,
9709 struct drm_plane_state *new_plane_state)
9710{
9711 struct drm_plane *other;
9712 struct drm_plane_state *old_other_state, *new_other_state;
9713 struct drm_crtc_state *new_crtc_state;
9714 int i;
9715
70a1efac
NK
9716 /*
9717 * TODO: Remove this hack once the checks below are sufficient
9718 * to determine when we need to reset all the planes on
9719 * the stream.
9720 */
9721 if (state->allow_modeset)
9722 return true;
9723
f6ff2a08
NK
9724 /* Exit early if we know that we're adding or removing the plane. */
9725 if (old_plane_state->crtc != new_plane_state->crtc)
9726 return true;
9727
9728 /* old crtc == new_crtc == NULL, plane not in context. */
9729 if (!new_plane_state->crtc)
9730 return false;
9731
9732 new_crtc_state =
9733 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9734
9735 if (!new_crtc_state)
9736 return true;
9737
7316c4ad
NK
9738 /* CRTC Degamma changes currently require us to recreate planes. */
9739 if (new_crtc_state->color_mgmt_changed)
9740 return true;
9741
f6ff2a08
NK
9742 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9743 return true;
9744
9745 /*
9746 * If there are any new primary or overlay planes being added or
9747 * removed then the z-order can potentially change. To ensure
9748 * correct z-order and pipe acquisition the current DC architecture
9749 * requires us to remove and recreate all existing planes.
9750 *
9751 * TODO: Come up with a more elegant solution for this.
9752 */
9753 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
6eed95b0 9754 struct amdgpu_framebuffer *old_afb, *new_afb;
f6ff2a08
NK
9755 if (other->type == DRM_PLANE_TYPE_CURSOR)
9756 continue;
9757
9758 if (old_other_state->crtc != new_plane_state->crtc &&
9759 new_other_state->crtc != new_plane_state->crtc)
9760 continue;
9761
9762 if (old_other_state->crtc != new_other_state->crtc)
9763 return true;
9764
dc4cb30d
NK
9765 /* Src/dst size and scaling updates. */
9766 if (old_other_state->src_w != new_other_state->src_w ||
9767 old_other_state->src_h != new_other_state->src_h ||
9768 old_other_state->crtc_w != new_other_state->crtc_w ||
9769 old_other_state->crtc_h != new_other_state->crtc_h)
9770 return true;
9771
9772 /* Rotation / mirroring updates. */
9773 if (old_other_state->rotation != new_other_state->rotation)
9774 return true;
9775
9776 /* Blending updates. */
9777 if (old_other_state->pixel_blend_mode !=
9778 new_other_state->pixel_blend_mode)
9779 return true;
9780
9781 /* Alpha updates. */
9782 if (old_other_state->alpha != new_other_state->alpha)
9783 return true;
9784
9785 /* Colorspace changes. */
9786 if (old_other_state->color_range != new_other_state->color_range ||
9787 old_other_state->color_encoding != new_other_state->color_encoding)
9788 return true;
9789
9a81cc60
NK
9790 /* Framebuffer checks fall at the end. */
9791 if (!old_other_state->fb || !new_other_state->fb)
9792 continue;
9793
9794 /* Pixel format changes can require bandwidth updates. */
9795 if (old_other_state->fb->format != new_other_state->fb->format)
9796 return true;
9797
6eed95b0
BN
9798 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9799 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9a81cc60
NK
9800
9801 /* Tiling and DCC changes also require bandwidth updates. */
37384b3f
BN
9802 if (old_afb->tiling_flags != new_afb->tiling_flags ||
9803 old_afb->base.modifier != new_afb->base.modifier)
f6ff2a08
NK
9804 return true;
9805 }
9806
9807 return false;
9808}
9809
b0455fda
SS
9810static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9811 struct drm_plane_state *new_plane_state,
9812 struct drm_framebuffer *fb)
9813{
e72868c4
SS
9814 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9815 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
b0455fda 9816 unsigned int pitch;
e72868c4 9817 bool linear;
b0455fda
SS
9818
9819 if (fb->width > new_acrtc->max_cursor_width ||
9820 fb->height > new_acrtc->max_cursor_height) {
9821 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9822 new_plane_state->fb->width,
9823 new_plane_state->fb->height);
9824 return -EINVAL;
9825 }
9826 if (new_plane_state->src_w != fb->width << 16 ||
9827 new_plane_state->src_h != fb->height << 16) {
9828 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9829 return -EINVAL;
9830 }
9831
9832 /* Pitch in pixels */
9833 pitch = fb->pitches[0] / fb->format->cpp[0];
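/*
 * Worked example (illustrative, not from the original source): a 256x256
 * ARGB8888 cursor FB has cpp[0] = 4 bytes per pixel and, without padding,
 * pitches[0] = 256 * 4 = 1024 bytes, so pitch = 1024 / 4 = 256 pixels,
 * which matches fb->width and is one of the pitches accepted below.
 */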
9834
9835 if (fb->width != pitch) {
9836 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9837 fb->width, pitch);
9838 return -EINVAL;
9839 }
9840
9841 switch (pitch) {
9842 case 64:
9843 case 128:
9844 case 256:
9845 /* FB pitch is supported by cursor plane */
9846 break;
9847 default:
9848 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9849 return -EINVAL;
9850 }
9851
e72868c4
SS
9852 /* Core DRM takes care of checking FB modifiers, so we only need to
9853 * check tiling flags when the FB doesn't have a modifier. */
9854 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9855 if (adev->family < AMDGPU_FAMILY_AI) {
9856 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9857 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9858 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9859 } else {
9860 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9861 }
9862 if (!linear) {
9863 DRM_DEBUG_ATOMIC("Cursor FB not linear");
9864 return -EINVAL;
9865 }
9866 }
9867
b0455fda
SS
9868 return 0;
9869}
9870
9e869063
LL
9871static int dm_update_plane_state(struct dc *dc,
9872 struct drm_atomic_state *state,
9873 struct drm_plane *plane,
9874 struct drm_plane_state *old_plane_state,
9875 struct drm_plane_state *new_plane_state,
9876 bool enable,
9877 bool *lock_and_validation_needed)
62f55537 9878{
eb3dc897
NK
9880 struct dm_atomic_state *dm_state = NULL;
62f55537 9881 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
0bc9706d 9882 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
54d76575 9883 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
54d76575 9884 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
626bf90f 9885 struct amdgpu_crtc *new_acrtc;
f6ff2a08 9886 bool needs_reset;
62f55537 9887 int ret = 0;
e7b07cee 9888
9e869063
LL
9890 new_plane_crtc = new_plane_state->crtc;
9891 old_plane_crtc = old_plane_state->crtc;
9892 dm_new_plane_state = to_dm_plane_state(new_plane_state);
9893 dm_old_plane_state = to_dm_plane_state(old_plane_state);
62f55537 9894
626bf90f
SS
9895 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9896 if (!enable || !new_plane_crtc ||
9897 drm_atomic_plane_disabling(plane->state, new_plane_state))
9898 return 0;
9899
9900 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9901
5f581248
SS
9902 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9903 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9904 return -EINVAL;
9905 }
9906
24f99d2b 9907 if (new_plane_state->fb) {
b0455fda
SS
9908 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9909 new_plane_state->fb);
9910 if (ret)
9911 return ret;
24f99d2b
SS
9912 }
9913
9e869063 9914 return 0;
626bf90f 9915 }
9b690ef3 9916
f6ff2a08
NK
9917 needs_reset = should_reset_plane(state, plane, old_plane_state,
9918 new_plane_state);
9919
9e869063
LL
9920 /* Remove any changed/removed planes */
9921 if (!enable) {
f6ff2a08 9922 if (!needs_reset)
9e869063 9923 return 0;
a7b06724 9924
9e869063
LL
9925 if (!old_plane_crtc)
9926 return 0;
62f55537 9927
9e869063
LL
9928 old_crtc_state = drm_atomic_get_old_crtc_state(
9929 state, old_plane_crtc);
9930 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9b690ef3 9931
9e869063
LL
9932 if (!dm_old_crtc_state->stream)
9933 return 0;
62f55537 9934
9e869063
LL
9935 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9936 plane->base.id, old_plane_crtc->base.id);
9b690ef3 9937
9e869063
LL
9938 ret = dm_atomic_get_state(state, &dm_state);
9939 if (ret)
9940 return ret;
eb3dc897 9941
9e869063
LL
9942 if (!dc_remove_plane_from_context(
9943 dc,
9944 dm_old_crtc_state->stream,
9945 dm_old_plane_state->dc_state,
9946 dm_state->context)) {
62f55537 9947
c3537613 9948 return -EINVAL;
9e869063 9949 }
e7b07cee 9950
9b690ef3 9951
9e869063
LL
9952 dc_plane_state_release(dm_old_plane_state->dc_state);
9953 dm_new_plane_state->dc_state = NULL;
1dc90497 9954
9e869063 9955 *lock_and_validation_needed = true;
1dc90497 9956
9e869063
LL
9957 } else { /* Add new planes */
9958 struct dc_plane_state *dc_new_plane_state;
1dc90497 9959
9e869063
LL
9960 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9961 return 0;
e7b07cee 9962
9e869063
LL
9963 if (!new_plane_crtc)
9964 return 0;
e7b07cee 9965
9e869063
LL
9966 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9967 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1dc90497 9968
9e869063
LL
9969 if (!dm_new_crtc_state->stream)
9970 return 0;
62f55537 9971
f6ff2a08 9972 if (!needs_reset)
9e869063 9973 return 0;
62f55537 9974
8c44515b
AP
9975 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9976 if (ret)
9977 return ret;
9978
9e869063 9979 WARN_ON(dm_new_plane_state->dc_state);
9b690ef3 9980
9e869063
LL
9981 dc_new_plane_state = dc_create_plane_state(dc);
9982 if (!dc_new_plane_state)
9983 return -ENOMEM;
62f55537 9984
4711c033
LT
9985 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
9986 plane->base.id, new_plane_crtc->base.id);
8c45c5db 9987
695af5f9 9988 ret = fill_dc_plane_attributes(
1348969a 9989 drm_to_adev(new_plane_crtc->dev),
9e869063
LL
9990 dc_new_plane_state,
9991 new_plane_state,
9992 new_crtc_state);
9993 if (ret) {
9994 dc_plane_state_release(dc_new_plane_state);
9995 return ret;
9996 }
62f55537 9997
9e869063
LL
9998 ret = dm_atomic_get_state(state, &dm_state);
9999 if (ret) {
10000 dc_plane_state_release(dc_new_plane_state);
10001 return ret;
10002 }
eb3dc897 10003
9e869063
LL
10004 /*
10005 * Any atomic check errors that occur after this will
10006 * not need a release. The plane state will be attached
10007 * to the stream, and therefore part of the atomic
10008 * state. It'll be released when the atomic state is
10009 * cleaned.
10010 */
10011 if (!dc_add_plane_to_context(
10012 dc,
10013 dm_new_crtc_state->stream,
10014 dc_new_plane_state,
10015 dm_state->context)) {
62f55537 10016
9e869063
LL
10017 dc_plane_state_release(dc_new_plane_state);
10018 return -EINVAL;
10019 }
8c45c5db 10020
9e869063 10021 dm_new_plane_state->dc_state = dc_new_plane_state;
000b59ea 10022
9e869063
LL
10023 /* Tell DC to do a full surface update every time there
10024 * is a plane change. Inefficient, but works for now.
10025 */
10026 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10027
10028 *lock_and_validation_needed = true;
62f55537 10029 }
e7b07cee
HW
10030
62f55537
AG
10032 return ret;
10033}
a87fa993 10034
12f4849a
SS
10035static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10036 struct drm_crtc *crtc,
10037 struct drm_crtc_state *new_crtc_state)
10038{
10039 struct drm_plane_state *new_cursor_state, *new_primary_state;
10040 int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
10041
10042 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10043 * cursor per pipe but it's going to inherit the scaling and
10044 * positioning from the underlying pipe. Check that the cursor plane's
10045 * scaling matches the primary plane's. */
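/*
 * Worked example (illustrative, not from the original source): a 64x64
 * cursor shown at 64x64 has src_w = 64 << 16, so cursor_scale_w =
 * 64 * 1000 / 64 = 1000 (1.0x). If the primary plane scans a 1920-wide
 * FB out to a 3840-wide CRTC, primary_scale_w = 3840 * 1000 / 1920 =
 * 2000 (2.0x); the scales differ and the commit is rejected below.
 */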
10046
10047 new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
10048 new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
7df4ceb6
SE
10049 if (!new_cursor_state || !new_primary_state ||
10050 !new_cursor_state->fb || !new_primary_state->fb) {
12f4849a
SS
10051 return 0;
10052 }
10053
10054 cursor_scale_w = new_cursor_state->crtc_w * 1000 /
10055 (new_cursor_state->src_w >> 16);
10056 cursor_scale_h = new_cursor_state->crtc_h * 1000 /
10057 (new_cursor_state->src_h >> 16);
10058
10059 primary_scale_w = new_primary_state->crtc_w * 1000 /
10060 (new_primary_state->src_w >> 16);
10061 primary_scale_h = new_primary_state->crtc_h * 1000 /
10062 (new_primary_state->src_h >> 16);
10063
10064 if (cursor_scale_w != primary_scale_w ||
10065 cursor_scale_h != primary_scale_h) {
8333388b 10066 drm_dbg_atomic(crtc->dev, "Cursor plane scaling doesn't match primary plane\n");
12f4849a
SS
10067 return -EINVAL;
10068 }
10069
10070 return 0;
10071}
10072
e10517b3 10073#if defined(CONFIG_DRM_AMD_DC_DCN)
44be939f
ML
10074static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10075{
10076 struct drm_connector *connector;
10077 struct drm_connector_state *conn_state;
10078 struct amdgpu_dm_connector *aconnector = NULL;
10079 int i;
10080 for_each_new_connector_in_state(state, connector, conn_state, i) {
10081 if (conn_state->crtc != crtc)
10082 continue;
10083
10084 aconnector = to_amdgpu_dm_connector(connector);
10085 if (!aconnector->port || !aconnector->mst_port)
10086 aconnector = NULL;
10087 else
10088 break;
10089 }
10090
10091 if (!aconnector)
10092 return 0;
10093
10094 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10095}
e10517b3 10096#endif
44be939f 10097
16e9b3e5
RS
10098static int validate_overlay(struct drm_atomic_state *state)
10099{
10100 int i;
10101 struct drm_plane *plane;
a6c3c37b 10102 struct drm_plane_state *new_plane_state;
e8ce3d47 10103 struct drm_plane_state *primary_state, *cursor_state, *overlay_state = NULL;
16e9b3e5
RS
10104
10105 /* Check if primary plane is contained inside overlay */
a6c3c37b 10106 for_each_new_plane_in_state_reverse(state, plane, new_plane_state, i) {
16e9b3e5
RS
10107 if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
10108 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10109 return 0;
10110
10111 overlay_state = new_plane_state;
10112 continue;
10113 }
10114 }
10115
10116 /* check if we're making changes to the overlay plane */
10117 if (!overlay_state)
10118 return 0;
10119
10120 /* check if overlay plane is enabled */
10121 if (!overlay_state->crtc)
10122 return 0;
10123
10124 /* find the primary plane for the CRTC that the overlay is enabled on */
10125 primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
10126 if (IS_ERR(primary_state))
10127 return PTR_ERR(primary_state);
10128
10129 /* check if primary plane is enabled */
10130 if (!primary_state->crtc)
10131 return 0;
10132
e8ce3d47
RS
10133 /* check if cursor plane is enabled */
10134 cursor_state = drm_atomic_get_plane_state(state, overlay_state->crtc->cursor);
10135 if (IS_ERR(cursor_state))
10136 return PTR_ERR(cursor_state);
10137
10138 if (drm_atomic_plane_disabling(overlay_state->crtc->cursor->state, cursor_state))
10139 return 0;
10140
16e9b3e5
RS
10141 /* Perform the bounds check to ensure the overlay plane covers the primary */
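/*
 * Worked example (illustrative, not from the original source): an overlay
 * at (0,0) sized 1920x1080 fully covers a primary at (0,0) sized 1280x720,
 * so the state is allowed; a primary at (1000,600) sized 1920x1080 extends
 * past the overlay's right and bottom edges and is rejected.
 */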
10142 if (primary_state->crtc_x < overlay_state->crtc_x ||
10143 primary_state->crtc_y < overlay_state->crtc_y ||
10144 primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
10145 primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
10146 DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
10147 return -EINVAL;
10148 }
10149
10150 return 0;
10151}
10152
b8592b48
LL
10153/**
10154 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10155 * @dev: The DRM device
10156 * @state: The atomic state to commit
10157 *
10158 * Validate that the given atomic state is programmable by DC into hardware.
10159 * This involves constructing a &struct dc_state reflecting the new hardware
10160 * state we wish to commit, then querying DC to see if it is programmable. It's
10161 * important not to modify the existing DC state. Otherwise, atomic_check
10162 * may unexpectedly commit hardware changes.
10163 *
10164 * When validating the DC state, it's important that the right locks are
10165 * acquired. For full updates case which removes/adds/updates streams on one
10166 * CRTC while flipping on another CRTC, acquiring the global lock guarantees
10167 * that any such full update commit will wait for completion of any outstanding
f6d7c7fa 10168 * flip using DRM's synchronization events.
b8592b48
LL
10169 *
10170 * Note that DM adds the affected connectors for all CRTCs in state, when that
10171 * might not seem necessary. This is because DC stream creation requires the
10172 * DC sink, which is tied to the DRM connector state. Cleaning this up should
10173 * be possible but non-trivial - a possible TODO item. (An illustrative
10174 * sketch of how this hook is wired up follows the function body.)
10175 * Return: 0 on success, negative error code if validation failed.
10176 */
7578ecda
AD
10177static int amdgpu_dm_atomic_check(struct drm_device *dev,
10178 struct drm_atomic_state *state)
62f55537 10179{
1348969a 10180 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897 10181 struct dm_atomic_state *dm_state = NULL;
62f55537 10182 struct dc *dc = adev->dm.dc;
62f55537 10183 struct drm_connector *connector;
c2cea706 10184 struct drm_connector_state *old_con_state, *new_con_state;
62f55537 10185 struct drm_crtc *crtc;
fc9e9920 10186 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9e869063
LL
10187 struct drm_plane *plane;
10188 struct drm_plane_state *old_plane_state, *new_plane_state;
74a16675 10189 enum dc_status status;
1e88ad0a 10190 int ret, i;
62f55537 10191 bool lock_and_validation_needed = false;
886876ec 10192 struct dm_crtc_state *dm_old_crtc_state;
62f55537 10193
e8a98235 10194 trace_amdgpu_dm_atomic_check_begin(state);
c44a22b3 10195
62f55537 10196 ret = drm_atomic_helper_check_modeset(dev, state);
01e28f9c
MD
10197 if (ret)
10198 goto fail;
62f55537 10199
c5892a10
SW
10200 /* Check connector changes */
10201 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10202 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10203 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10204
10205 /* Skip connectors that are disabled or part of modeset already. */
10206 if (!old_con_state->crtc && !new_con_state->crtc)
10207 continue;
10208
10209 if (!new_con_state->crtc)
10210 continue;
10211
10212 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10213 if (IS_ERR(new_crtc_state)) {
10214 ret = PTR_ERR(new_crtc_state);
10215 goto fail;
10216 }
10217
10218 if (dm_old_con_state->abm_level !=
10219 dm_new_con_state->abm_level)
10220 new_crtc_state->connectors_changed = true;
10221 }
10222
e10517b3 10223#if defined(CONFIG_DRM_AMD_DC_DCN)
349a19b2 10224 if (dc_resource_is_dsc_encoding_supported(dc)) {
44be939f
ML
10225 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10226 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10227 ret = add_affected_mst_dsc_crtcs(state, crtc);
10228 if (ret)
10229 goto fail;
10230 }
10231 }
10232 }
e10517b3 10233#endif
1e88ad0a 10234 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
886876ec
EB
10235 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10236
1e88ad0a 10237 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
98e6436d 10238 !new_crtc_state->color_mgmt_changed &&
886876ec
EB
10239 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10240 dm_old_crtc_state->dsc_force_changed == false)
1e88ad0a 10241 continue;
7bef1af3 10242
03fc4cf4
MY
10243 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10244 if (ret)
10245 goto fail;
10246
1e88ad0a
S
10247 if (!new_crtc_state->enable)
10248 continue;
fc9e9920 10249
1e88ad0a
S
10250 ret = drm_atomic_add_affected_connectors(state, crtc);
10251 if (ret)
10252 goto fail;
fc9e9920 10253
1e88ad0a
S
10254 ret = drm_atomic_add_affected_planes(state, crtc);
10255 if (ret)
10256 goto fail;
115a385c 10257
cbac53f7 10258 if (dm_old_crtc_state->dsc_force_changed)
115a385c 10259 new_crtc_state->mode_changed = true;
e7b07cee
HW
10260 }
10261
2d9e6431
NK
10262 /*
10263 * Add all primary and overlay planes on the CRTC to the state
10264 * whenever a plane is enabled to maintain correct z-ordering
10265 * and to enable fast surface updates.
10266 */
10267 drm_for_each_crtc(crtc, dev) {
10268 bool modified = false;
10269
10270 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10271 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10272 continue;
10273
10274 if (new_plane_state->crtc == crtc ||
10275 old_plane_state->crtc == crtc) {
10276 modified = true;
10277 break;
10278 }
10279 }
10280
10281 if (!modified)
10282 continue;
10283
10284 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10285 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10286 continue;
10287
10288 new_plane_state =
10289 drm_atomic_get_plane_state(state, plane);
10290
10291 if (IS_ERR(new_plane_state)) {
10292 ret = PTR_ERR(new_plane_state);
10293 goto fail;
10294 }
10295 }
10296 }
10297
62f55537 10298 /* Remove existing planes if they are modified */
9e869063
LL
10299 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10300 ret = dm_update_plane_state(dc, state, plane,
10301 old_plane_state,
10302 new_plane_state,
10303 false,
10304 &lock_and_validation_needed);
10305 if (ret)
10306 goto fail;
62f55537
AG
10307 }
10308
10309 /* Disable all crtcs which require disable */
4b9674e5
LL
10310 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10311 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10312 old_crtc_state,
10313 new_crtc_state,
10314 false,
10315 &lock_and_validation_needed);
10316 if (ret)
10317 goto fail;
62f55537
AG
10318 }
10319
10320 /* Enable all crtcs which require enable */
4b9674e5
LL
10321 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10322 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10323 old_crtc_state,
10324 new_crtc_state,
10325 true,
10326 &lock_and_validation_needed);
10327 if (ret)
10328 goto fail;
62f55537
AG
10329 }
10330
16e9b3e5
RS
10331 ret = validate_overlay(state);
10332 if (ret)
10333 goto fail;
10334
62f55537 10335 /* Add new/modified planes */
9e869063
LL
10336 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10337 ret = dm_update_plane_state(dc, state, plane,
10338 old_plane_state,
10339 new_plane_state,
10340 true,
10341 &lock_and_validation_needed);
10342 if (ret)
10343 goto fail;
62f55537
AG
10344 }
10345
b349f76e
ES
10346 /* Run this here since we want to validate the streams we created */
10347 ret = drm_atomic_helper_check_planes(dev, state);
10348 if (ret)
10349 goto fail;
62f55537 10350
12f4849a
SS
10351 /* Check cursor planes scaling */
10352 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10353 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10354 if (ret)
10355 goto fail;
10356 }
10357
43d10d30
NK
10358 if (state->legacy_cursor_update) {
10359 /*
10360 * This is a fast cursor update coming from the plane update
10361 * helper, check if it can be done asynchronously for better
10362 * performance.
10363 */
10364 state->async_update =
10365 !drm_atomic_helper_async_check(dev, state);
10366
10367 /*
10368 * Skip the remaining global validation if this is an async
10369 * update. Cursor updates can be done without affecting
10370 * state or bandwidth calcs and this avoids the performance
10371 * penalty of locking the private state object and
10372 * allocating a new dc_state.
10373 */
10374 if (state->async_update)
10375 return 0;
10376 }
10377
ebdd27e1 10378 /* Check scaling and underscan changes */
1f6010a9 10379 /* TODO: Removed scaling changes validation due to inability to commit
e7b07cee
HW
10380 * new stream into context w/o causing full reset. Need to
10381 * decide how to handle.
10382 */
c2cea706 10383 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
54d76575
LSL
10384 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10385 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10386 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
e7b07cee
HW
10387
10388 /* Skip any modesets/resets */
0bc9706d
LSL
10389 if (!acrtc || drm_atomic_crtc_needs_modeset(
10390 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
e7b07cee
HW
10391 continue;
10392
b830ebc9 10393 /* Skip anything that is not a scaling or underscan change */
54d76575 10394 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
e7b07cee
HW
10395 continue;
10396
10397 lock_and_validation_needed = true;
10398 }
10399
f6d7c7fa
NK
10400 /*
10401 * Streams and planes are reset when there are changes that affect
10402 * bandwidth. Anything that affects bandwidth needs to go through
10403 * DC global validation to ensure that the configuration can be applied
10404 * to hardware.
10405 *
10406 * We have to currently stall out here in atomic_check for outstanding
10407 * commits to finish in this case because our IRQ handlers reference
10408 * DRM state directly - we can end up disabling interrupts too early
10409 * if we don't.
10410 *
10411 * TODO: Remove this stall and drop DM state private objects.
a87fa993 10412 */
f6d7c7fa 10413 if (lock_and_validation_needed) {
eb3dc897
NK
10414 ret = dm_atomic_get_state(state, &dm_state);
10415 if (ret)
10416 goto fail;
e7b07cee
HW
10417
10418 ret = do_aquire_global_lock(dev, state);
10419 if (ret)
10420 goto fail;
1dc90497 10421
d9fe1a4c 10422#if defined(CONFIG_DRM_AMD_DC_DCN)
8c20a1ed
DF
10423 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
10424 goto fail;
10425
29b9ba74
ML
10426 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
10427 if (ret)
10428 goto fail;
d9fe1a4c 10429#endif
29b9ba74 10430
ded58c7b
ZL
10431 /*
10432 * Perform validation of MST topology in the state:
10433 * We need to perform MST atomic check before calling
10434 * dc_validate_global_state(), or there is a chance
10435 * to get stuck in an infinite loop and hang eventually.
10436 */
10437 ret = drm_dp_mst_atomic_check(state);
10438 if (ret)
10439 goto fail;
74a16675
RS
10440 status = dc_validate_global_state(dc, dm_state->context, false);
10441 if (status != DC_OK) {
10442 DC_LOG_WARNING("DC global validation failure: %s (%d)",
10443 dc_status_to_str(status), status);
e7b07cee
HW
10444 ret = -EINVAL;
10445 goto fail;
10446 }
bd200d19 10447 } else {
674e78ac 10448 /*
bd200d19
NK
10449 * The commit is a fast update. Fast updates shouldn't change
10450 * the DC context, affect global validation, and can have their
10451 * commit work done in parallel with other commits not touching
10452 * the same resource. If we have a new DC context as part of
10453 * the DM atomic state from validation we need to free it and
10454 * retain the existing one instead.
fde9f39a
MR
10455 *
10456 * Furthermore, since the DM atomic state only contains the DC
10457 * context and can safely be annulled, we can free the state
10458 * and clear the associated private object now to free
10459 * some memory and avoid a possible use-after-free later.
674e78ac 10460 */
bd200d19 10461
fde9f39a
MR
10462 for (i = 0; i < state->num_private_objs; i++) {
10463 struct drm_private_obj *obj = state->private_objs[i].ptr;
bd200d19 10464
fde9f39a
MR
10465 if (obj->funcs == adev->dm.atomic_obj.funcs) {
10466 int j = state->num_private_objs-1;
bd200d19 10467
fde9f39a
MR
10468 dm_atomic_destroy_state(obj,
10469 state->private_objs[i].state);
10470
10471 /* If i is not at the end of the array then the
10472 * last element needs to be moved to where i was
10473 * before the array can safely be truncated.
10474 */
10475 if (i != j)
10476 state->private_objs[i] =
10477 state->private_objs[j];
bd200d19 10478
fde9f39a
MR
10479 state->private_objs[j].ptr = NULL;
10480 state->private_objs[j].state = NULL;
10481 state->private_objs[j].old_state = NULL;
10482 state->private_objs[j].new_state = NULL;
10483
10484 state->num_private_objs = j;
10485 break;
10486 }
bd200d19 10487 }
e7b07cee
HW
10488 }
10489
caff0e66
NK
10490 /* Store the overall update type for use later in atomic check. */
10491 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
10492 struct dm_crtc_state *dm_new_crtc_state =
10493 to_dm_crtc_state(new_crtc_state);
10494
f6d7c7fa
NK
10495 dm_new_crtc_state->update_type = lock_and_validation_needed ?
10496 UPDATE_TYPE_FULL :
10497 UPDATE_TYPE_FAST;
e7b07cee
HW
10498 }
10499
10500 /* ret must indicate success (0) at this point */
10501 WARN_ON(ret);
e8a98235
RS
10502
10503 trace_amdgpu_dm_atomic_check_finish(state, ret);
10504
e7b07cee
HW
10505 return ret;
10506
10507fail:
10508 if (ret == -EDEADLK)
01e28f9c 10509 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
e7b07cee 10510 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
01e28f9c 10511 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
e7b07cee 10512 else
01e28f9c 10513 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
e7b07cee 10514
e8a98235
RS
10515 trace_amdgpu_dm_atomic_check_finish(state, ret);
10516
e7b07cee
HW
10517 return ret;
10518}
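/*
 * Illustrative sketch referenced by the kerneldoc above (assumed name
 * "example_dm_mode_funcs"; not a hunk from this file): an atomic_check
 * implementation such as amdgpu_dm_atomic_check() is installed through the
 * device's mode_config function table, alongside a helper-based commit.
 */
static const struct drm_mode_config_funcs example_dm_mode_funcs = {
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
};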
10519
3ee6b26b
AD
10520static bool is_dp_capable_without_timing_msa(struct dc *dc,
10521 struct amdgpu_dm_connector *amdgpu_dm_connector)
e7b07cee
HW
10522{
10523 uint8_t dpcd_data;
10524 bool capable = false;
10525
c84dec2f 10526 if (amdgpu_dm_connector->dc_link &&
e7b07cee
HW
10527 dm_helpers_dp_read_dpcd(
10528 NULL,
c84dec2f 10529 amdgpu_dm_connector->dc_link,
e7b07cee
HW
10530 DP_DOWN_STREAM_PORT_COUNT,
10531 &dpcd_data,
10532 sizeof(dpcd_data))) {
10533 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
10534 }
10535
10536 return capable;
10537}
f9b4f20c
SW
10538
10539static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10540 uint8_t *edid_ext, int len,
10541 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10542{
10543 int i;
10544 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10545 struct dc *dc = adev->dm.dc;
10546
10547 /* send extension block to DMCU for parsing */
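/*
 * Illustrative note (assumption based on the loop below): a 128-byte CEA
 * extension block is streamed to the DMCU as sixteen 8-byte chunks; each
 * intermediate chunk is acknowledged with the next expected offset, and
 * only the final chunk yields the parsed AMD VSDB result.
 */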
10548 for (i = 0; i < len; i += 8) {
10549 bool res;
10550 int offset;
10551
10552 /* send 8 bytes at a time */
10553 if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8))
10554 return false;
10555
10556 if (i + 8 == len) {
10557 /* EDID block sent completed, expect result */
10558 int version, min_rate, max_rate;
10559
10560 res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate);
10561 if (res) {
10562 /* amd vsdb found */
10563 vsdb_info->freesync_supported = 1;
10564 vsdb_info->amd_vsdb_version = version;
10565 vsdb_info->min_refresh_rate_hz = min_rate;
10566 vsdb_info->max_refresh_rate_hz = max_rate;
10567 return true;
10568 }
10569 /* not amd vsdb */
10570 return false;
10571 }
10572
10573 /* check for ack */
10574 res = dc_edid_parser_recv_cea_ack(dc, &offset);
10575 if (!res)
10576 return false;
10577 }
10578
10579 return false;
10580}
10581
7c7dd774 10582static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
f9b4f20c
SW
10583 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10584{
10585 uint8_t *edid_ext = NULL;
10586 int i;
10587 bool valid_vsdb_found = false;
10588
10589 /*----- drm_find_cea_extension() -----*/
10590 /* No EDID or EDID extensions */
10591 if (edid == NULL || edid->extensions == 0)
7c7dd774 10592 return -ENODEV;
f9b4f20c
SW
10593
10594 /* Find CEA extension */
10595 for (i = 0; i < edid->extensions; i++) {
10596 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10597 if (edid_ext[0] == CEA_EXT)
10598 break;
10599 }
10600
10601 if (i == edid->extensions)
7c7dd774 10602 return -ENODEV;
f9b4f20c
SW
10603
10604 /*----- cea_db_offsets() -----*/
10605 if (edid_ext[0] != CEA_EXT)
7c7dd774 10606 return -ENODEV;
f9b4f20c
SW
10607
10608 valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
7c7dd774
AB
10609
10610 return valid_vsdb_found ? i : -ENODEV;
f9b4f20c
SW
10611}
10612
98e6436d
AK
10613void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10614 struct edid *edid)
e7b07cee 10615{
eb0709ba 10616 int i = 0;
e7b07cee
HW
10617 struct detailed_timing *timing;
10618 struct detailed_non_pixel *data;
10619 struct detailed_data_monitor_range *range;
c84dec2f
HW
10620 struct amdgpu_dm_connector *amdgpu_dm_connector =
10621 to_amdgpu_dm_connector(connector);
bb47de73 10622 struct dm_connector_state *dm_con_state = NULL;
e7b07cee
HW
10623
10624 struct drm_device *dev = connector->dev;
1348969a 10625 struct amdgpu_device *adev = drm_to_adev(dev);
bb47de73 10626 bool freesync_capable = false;
f9b4f20c 10627 struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
b830ebc9 10628
8218d7f1
HW
10629 if (!connector->state) {
10630 DRM_ERROR("%s - Connector has no state", __func__);
bb47de73 10631 goto update;
8218d7f1
HW
10632 }
10633
98e6436d
AK
10634 if (!edid) {
10635 dm_con_state = to_dm_connector_state(connector->state);
10636
10637 amdgpu_dm_connector->min_vfreq = 0;
10638 amdgpu_dm_connector->max_vfreq = 0;
10639 amdgpu_dm_connector->pixel_clock_mhz = 0;
10640
bb47de73 10641 goto update;
98e6436d
AK
10642 }
10643
8218d7f1
HW
10644 dm_con_state = to_dm_connector_state(connector->state);
10645
c84dec2f 10646 if (!amdgpu_dm_connector->dc_sink) {
e7b07cee 10647 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
bb47de73 10648 goto update;
e7b07cee
HW
10649 }
10650 if (!adev->dm.freesync_module)
bb47de73 10651 goto update;
f9b4f20c
SW
10652
10653
10654 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
10655 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
10656 bool edid_check_required = false;
10657
10658 if (edid) {
e7b07cee
HW
10659 edid_check_required = is_dp_capable_without_timing_msa(
10660 adev->dm.dc,
c84dec2f 10661 amdgpu_dm_connector);
e7b07cee 10662 }
e7b07cee 10663
f9b4f20c
SW
10664 if (edid_check_required && (edid->version > 1 ||
10665 (edid->version == 1 && edid->revision > 1))) {
10666 for (i = 0; i < 4; i++) {
e7b07cee 10667
f9b4f20c
SW
10668 timing = &edid->detailed_timings[i];
10669 data = &timing->data.other_data;
10670 range = &data->data.range;
10671 /*
10672 * Check if monitor has continuous frequency mode
10673 */
10674 if (data->type != EDID_DETAIL_MONITOR_RANGE)
10675 continue;
10676 /*
10677 * Check for flag range limits only. If flag == 1 then
10678 * no additional timing information provided.
10679 * Default GTF, GTF Secondary curve and CVT are not
10680 * supported
10681 */
10682 if (range->flags != 1)
10683 continue;
a0ffc3fd 10684
f9b4f20c
SW
10685 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10686 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10687 amdgpu_dm_connector->pixel_clock_mhz =
10688 range->pixel_clock_mhz * 10;
a0ffc3fd 10689
f9b4f20c
SW
10690 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10691 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
e7b07cee 10692
f9b4f20c
SW
10693 break;
10694 }
98e6436d 10695
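/*
 * Note (editorial assumption, not from the original source): the 10 Hz
 * floor below is a driver-side heuristic - a VRR window such as 48-144 Hz
 * qualifies as FreeSync capable, while a narrow 60-65 Hz window does not.
 */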
f9b4f20c
SW
10696 if (amdgpu_dm_connector->max_vfreq -
10697 amdgpu_dm_connector->min_vfreq > 10) {
98e6436d 10698
f9b4f20c
SW
10699 freesync_capable = true;
10700 }
10701 }
10702 } else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
7c7dd774
AB
10703 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10704 if (i >= 0 && vsdb_info.freesync_supported) {
f9b4f20c
SW
10705 timing = &edid->detailed_timings[i];
10706 data = &timing->data.other_data;
10707
10708 amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10709 amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10710 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10711 freesync_capable = true;
10712
10713 connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10714 connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
e7b07cee
HW
10715 }
10716 }
bb47de73
NK
10717
10718update:
10719 if (dm_con_state)
10720 dm_con_state->freesync_capable = freesync_capable;
10721
10722 if (connector->vrr_capable_property)
10723 drm_connector_set_vrr_capable_property(connector,
10724 freesync_capable);
e7b07cee
HW
10725}
10726
3d4e52d0
VL
10727void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10728{
1348969a 10729 struct amdgpu_device *adev = drm_to_adev(dev);
3d4e52d0
VL
10730 struct dc *dc = adev->dm.dc;
10731 int i;
10732
10733 mutex_lock(&adev->dm.dc_lock);
10734 if (dc->current_state) {
10735 for (i = 0; i < dc->current_state->stream_count; ++i)
10736 dc->current_state->streams[i]
10737 ->triggered_crtc_reset.enabled =
10738 adev->dm.force_timing_sync;
10739
10740 dm_enable_per_frame_crtc_master_sync(dc->current_state);
10741 dc_trigger_sync(dc, dc->current_state);
10742 }
10743 mutex_unlock(&adev->dm.dc_lock);
10744}
9d83722d
RS
10745
10746void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10747 uint32_t value, const char *func_name)
10748{
10749#ifdef DM_CHECK_ADDR_0
10750 if (address == 0) {
10751 DC_ERR("invalid register write. address = 0");
10752 return;
10753 }
10754#endif
10755 cgs_write_register(ctx->cgs_device, address, value);
10756 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
10757}
10758
10759uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10760 const char *func_name)
10761{
10762 uint32_t value;
10763#ifdef DM_CHECK_ADDR_0
10764 if (address == 0) {
10765 DC_ERR("invalid register read; address = 0\n");
10766 return 0;
10767 }
10768#endif
10769
10770 if (ctx->dmub_srv &&
10771 ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10772 !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10773 ASSERT(false);
10774 return 0;
10775 }
10776
10777 value = cgs_read_register(ctx->cgs_device, address);
10778
10779 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
10780
10781 return value;
10782}
81927e28
JS
10783
10784int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex,
10785 struct aux_payload *payload, enum aux_return_code_type *operation_result)
10786{
10787 struct amdgpu_device *adev = ctx->driver_context;
10788 int ret = 0;
10789
10790 dc_process_dmub_aux_transfer_async(ctx->dc, linkIndex, payload);
10791 ret = wait_for_completion_interruptible_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
10792 if (ret == 0) {
10793 *operation_result = AUX_RET_ERROR_TIMEOUT;
10794 return -1;
10795 }
10796 *operation_result = (enum aux_return_code_type)adev->dm.dmub_notify->result;
10797
10798 if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
10799 (*payload->reply) = adev->dm.dmub_notify->aux_reply.command;
10800
10801 /* For read case, copy data to payload */
10802 if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
10803 (*payload->reply == AUX_TRANSACTION_REPLY_AUX_ACK))
10804 memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
10805 adev->dm.dmub_notify->aux_reply.length);
10806 }
10807
10808 return adev->dm.dmub_notify->aux_reply.length;
10809}
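
/*
 * Usage sketch (hypothetical caller with assumed names; not part of this
 * file): a native AUX/DPCD read through the DMUB-backed path above could
 * look like this. On success the return value is the reply length; on
 * timeout it is -1 and *operation_result is AUX_RET_ERROR_TIMEOUT.
 */
static int example_read_dpcd_via_dmub(struct dc_context *ctx,
				      unsigned int link_index,
				      uint32_t address,
				      uint8_t *data, uint32_t length)
{
	enum aux_return_code_type op_result;
	uint8_t reply;
	struct aux_payload payload = {
		.address = address,
		.length = length,
		.data = data,
		.write = false, /* read transaction */
		.reply = &reply,
	};

	return amdgpu_dm_process_dmub_aux_transfer_sync(ctx, link_index,
							&payload, &op_result);
}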