/* drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c */
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc_link_dp.h"
#include "link_enc_cfg.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include "i2caux_interface.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

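/*
 * Informal sketch of the DMUB firmware image layout, as implied by the offset
 * arithmetic in dm_dmub_hw_init() below (a reading of this file, not of any
 * firmware spec):
 *
 *	ucode_array_offset_bytes
 *	v
 *	[ PSP header | inst_const payload | PSP footer ][ bss_data ]
 *	    0x100                              0x100
 *	\______________ inst_const_bytes ______________/
 *
 * So the instruction/constant payload copied into the DMUB_WINDOW_0_INST_CONST
 * region starts at ucode_array_offset_bytes + PSP_HEADER_BYTES and is
 * inst_const_bytes - PSP_HEADER_BYTES - PSP_FOOTER_BYTES long, while bss_data
 * immediately follows the whole inst_const block.
 */
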
/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

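/*
 * A rough, informal picture of that layering (for orientation only):
 *
 *	userspace (KMS/atomic ioctls)
 *	        |
 *	     DRM core
 *	        |
 *	  amdgpu_dm (this file): translates DRM state <-> DC state
 *	        |
 *	     DC core: per-ASIC resources and hardware programming
 */
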
/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state);
/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
	}
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (WARN_ON(otg_inst == -1))
		return adev->mode_info.crtcs[0];

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
					      struct dm_crtc_state *new_state)
{
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
		return true;
	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
		return true;
	else
		return false;
}

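/*
 * Worked example for the helpers above (illustrative numbers, not from any
 * spec): on a hypothetical 48-144 Hz VRR panel, VRR_STATE_ACTIVE_VARIABLE
 * lets the flip cadence drive the refresh rate anywhere in that range, while
 * VRR_STATE_ACTIVE_FIXED pins it to a single rate (e.g. for low framerate
 * compensation). Both count as "VRR active" here; a transition into or out of
 * VRR, or into the fixed state, is what makes is_dc_timing_adjust_needed()
 * request a DC timing adjustment.
 */
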
/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DC_LOG_PFLIP("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
			     amdgpu_crtc->pflip_status,
			     AMDGPU_FLIP_SUBMITTED,
			     amdgpu_crtc->crtc_id,
			     amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	WARN_ON(!e);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
		     amdgpu_crtc->crtc_id, amdgpu_crtc,
		     vrr_active, (int) !e);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct drm_device *drm_dev;
	struct drm_vblank_crtc *vblank;
	ktime_t frame_duration_ns, previous_timestamp;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
		drm_dev = acrtc->base.dev;
		vblank = &drm_dev->vblank[acrtc->base.index];
		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
		frame_duration_ns = vblank->time - previous_timestamp;

		if (frame_duration_ns > 0) {
			trace_amdgpu_refresh_rate_track(acrtc->base.index,
						frame_duration_ns,
						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
			atomic64_set(&irq_params->previous_timestamp, vblank->time);
		}

		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
						adev->dm.freesync_module,
						acrtc->dm_irq_params.stream,
						&acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
						adev->dm.dc,
						acrtc->dm_irq_params.stream,
						&acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/**
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/**
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

	if (!acrtc)
		return;

	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif

/**
 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub AUX or SET_CONFIG command completion processing callback.
 * Copies dmub notification to DM which is to be read by the AUX command
 * issuing thread and also signals the event to wake up the thread.
 */
void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
{
	if (adev->dm.dmub_notify)
		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
		complete(&adev->dm.dmub_aux_transfer_done);
}

/**
 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub Hpd interrupt processing callback. Gets the display index through
 * the link index and calls the helper to do the processing.
 */
void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
{
	struct amdgpu_dm_connector *aconnector;
	struct amdgpu_dm_connector *hpd_aconnector = NULL;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct dc_link *link;
	uint8_t link_index = 0;
	struct drm_device *dev = adev->dm.ddev;

	if (adev == NULL)
		return;

	if (notify == NULL) {
		DRM_ERROR("DMUB HPD callback notification was NULL");
		return;
	}

	if (notify->link_index > adev->dm.dc->link_count) {
		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
		return;
	}

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);

	link_index = notify->link_index;

	link = adev->dm.dc->links[link_index];

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (link && aconnector->dc_link == link) {
			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
			hpd_aconnector = aconnector;
			break;
		}
	}
	drm_connector_list_iter_end(&iter);
	drm_modeset_unlock(&dev->mode_config.connection_mutex);

	if (hpd_aconnector)
		handle_hpd_irq_helper(hpd_aconnector);
}

/**
 * register_dmub_notify_callback - Sets callback for DMUB notify
 * @adev: amdgpu_device pointer
 * @type: Type of dmub notification
 * @callback: Dmub interrupt callback function
 * @dmub_int_thread_offload: offload indicator
 *
 * API to register a dmub callback handler for a dmub notification.
 * Also sets an indicator for whether callback processing is to be
 * offloaded to the dmub interrupt handling thread.
 * Return: true if successfully registered, false if there is existing registration
 */
bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type,
				   dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload)
{
	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
		adev->dm.dmub_callback[type] = callback;
		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
	} else
		return false;

	return true;
}

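/*
 * Usage sketch (this mirrors the registrations actually made in
 * amdgpu_dm_init() further down in this file): AUX replies are handled
 * in-line, while HPD processing is offloaded to the delayed HPD workqueue.
 *
 *	if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
 *					   dmub_aux_setconfig_callback, false))
 *		DRM_ERROR("amdgpu: fail to register dmub aux callback");
 *	if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD,
 *					   dmub_hpd_callback, true))
 *		DRM_ERROR("amdgpu: fail to register dmub hpd callback");
 */
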
static void dm_handle_hpd_work(struct work_struct *work)
{
	struct dmub_hpd_work *dmub_hpd_wrk;

	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);

	if (!dmub_hpd_wrk->dmub_notify) {
		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
		return;
	}

	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
			dmub_hpd_wrk->dmub_notify);
	}
	kfree(dmub_hpd_wrk);

}

#define DMUB_TRACE_MAX_READ 64
/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox interrupt by draining pending DMUB notifications
 * and trace-buffer entries.
 */
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
	struct dmub_notification notify;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dmcub_trace_buf_entry entry = { 0 };
	uint32_t count = 0;
	struct dmub_hpd_work *dmub_hpd_wrk;
	struct dc_link *plink = NULL;

	if (dc_enable_dmub_notifications(adev->dm.dc) &&
	    irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
		dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
		if (!dmub_hpd_wrk) {
			DRM_ERROR("Failed to allocate dmub_hpd_wrk");
			return;
		}
		INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);

		do {
			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
				DRM_ERROR("DM: notify type %d invalid!", notify.type);
				continue;
			}
			if (dm->dmub_thread_offload[notify.type] == true) {
				/* Note: &notify points into this handler's stack
				 * frame; the queued work reads it, so the work must
				 * not outlive this call.
				 */
				dmub_hpd_wrk->dmub_notify = &notify;
				dmub_hpd_wrk->adev = adev;
				if (notify.type == DMUB_NOTIFICATION_HPD) {
					plink = adev->dm.dc->links[notify.link_index];
					if (plink) {
						plink->hpd_status =
							notify.hpd_status == DP_HPD_PLUG;
					}
				}
				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
			} else {
				dm->dmub_callback[notify.type](adev, &notify);
			}
		} while (notify.pending_notification);
	}

	do {
		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
							 entry.param0, entry.param1);

			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
					 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
		} else
			break;

		count++;

	} while (count <= DMUB_TRACE_MAX_READ);

	if (count > DMUB_TRACE_MAX_READ)
		DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ");
}
#endif

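/*
 * Informal flow of a DMUB HPD notification through the pieces above
 * (assuming the registrations made in amdgpu_dm_init()):
 *
 *	outbox1 IRQ -> dm_dmub_outbox1_low_irq()
 *	    -> dc_stat_get_dmub_notification() drains pending notifications
 *	    -> offloaded types: queue_work() -> dm_handle_hpd_work()
 *	           -> dm.dmub_callback[type]() == dmub_hpd_callback()
 *	           -> handle_hpd_irq_helper(aconnector)
 *	    -> non-offloaded types: dm.dmub_callback[type]() runs directly
 *	       in interrupt context
 */
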
static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}
	}
}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

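/*
 * How the audio pieces above fit together (informal summary; the HDA side
 * lives in the sound driver, not here): amdgpu_dm_audio_init() registers the
 * component, and when the sound driver binds, _component_bind() hands it
 * amdgpu_dm_audio_component_ops. On a hotplug that changes audio state,
 * amdgpu_dm_audio_eld_notify() pokes pin_eld_notify(), and the sound driver
 * then calls back into amdgpu_dm_audio_component_get_eld() to fetch the
 * connector's new ELD.
 */
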
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	if (!adev->dm.dc->ctx->dmub_srv)
		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	memset(pa_config, 0, sizeof(*pa_config));

	logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increase system aperture high address (add 1)
		 * to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;
}
#endif
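/*
 * Note on the shifts above (an informal reading of the code, not a register
 * spec quote): the system-aperture bounds are handled in 256 KiB units (the
 * ">> 18" / "<< 18" pairs) and the AGP window in 16 MiB units (">> 24" /
 * "<< 24"), while the GART page-table addresses are treated as 4 KiB-aligned
 * values split into a 32-bit low part and a 4-bit high part (">> 12" plus
 * the ">> 44" high nibble).
 */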
#if defined(CONFIG_DRM_AMD_DC_DCN)
static void vblank_control_worker(struct work_struct *work)
{
	struct vblank_control_work *vblank_work =
		container_of(work, struct vblank_control_work, work);
	struct amdgpu_display_manager *dm = vblank_work->dm;

	mutex_lock(&dm->dc_lock);

	if (vblank_work->enable)
		dm->active_vblank_irq_count++;
	else if (dm->active_vblank_irq_count)
		dm->active_vblank_irq_count--;

	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

	/* Control PSR based on vblank requirements from OS */
	if (vblank_work->stream && vblank_work->stream->link) {
		if (vblank_work->enable) {
			if (vblank_work->stream->link->psr_settings.psr_allow_active)
				amdgpu_dm_psr_disable(vblank_work->stream);
		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
			amdgpu_dm_psr_enable(vblank_work->stream);
		}
	}

	mutex_unlock(&dm->dc_lock);

	dc_stream_release(vblank_work->stream);

	kfree(vblank_work);
}

#endif

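/*
 * Illustrative sketch of the queueing side (it lives in the CRTC
 * enable/disable path elsewhere in this file, outside this excerpt; field
 * names are assumed from their use in vblank_control_worker() above):
 *
 *	work = kzalloc(sizeof(*work), GFP_ATOMIC);
 *	INIT_WORK(&work->work, vblank_control_worker);
 *	work->dm = dm;
 *	work->acrtc = acrtc;
 *	work->enable = enable;
 *	if (dm_state->stream) {
 *		dc_stream_retain(dm_state->stream);  // released by the worker
 *		work->stream = dm_state->stream;
 *	}
 *	queue_work(dm->vblank_control_workqueue, &work->work);
 */
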
static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
{
	struct hpd_rx_irq_offload_work *offload_work;
	struct amdgpu_dm_connector *aconnector;
	struct dc_link *dc_link;
	struct amdgpu_device *adev;
	enum dc_connection_type new_connection_type = dc_connection_none;
	unsigned long flags;

	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
	aconnector = offload_work->offload_wq->aconnector;

	if (!aconnector) {
		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
		goto skip;
	}

	adev = drm_to_adev(aconnector->base.dev);
	dc_link = aconnector->dc_link;

	mutex_lock(&aconnector->hpd_lock);
	if (!dc_link_detect_sink(dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");
	mutex_unlock(&aconnector->hpd_lock);

	if (new_connection_type == dc_connection_none)
		goto skip;

	if (amdgpu_in_reset(adev))
		goto skip;

	mutex_lock(&adev->dm.dc_lock);
	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
		dc_link_dp_handle_automated_test(dc_link);
	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
		dc_link_dp_handle_link_loss(dc_link);
		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
		offload_work->offload_wq->is_handling_link_loss = false;
		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
	}
	mutex_unlock(&adev->dm.dc_lock);

skip:
	kfree(offload_work);

}

static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
{
	int max_caps = dc->caps.max_links;
	int i = 0;
	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;

	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);

	if (!hpd_rx_offload_wq)
		return NULL;

	for (i = 0; i < max_caps; i++) {
		hpd_rx_offload_wq[i].wq =
			create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");

		if (hpd_rx_offload_wq[i].wq == NULL) {
			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
			return NULL;
		}

		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
	}

	return hpd_rx_offload_wq;
}

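/*
 * Note on the allocator above: each link gets its own single-threaded queue,
 * so HPD RX work for one link cannot stall another. Also note that on a
 * mid-loop failure the already-created workqueues and the array itself are
 * not freed before returning NULL; amdgpu_dm_fini() only tears down the
 * queues when adev->dm.hpd_rx_offload_wq was successfully assigned.
 */
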
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);
#if defined(CONFIG_DRM_AMD_DC_DCN)
	spin_lock_init(&adev->dm.vblank_lock);
#endif

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
	init_data.asic_id.chip_id = adev->pdev->device;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		init_data.flags.gpu_vm_support = true;
		break;
	default:
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(2, 1, 0):
			init_data.flags.gpu_vm_support = true;
			if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
				init_data.flags.disable_dmcu = true;
			break;
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
			init_data.flags.gpu_vm_support = true;
			break;
		case IP_VERSION(2, 0, 3):
			init_data.flags.disable_dmcu = true;
			break;
		default:
			break;
		}
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
		init_data.flags.edp_no_power_sequencing = true;

	init_data.flags.power_down_display_on_boot = true;

	INIT_LIST_HEAD(&adev->dm.da_list);
	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
	if (!adev->dm.hpd_rx_offload_wq) {
		DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
		goto error;
	}

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
		struct dc_phy_addr_space_config pa_config;

		mmhub_read_system_context(adev, &pa_config);

		// Call the DC init_memory func
		dc_setup_system_context(adev->dm.dc, &pa_config);
	}
#endif

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->dm.dc->caps.max_links > 0) {
		adev->dm.vblank_control_workqueue =
			create_singlethread_workqueue("dm_vblank_control_workqueue");
		if (!adev->dm.vblank_control_workqueue)
			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
	}
#endif

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
#endif
	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		init_completion(&adev->dm.dmub_aux_transfer_done);
		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
		if (!adev->dm.dmub_notify) {
			DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
			goto error;
		}

		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
		if (!adev->dm.delayed_hpd_wq) {
			DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
			goto error;
		}

		amdgpu_dm_outbox_init(adev);
#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
						   dmub_aux_setconfig_callback, false)) {
			DRM_ERROR("amdgpu: fail to register dmub aux callback");
			goto error;
		}
		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
			goto error;
		}
#endif
	}

	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static int amdgpu_dm_early_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_audio_fini(adev);

	return 0;
}

7578ecda 1547static void amdgpu_dm_fini(struct amdgpu_device *adev)
4562236b 1548{
f74367e4
AD
1549 int i;
1550
09a5df6c
NK
1551#if defined(CONFIG_DRM_AMD_DC_DCN)
1552 if (adev->dm.vblank_control_workqueue) {
1553 destroy_workqueue(adev->dm.vblank_control_workqueue);
1554 adev->dm.vblank_control_workqueue = NULL;
1555 }
1556#endif
1557
f74367e4
AD
1558 for (i = 0; i < adev->dm.display_indexes_num; i++) {
1559 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1560 }
1561
4562236b 1562 amdgpu_dm_destroy_drm_device(&adev->dm);
c8bdf2b6 1563
9a65df19
WL
1564#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1565 if (adev->dm.crc_rd_wrk) {
1566 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1567 kfree(adev->dm.crc_rd_wrk);
1568 adev->dm.crc_rd_wrk = NULL;
1569 }
1570#endif
52704fca
BL
1571#ifdef CONFIG_DRM_AMD_DC_HDCP
1572 if (adev->dm.hdcp_workqueue) {
e96b1b29 1573 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
52704fca
BL
1574 adev->dm.hdcp_workqueue = NULL;
1575 }
1576
1577 if (adev->dm.dc)
1578 dc_deinit_callbacks(adev->dm.dc);
1579#endif
51ba6912 1580
3beac533 1581 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
9a71c7d3 1582
81927e28
JS
1583 if (dc_enable_dmub_notifications(adev->dm.dc)) {
1584 kfree(adev->dm.dmub_notify);
1585 adev->dm.dmub_notify = NULL;
e27c41d5
JS
1586 destroy_workqueue(adev->dm.delayed_hpd_wq);
1587 adev->dm.delayed_hpd_wq = NULL;
81927e28
JS
1588 }
1589
743b9786
NK
1590 if (adev->dm.dmub_bo)
1591 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1592 &adev->dm.dmub_bo_gpu_addr,
1593 &adev->dm.dmub_bo_cpu_addr);
52704fca 1594
006c26a0
AG
1595 if (adev->dm.hpd_rx_offload_wq) {
1596 for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1597 if (adev->dm.hpd_rx_offload_wq[i].wq) {
1598 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1599 adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1600 }
1601 }
1602
1603 kfree(adev->dm.hpd_rx_offload_wq);
1604 adev->dm.hpd_rx_offload_wq = NULL;
1605 }
1606
c8bdf2b6
ED
1607 /* DC Destroy TODO: Replace destroy DAL */
1608 if (adev->dm.dc)
1609 dc_destroy(&adev->dm.dc);
4562236b
HW
1610 /*
1611 	 * TODO: pageflip, vblank interrupt
1612 *
1613 * amdgpu_dm_irq_fini(adev);
1614 */
1615
1616 if (adev->dm.cgs_device) {
1617 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1618 adev->dm.cgs_device = NULL;
1619 }
1620 if (adev->dm.freesync_module) {
1621 mod_freesync_destroy(adev->dm.freesync_module);
1622 adev->dm.freesync_module = NULL;
1623 }
674e78ac 1624
6ce8f316 1625 mutex_destroy(&adev->dm.audio_lock);
674e78ac
NK
1626 mutex_destroy(&adev->dm.dc_lock);
1627
4562236b
HW
1628 return;
1629}
1630
a94d5569 1631static int load_dmcu_fw(struct amdgpu_device *adev)
4562236b 1632{
a7669aff 1633 const char *fw_name_dmcu = NULL;
a94d5569
DF
1634 int r;
1635 const struct dmcu_firmware_header_v1_0 *hdr;
1636
1637 	switch (adev->asic_type) {
55e56389
MR
1638#if defined(CONFIG_DRM_AMD_DC_SI)
1639 case CHIP_TAHITI:
1640 case CHIP_PITCAIRN:
1641 case CHIP_VERDE:
1642 case CHIP_OLAND:
1643#endif
a94d5569
DF
1644 case CHIP_BONAIRE:
1645 case CHIP_HAWAII:
1646 case CHIP_KAVERI:
1647 case CHIP_KABINI:
1648 case CHIP_MULLINS:
1649 case CHIP_TONGA:
1650 case CHIP_FIJI:
1651 case CHIP_CARRIZO:
1652 case CHIP_STONEY:
1653 case CHIP_POLARIS11:
1654 case CHIP_POLARIS10:
1655 case CHIP_POLARIS12:
1656 case CHIP_VEGAM:
1657 case CHIP_VEGA10:
1658 case CHIP_VEGA12:
1659 case CHIP_VEGA20:
1660 return 0;
5ea23931
RL
1661 case CHIP_NAVI12:
1662 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1663 break;
a94d5569 1664 case CHIP_RAVEN:
a7669aff
HW
1665 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1666 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1667 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1668 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1669 else
a7669aff 1670 return 0;
a94d5569
DF
1671 break;
1672 default:
1d789535 1673 switch (adev->ip_versions[DCE_HWIP][0]) {
c08182f2
AD
1674 case IP_VERSION(2, 0, 2):
1675 case IP_VERSION(2, 0, 3):
1676 case IP_VERSION(2, 0, 0):
1677 case IP_VERSION(2, 1, 0):
1678 case IP_VERSION(3, 0, 0):
1679 case IP_VERSION(3, 0, 2):
1680 case IP_VERSION(3, 0, 3):
1681 case IP_VERSION(3, 0, 1):
1682 case IP_VERSION(3, 1, 2):
1683 case IP_VERSION(3, 1, 3):
1684 return 0;
1685 default:
1686 break;
1687 }
a94d5569 1688 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
59d0f396 1689 return -EINVAL;
a94d5569
DF
1690 }
1691
1692 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1693 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1694 return 0;
1695 }
1696
1697 r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1698 if (r == -ENOENT) {
1699 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1700 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1701 adev->dm.fw_dmcu = NULL;
1702 return 0;
1703 }
1704 if (r) {
1705 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1706 fw_name_dmcu);
1707 return r;
1708 }
1709
1710 r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1711 if (r) {
1712 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1713 fw_name_dmcu);
1714 release_firmware(adev->dm.fw_dmcu);
1715 adev->dm.fw_dmcu = NULL;
1716 return r;
1717 }
1718
1719 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
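	/* Register both DMCU ucode regions with the PSP loader: the ERAM image
	 * (total ucode size minus the interrupt vector table) and the INTV
	 * table itself, each aligned to PAGE_SIZE.
	 */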
1720 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1721 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1722 adev->firmware.fw_size +=
1723 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1724
1725 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1726 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1727 adev->firmware.fw_size +=
1728 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1729
ee6e89c0
DF
1730 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1731
a94d5569
DF
1732 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1733
4562236b
HW
1734 return 0;
1735}
1736
743b9786
NK
1737static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1738{
1739 struct amdgpu_device *adev = ctx;
1740
1741 return dm_read_reg(adev->dm.dc->ctx, address);
1742}
1743
1744static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1745 uint32_t value)
1746{
1747 struct amdgpu_device *adev = ctx;
1748
1749 return dm_write_reg(adev->dm.dc->ctx, address, value);
1750}
1751
1752static int dm_dmub_sw_init(struct amdgpu_device *adev)
1753{
1754 struct dmub_srv_create_params create_params;
8c7aea40
NK
1755 struct dmub_srv_region_params region_params;
1756 struct dmub_srv_region_info region_info;
1757 struct dmub_srv_fb_params fb_params;
1758 struct dmub_srv_fb_info *fb_info;
1759 struct dmub_srv *dmub_srv;
743b9786
NK
1760 const struct dmcub_firmware_header_v1_0 *hdr;
1761 const char *fw_name_dmub;
1762 enum dmub_asic dmub_asic;
1763 enum dmub_status status;
1764 int r;
1765
1d789535 1766 switch (adev->ip_versions[DCE_HWIP][0]) {
c08182f2 1767 case IP_VERSION(2, 1, 0):
743b9786
NK
1768 dmub_asic = DMUB_ASIC_DCN21;
1769 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
71c0fd92
RL
1770 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1771 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
743b9786 1772 break;
c08182f2 1773 case IP_VERSION(3, 0, 0):
1d789535 1774 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
c08182f2
AD
1775 dmub_asic = DMUB_ASIC_DCN30;
1776 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1777 } else {
1778 dmub_asic = DMUB_ASIC_DCN30;
1779 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1780 }
79037324 1781 break;
c08182f2 1782 case IP_VERSION(3, 0, 1):
469989ca
RL
1783 dmub_asic = DMUB_ASIC_DCN301;
1784 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1785 break;
c08182f2 1786 case IP_VERSION(3, 0, 2):
2a411205
BL
1787 dmub_asic = DMUB_ASIC_DCN302;
1788 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1789 break;
c08182f2 1790 case IP_VERSION(3, 0, 3):
656fe9b6
AP
1791 dmub_asic = DMUB_ASIC_DCN303;
1792 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1793 break;
c08182f2
AD
1794 case IP_VERSION(3, 1, 2):
1795 case IP_VERSION(3, 1, 3):
1ebcaebd
NK
1796 dmub_asic = DMUB_ASIC_DCN31;
1797 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1798 break;
743b9786
NK
1799
1800 default:
1801 /* ASIC doesn't support DMUB. */
1802 return 0;
1803 }
1804
743b9786
NK
1805 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1806 if (r) {
1807 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1808 return 0;
1809 }
1810
1811 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1812 if (r) {
1813 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1814 return 0;
1815 }
1816
743b9786 1817 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
72a74a18 1818 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
743b9786 1819
9a6ed547
NK
1820 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1821 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1822 AMDGPU_UCODE_ID_DMCUB;
1823 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1824 adev->dm.dmub_fw;
1825 adev->firmware.fw_size +=
1826 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
743b9786 1827
9a6ed547
NK
1828 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1829 adev->dm.dmcub_fw_version);
1830 }
1831
743b9786 1832
8c7aea40
NK
1833 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1834 dmub_srv = adev->dm.dmub_srv;
1835
1836 if (!dmub_srv) {
1837 DRM_ERROR("Failed to allocate DMUB service!\n");
1838 return -ENOMEM;
1839 }
1840
1841 memset(&create_params, 0, sizeof(create_params));
1842 create_params.user_ctx = adev;
1843 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1844 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1845 create_params.asic = dmub_asic;
1846
1847 /* Create the DMUB service. */
1848 status = dmub_srv_create(dmub_srv, &create_params);
1849 if (status != DMUB_STATUS_OK) {
1850 DRM_ERROR("Error creating DMUB service: %d\n", status);
1851 return -EINVAL;
1852 }
1853
1854 /* Calculate the size of all the regions for the DMUB service. */
1855 memset(&region_params, 0, sizeof(region_params));
1856
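	/* The PSP header and footer wrap the signed image and are not part of
	 * the instruction constants loaded into the DMUB region, so subtract
	 * them from the region size.
	 */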
1857 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1858 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1859 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1860 region_params.vbios_size = adev->bios_size;
0922b899 1861 region_params.fw_bss_data = region_params.bss_data_size ?
1f0674fd
NK
1862 adev->dm.dmub_fw->data +
1863 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
0922b899 1864 le32_to_cpu(hdr->inst_const_bytes) : NULL;
a576b345
NK
1865 region_params.fw_inst_const =
1866 adev->dm.dmub_fw->data +
1867 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1868 PSP_HEADER_BYTES;
8c7aea40
NK
1869
1870 status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1871 &region_info);
1872
1873 if (status != DMUB_STATUS_OK) {
1874 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1875 return -EINVAL;
1876 }
1877
1878 /*
1879 * Allocate a framebuffer based on the total size of all the regions.
1880 * TODO: Move this into GART.
1881 */
1882 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1883 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1884 &adev->dm.dmub_bo_gpu_addr,
1885 &adev->dm.dmub_bo_cpu_addr);
1886 if (r)
1887 return r;
1888
1889 /* Rebase the regions on the framebuffer address. */
1890 memset(&fb_params, 0, sizeof(fb_params));
1891 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1892 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1893 fb_params.region_info = &region_info;
1894
1895 adev->dm.dmub_fb_info =
1896 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1897 fb_info = adev->dm.dmub_fb_info;
1898
1899 if (!fb_info) {
1900 DRM_ERROR(
1901 "Failed to allocate framebuffer info for DMUB service!\n");
1902 return -ENOMEM;
1903 }
1904
1905 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1906 if (status != DMUB_STATUS_OK) {
1907 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1908 return -EINVAL;
1909 }
1910
743b9786
NK
1911 return 0;
1912}
1913
a94d5569
DF
1914static int dm_sw_init(void *handle)
1915{
1916 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
743b9786
NK
1917 int r;
1918
1919 r = dm_dmub_sw_init(adev);
1920 if (r)
1921 return r;
a94d5569
DF
1922
1923 return load_dmcu_fw(adev);
1924}
1925
4562236b
HW
1926static int dm_sw_fini(void *handle)
1927{
a94d5569
DF
1928 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1929
8c7aea40
NK
1930 kfree(adev->dm.dmub_fb_info);
1931 adev->dm.dmub_fb_info = NULL;
1932
743b9786
NK
1933 if (adev->dm.dmub_srv) {
1934 dmub_srv_destroy(adev->dm.dmub_srv);
1935 adev->dm.dmub_srv = NULL;
1936 }
1937
75e1658e
ND
1938 release_firmware(adev->dm.dmub_fw);
1939 adev->dm.dmub_fw = NULL;
743b9786 1940
75e1658e
ND
1941 release_firmware(adev->dm.fw_dmcu);
1942 adev->dm.fw_dmcu = NULL;
a94d5569 1943
4562236b
HW
1944 return 0;
1945}
1946
7abcf6b5 1947static int detect_mst_link_for_all_connectors(struct drm_device *dev)
4562236b 1948{
c84dec2f 1949 struct amdgpu_dm_connector *aconnector;
4562236b 1950 struct drm_connector *connector;
f8d2d39e 1951 struct drm_connector_list_iter iter;
7abcf6b5 1952 int ret = 0;
4562236b 1953
f8d2d39e
LP
1954 drm_connector_list_iter_begin(dev, &iter);
1955 drm_for_each_connector_iter(connector, &iter) {
b349f76e 1956 aconnector = to_amdgpu_dm_connector(connector);
30ec2b97
JFZ
1957 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1958 aconnector->mst_mgr.aux) {
f1ad2f5e 1959 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
f8d2d39e
LP
1960 aconnector,
1961 aconnector->base.base.id);
7abcf6b5
AG
1962
1963 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1964 if (ret < 0) {
1965 DRM_ERROR("DM_MST: Failed to start MST\n");
f8d2d39e
LP
1966 aconnector->dc_link->type =
1967 dc_connection_single;
1968 break;
7abcf6b5 1969 }
f8d2d39e 1970 }
4562236b 1971 }
f8d2d39e 1972 drm_connector_list_iter_end(&iter);
4562236b 1973
7abcf6b5
AG
1974 return ret;
1975}
1976
1977static int dm_late_init(void *handle)
1978{
42e67c3b 1979 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7abcf6b5 1980
bbf854dc
DF
1981 struct dmcu_iram_parameters params;
1982 unsigned int linear_lut[16];
1983 int i;
17bdb4a8 1984 struct dmcu *dmcu = NULL;
bbf854dc 1985
17bdb4a8
JFZ
1986 dmcu = adev->dm.dc->res_pool->dmcu;
1987
bbf854dc
DF
1988 for (i = 0; i < 16; i++)
1989 linear_lut[i] = 0xFFFF * i / 15;
1990
1991 params.set = 0;
75068994 1992 params.backlight_ramping_override = false;
bbf854dc
DF
1993 params.backlight_ramping_start = 0xCCCC;
1994 params.backlight_ramping_reduction = 0xCCCCCCCC;
1995 params.backlight_lut_array_size = 16;
1996 params.backlight_lut_array = linear_lut;
1997
2ad0cdf9
AK
1998 	/* Min backlight level after ABM reduction; don't allow below 1%:
1999 * 0xFFFF x 0.01 = 0x28F
2000 */
2001 params.min_abm_backlight = 0x28F;
5cb32419 2002 /* In the case where abm is implemented on dmcub,
6e568e43
JW
2003 	 * the dmcu object will be NULL.
2004 * ABM 2.4 and up are implemented on dmcub.
2005 */
2006 if (dmcu) {
2007 if (!dmcu_load_iram(dmcu, params))
2008 return -EINVAL;
2009 } else if (adev->dm.dc->ctx->dmub_srv) {
2010 struct dc_link *edp_links[MAX_NUM_EDP];
2011 int edp_num;
bbf854dc 2012
6e568e43
JW
2013 get_edp_links(adev->dm.dc, edp_links, &edp_num);
2014 for (i = 0; i < edp_num; i++) {
2015 if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2016 return -EINVAL;
2017 }
2018 }
bbf854dc 2019
4a580877 2020 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
4562236b
HW
2021}
2022
2023static void s3_handle_mst(struct drm_device *dev, bool suspend)
2024{
c84dec2f 2025 struct amdgpu_dm_connector *aconnector;
4562236b 2026 struct drm_connector *connector;
f8d2d39e 2027 struct drm_connector_list_iter iter;
fe7553be
LP
2028 struct drm_dp_mst_topology_mgr *mgr;
2029 int ret;
2030 bool need_hotplug = false;
4562236b 2031
f8d2d39e
LP
2032 drm_connector_list_iter_begin(dev, &iter);
2033 drm_for_each_connector_iter(connector, &iter) {
fe7553be
LP
2034 aconnector = to_amdgpu_dm_connector(connector);
2035 if (aconnector->dc_link->type != dc_connection_mst_branch ||
2036 aconnector->mst_port)
2037 continue;
2038
2039 mgr = &aconnector->mst_mgr;
2040
2041 if (suspend) {
2042 drm_dp_mst_topology_mgr_suspend(mgr);
2043 } else {
6f85f738 2044 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
fe7553be
LP
2045 if (ret < 0) {
2046 drm_dp_mst_topology_mgr_set_mst(mgr, false);
2047 need_hotplug = true;
2048 }
2049 }
4562236b 2050 }
f8d2d39e 2051 drm_connector_list_iter_end(&iter);
fe7553be
LP
2052
2053 if (need_hotplug)
2054 drm_kms_helper_hotplug_event(dev);
4562236b
HW
2055}
2056
9340dfd3
HW
2057static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2058{
2059 struct smu_context *smu = &adev->smu;
2060 int ret = 0;
2061
2062 if (!is_support_sw_smu(adev))
2063 return 0;
2064
2065 	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
2066 	 * depends on the Windows driver dc implementation.
2067 	 * For Navi1x, clock settings of dcn watermarks are fixed; the settings
2068 	 * should be passed to smu during boot up and resume from s3.
2069 	 * boot up: dc calculates dcn watermark clock settings within dc_create,
2070 * dcn20_resource_construct
2071 * then call pplib functions below to pass the settings to smu:
2072 * smu_set_watermarks_for_clock_ranges
2073 * smu_set_watermarks_table
2074 * navi10_set_watermarks_table
2075 * smu_write_watermarks_table
2076 *
2077 * For Renoir, clock settings of dcn watermark are also fixed values.
2078 	 * dc has implemented a different flow for the Windows driver:
2079 * dc_hardware_init / dc_set_power_state
2080 * dcn10_init_hw
2081 * notify_wm_ranges
2082 * set_wm_ranges
2083 * -- Linux
2084 * smu_set_watermarks_for_clock_ranges
2085 * renoir_set_watermarks_table
2086 * smu_write_watermarks_table
2087 *
2088 * For Linux,
2089 * dc_hardware_init -> amdgpu_dm_init
2090 	 * dc_set_power_state -> dm_resume
2091 	 *
2092 	 * Therefore, this function applies to navi10/12/14 but not Renoir.
2093 	 */
1d789535 2095 switch (adev->ip_versions[DCE_HWIP][0]) {
c08182f2
AD
2096 case IP_VERSION(2, 0, 2):
2097 case IP_VERSION(2, 0, 0):
9340dfd3
HW
2098 break;
2099 default:
2100 return 0;
2101 }
2102
e7a95eea
EQ
2103 ret = smu_write_watermarks_table(smu);
2104 if (ret) {
2105 DRM_ERROR("Failed to update WMTABLE!\n");
2106 return ret;
9340dfd3
HW
2107 }
2108
9340dfd3
HW
2109 return 0;
2110}
2111
b8592b48
LL
2112/**
2113 * dm_hw_init() - Initialize DC device
28d687ea 2114 * @handle: The base driver device containing the amdgpu_dm device.
b8592b48
LL
2115 *
2116 * Initialize the &struct amdgpu_display_manager device. This involves calling
2117 * the initializers of each DM component, then populating the struct with them.
2118 *
2119 * Although the function implies hardware initialization, both hardware and
2120 * software are initialized here. Splitting them out to their relevant init
2121 * hooks is a future TODO item.
2122 *
2123 * Some notable things that are initialized here:
2124 *
2125 * - Display Core, both software and hardware
2126 * - DC modules that we need (freesync and color management)
2127 * - DRM software states
2128 * - Interrupt sources and handlers
2129 * - Vblank support
2130 * - Debug FS entries, if enabled
2131 */
4562236b
HW
2132static int dm_hw_init(void *handle)
2133{
2134 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2135 /* Create DAL display manager */
2136 amdgpu_dm_init(adev);
4562236b
HW
2137 amdgpu_dm_hpd_init(adev);
2138
4562236b
HW
2139 return 0;
2140}
2141
b8592b48
LL
2142/**
2143 * dm_hw_fini() - Teardown DC device
28d687ea 2144 * @handle: The base driver device containing the amdgpu_dm device.
b8592b48
LL
2145 *
2146 * Teardown components within &struct amdgpu_display_manager that require
2147 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2148 * were loaded. Also flush IRQ workqueues and disable them.
2149 */
4562236b
HW
2150static int dm_hw_fini(void *handle)
2151{
2152 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2153
2154 amdgpu_dm_hpd_fini(adev);
2155
2156 amdgpu_dm_irq_fini(adev);
21de3396 2157 amdgpu_dm_fini(adev);
4562236b
HW
2158 return 0;
2159}
2160
cdaae837
BL
2161
2162static int dm_enable_vblank(struct drm_crtc *crtc);
2163static void dm_disable_vblank(struct drm_crtc *crtc);
2164
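/*
 * Helper used around GPU reset: walk every stream in the given dc_state,
 * look up the CRTC by its OTG instance, and enable or disable the pageflip
 * and vblank interrupts for each stream that still has planes attached.
 */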
2165static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2166 struct dc_state *state, bool enable)
2167{
2168 enum dc_irq_source irq_source;
2169 struct amdgpu_crtc *acrtc;
2170 int rc = -EBUSY;
2171 int i = 0;
2172
2173 for (i = 0; i < state->stream_count; i++) {
2174 acrtc = get_crtc_by_otg_inst(
2175 adev, state->stream_status[i].primary_otg_inst);
2176
2177 if (acrtc && state->stream_status[i].plane_count != 0) {
2178 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2179 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4711c033
LT
2180 			DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
2181 acrtc->crtc_id, enable ? "en" : "dis", rc);
cdaae837
BL
2182 if (rc)
2183 DRM_WARN("Failed to %s pflip interrupts\n",
2184 enable ? "enable" : "disable");
2185
2186 if (enable) {
2187 rc = dm_enable_vblank(&acrtc->base);
2188 if (rc)
2189 DRM_WARN("Failed to enable vblank interrupts\n");
2190 } else {
2191 dm_disable_vblank(&acrtc->base);
2192 }
2193
2194 }
2195 }
2196
2197}
2198
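/*
 * Commit a context with no streams: copy the current state, strip all planes
 * and streams from it, validate it, and commit it to DC. This idles the
 * display hardware on the suspend/reset paths.
 */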
dfd84d90 2199static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
cdaae837
BL
2200{
2201 struct dc_state *context = NULL;
2202 enum dc_status res = DC_ERROR_UNEXPECTED;
2203 int i;
2204 struct dc_stream_state *del_streams[MAX_PIPES];
2205 int del_streams_count = 0;
2206
2207 memset(del_streams, 0, sizeof(del_streams));
2208
2209 context = dc_create_state(dc);
2210 if (context == NULL)
2211 goto context_alloc_fail;
2212
2213 dc_resource_state_copy_construct_current(dc, context);
2214
2215 /* First remove from context all streams */
2216 for (i = 0; i < context->stream_count; i++) {
2217 struct dc_stream_state *stream = context->streams[i];
2218
2219 del_streams[del_streams_count++] = stream;
2220 }
2221
2222 /* Remove all planes for removed streams and then remove the streams */
2223 for (i = 0; i < del_streams_count; i++) {
2224 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2225 res = DC_FAIL_DETACH_SURFACES;
2226 goto fail;
2227 }
2228
2229 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2230 if (res != DC_OK)
2231 goto fail;
2232 }
2233
2234
2235 res = dc_validate_global_state(dc, context, false);
2236
2237 if (res != DC_OK) {
2238 		DRM_ERROR("%s: resource validation failed, dc_status: %d\n", __func__, res);
2239 goto fail;
2240 }
2241
2242 res = dc_commit_state(dc, context);
2243
2244fail:
2245 dc_release_state(context);
2246
2247context_alloc_fail:
2248 return res;
2249}
2250
8e794421
WL
2251static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2252{
2253 int i;
2254
2255 if (dm->hpd_rx_offload_wq) {
2256 for (i = 0; i < dm->dc->caps.max_links; i++)
2257 flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2258 }
2259}
2260
4562236b
HW
2261static int dm_suspend(void *handle)
2262{
2263 struct amdgpu_device *adev = handle;
2264 struct amdgpu_display_manager *dm = &adev->dm;
2265 int ret = 0;
4562236b 2266
53b3f8f4 2267 if (amdgpu_in_reset(adev)) {
cdaae837 2268 mutex_lock(&dm->dc_lock);
98ab5f35
BL
2269
2270#if defined(CONFIG_DRM_AMD_DC_DCN)
2271 dc_allow_idle_optimizations(adev->dm.dc, false);
2272#endif
2273
cdaae837
BL
2274 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2275
2276 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2277
2278 amdgpu_dm_commit_zero_streams(dm->dc);
2279
2280 amdgpu_dm_irq_suspend(adev);
2281
8e794421
WL
2282 hpd_rx_irq_work_suspend(dm);
2283
cdaae837
BL
2284 return ret;
2285 }
4562236b 2286
d2f0b53b 2287 WARN_ON(adev->dm.cached_state);
4a580877 2288 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
d2f0b53b 2289
4a580877 2290 s3_handle_mst(adev_to_drm(adev), true);
4562236b 2291
4562236b
HW
2292 amdgpu_dm_irq_suspend(adev);
2293
8e794421
WL
2294 hpd_rx_irq_work_suspend(dm);
2295
32f5062d 2296 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
4562236b 2297
1c2075d4 2298 return 0;
4562236b
HW
2299}
2300
1daf8c63
AD
2301static struct amdgpu_dm_connector *
2302amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2303 struct drm_crtc *crtc)
4562236b
HW
2304{
2305 uint32_t i;
c2cea706 2306 struct drm_connector_state *new_con_state;
4562236b
HW
2307 struct drm_connector *connector;
2308 struct drm_crtc *crtc_from_state;
2309
c2cea706
LSL
2310 for_each_new_connector_in_state(state, connector, new_con_state, i) {
2311 crtc_from_state = new_con_state->crtc;
4562236b
HW
2312
2313 if (crtc_from_state == crtc)
c84dec2f 2314 return to_amdgpu_dm_connector(connector);
4562236b
HW
2315 }
2316
2317 return NULL;
2318}
2319
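/*
 * When a connector is forced but no physical sink is detected, emulate the
 * detection: create a dc_sink matching the connector's signal type and read
 * the EDID locally.
 */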
fbbdadf2
BL
2320static void emulated_link_detect(struct dc_link *link)
2321{
2322 struct dc_sink_init_data sink_init_data = { 0 };
2323 struct display_sink_capability sink_caps = { 0 };
2324 enum dc_edid_status edid_status;
2325 struct dc_context *dc_ctx = link->ctx;
2326 struct dc_sink *sink = NULL;
2327 struct dc_sink *prev_sink = NULL;
2328
2329 link->type = dc_connection_none;
2330 prev_sink = link->local_sink;
2331
30164a16
VL
2332 if (prev_sink)
2333 dc_sink_release(prev_sink);
fbbdadf2
BL
2334
2335 switch (link->connector_signal) {
2336 case SIGNAL_TYPE_HDMI_TYPE_A: {
2337 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2338 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2339 break;
2340 }
2341
2342 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2343 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2344 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2345 break;
2346 }
2347
2348 case SIGNAL_TYPE_DVI_DUAL_LINK: {
2349 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2350 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2351 break;
2352 }
2353
2354 case SIGNAL_TYPE_LVDS: {
2355 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2356 sink_caps.signal = SIGNAL_TYPE_LVDS;
2357 break;
2358 }
2359
2360 case SIGNAL_TYPE_EDP: {
2361 sink_caps.transaction_type =
2362 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2363 sink_caps.signal = SIGNAL_TYPE_EDP;
2364 break;
2365 }
2366
2367 case SIGNAL_TYPE_DISPLAY_PORT: {
2368 sink_caps.transaction_type =
2369 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2370 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2371 break;
2372 }
2373
2374 default:
2375 DC_ERROR("Invalid connector type! signal:%d\n",
2376 link->connector_signal);
2377 return;
2378 }
2379
2380 sink_init_data.link = link;
2381 sink_init_data.sink_signal = sink_caps.signal;
2382
2383 sink = dc_sink_create(&sink_init_data);
2384 if (!sink) {
2385 DC_ERROR("Failed to create sink!\n");
2386 return;
2387 }
2388
dcd5fb82 2389 /* dc_sink_create returns a new reference */
fbbdadf2
BL
2390 link->local_sink = sink;
2391
2392 edid_status = dm_helpers_read_local_edid(
2393 link->ctx,
2394 link,
2395 sink);
2396
2397 if (edid_status != EDID_OK)
2398 		DC_ERROR("Failed to read EDID\n");
2399
2400}
2401
cdaae837
BL
2402static void dm_gpureset_commit_state(struct dc_state *dc_state,
2403 struct amdgpu_display_manager *dm)
2404{
2405 struct {
2406 struct dc_surface_update surface_updates[MAX_SURFACES];
2407 struct dc_plane_info plane_infos[MAX_SURFACES];
2408 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2409 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2410 struct dc_stream_update stream_update;
2411 	} *bundle;
2412 int k, m;
2413
2414 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2415
2416 if (!bundle) {
2417 dm_error("Failed to allocate update bundle\n");
2418 goto cleanup;
2419 }
2420
2421 for (k = 0; k < dc_state->stream_count; k++) {
2422 bundle->stream_update.stream = dc_state->streams[k];
2423
2424 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2425 bundle->surface_updates[m].surface =
2426 dc_state->stream_status->plane_states[m];
2427 bundle->surface_updates[m].surface->force_full_update =
2428 true;
2429 }
2430 dc_commit_updates_for_stream(
2431 dm->dc, bundle->surface_updates,
2432 dc_state->stream_status->plane_count,
efc8278e 2433 dc_state->streams[k], &bundle->stream_update, dc_state);
cdaae837
BL
2434 }
2435
2436cleanup:
2437 kfree(bundle);
2438
2439 return;
2440}
2441
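/*
 * Force the stream currently driven by the given link to DPMS off by
 * committing a stream update with dpms_off = true.
 */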
035f5496 2442static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
3c4d55c9
AP
2443{
2444 struct dc_stream_state *stream_state;
2445 struct amdgpu_dm_connector *aconnector = link->priv;
2446 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2447 struct dc_stream_update stream_update;
2448 bool dpms_off = true;
2449
2450 memset(&stream_update, 0, sizeof(stream_update));
2451 stream_update.dpms_off = &dpms_off;
2452
2453 mutex_lock(&adev->dm.dc_lock);
2454 stream_state = dc_stream_find_from_link(link);
2455
2456 if (stream_state == NULL) {
2457 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2458 mutex_unlock(&adev->dm.dc_lock);
2459 return;
2460 }
2461
2462 stream_update.stream = stream_state;
035f5496 2463 acrtc_state->force_dpms_off = true;
3c4d55c9 2464 dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
efc8278e
AJ
2465 stream_state, &stream_update,
2466 stream_state->ctx->dc->current_state);
3c4d55c9
AP
2467 mutex_unlock(&adev->dm.dc_lock);
2468}
2469
4562236b
HW
2470static int dm_resume(void *handle)
2471{
2472 struct amdgpu_device *adev = handle;
4a580877 2473 struct drm_device *ddev = adev_to_drm(adev);
4562236b 2474 struct amdgpu_display_manager *dm = &adev->dm;
c84dec2f 2475 struct amdgpu_dm_connector *aconnector;
4562236b 2476 struct drm_connector *connector;
f8d2d39e 2477 struct drm_connector_list_iter iter;
4562236b 2478 struct drm_crtc *crtc;
c2cea706 2479 struct drm_crtc_state *new_crtc_state;
fcb4019e
LSL
2480 struct dm_crtc_state *dm_new_crtc_state;
2481 struct drm_plane *plane;
2482 struct drm_plane_state *new_plane_state;
2483 struct dm_plane_state *dm_new_plane_state;
113b7a01 2484 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
fbbdadf2 2485 enum dc_connection_type new_connection_type = dc_connection_none;
cdaae837
BL
2486 struct dc_state *dc_state;
2487 int i, r, j;
4562236b 2488
53b3f8f4 2489 if (amdgpu_in_reset(adev)) {
cdaae837
BL
2490 dc_state = dm->cached_dc_state;
2491
2492 r = dm_dmub_hw_init(adev);
2493 if (r)
2494 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2495
2496 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2497 dc_resume(dm->dc);
2498
2499 amdgpu_dm_irq_resume_early(adev);
2500
2501 for (i = 0; i < dc_state->stream_count; i++) {
2502 dc_state->streams[i]->mode_changed = true;
2503 for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2504 dc_state->stream_status->plane_states[j]->update_flags.raw
2505 = 0xffffffff;
2506 }
2507 }
8fe44c08 2508#if defined(CONFIG_DRM_AMD_DC_DCN)
1ebcaebd
NK
2509 /*
2510 * Resource allocation happens for link encoders for newer ASIC in
2511 * dc_validate_global_state, so we need to revalidate it.
2512 *
2513 * This shouldn't fail (it passed once before), so warn if it does.
2514 */
2515 WARN_ON(dc_validate_global_state(dm->dc, dc_state, false) != DC_OK);
2516#endif
cdaae837
BL
2517
2518 WARN_ON(!dc_commit_state(dm->dc, dc_state));
4562236b 2519
cdaae837
BL
2520 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2521
2522 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2523
2524 dc_release_state(dm->cached_dc_state);
2525 dm->cached_dc_state = NULL;
2526
2527 amdgpu_dm_irq_resume_late(adev);
2528
2529 mutex_unlock(&dm->dc_lock);
2530
2531 return 0;
2532 }
113b7a01
LL
2533 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2534 dc_release_state(dm_state->context);
2535 dm_state->context = dc_create_state(dm->dc);
2536 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2537 dc_resource_state_construct(dm->dc, dm_state->context);
2538
8c7aea40
NK
2539 /* Before powering on DC we need to re-initialize DMUB. */
2540 r = dm_dmub_hw_init(adev);
2541 if (r)
2542 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2543
a80aa93d
ML
2544 /* power on hardware */
2545 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2546
4562236b
HW
2547 /* program HPD filter */
2548 dc_resume(dm->dc);
2549
4562236b
HW
2550 /*
2551 * early enable HPD Rx IRQ, should be done before set mode as short
2552 * pulse interrupts are used for MST
2553 */
2554 amdgpu_dm_irq_resume_early(adev);
2555
d20ebea8 2556 	/* On resume we need to rewrite the MSTM control bits to enable MST */
684cd480
LP
2557 s3_handle_mst(ddev, false);
2558
4562236b 2559 	/* Do detection */
f8d2d39e
LP
2560 drm_connector_list_iter_begin(ddev, &iter);
2561 drm_for_each_connector_iter(connector, &iter) {
c84dec2f 2562 aconnector = to_amdgpu_dm_connector(connector);
4562236b
HW
2563
2564 /*
2565 * this is the case when traversing through already created
2566 * MST connectors, should be skipped
2567 */
2568 if (aconnector->mst_port)
2569 continue;
2570
03ea364c 2571 mutex_lock(&aconnector->hpd_lock);
fbbdadf2
BL
2572 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2573 DRM_ERROR("KMS: Failed to detect connector\n");
2574
2575 if (aconnector->base.force && new_connection_type == dc_connection_none)
2576 emulated_link_detect(aconnector->dc_link);
2577 else
2578 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
3eb4eba4
RL
2579
2580 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2581 aconnector->fake_enable = false;
2582
dcd5fb82
MF
2583 if (aconnector->dc_sink)
2584 dc_sink_release(aconnector->dc_sink);
4562236b
HW
2585 aconnector->dc_sink = NULL;
2586 amdgpu_dm_update_connector_after_detect(aconnector);
03ea364c 2587 mutex_unlock(&aconnector->hpd_lock);
4562236b 2588 }
f8d2d39e 2589 drm_connector_list_iter_end(&iter);
4562236b 2590
1f6010a9 2591 /* Force mode set in atomic commit */
a80aa93d 2592 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
c2cea706 2593 new_crtc_state->active_changed = true;
4f346e65 2594
fcb4019e
LSL
2595 /*
2596 * atomic_check is expected to create the dc states. We need to release
2597 * them here, since they were duplicated as part of the suspend
2598 * procedure.
2599 */
a80aa93d 2600 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
fcb4019e
LSL
2601 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2602 if (dm_new_crtc_state->stream) {
2603 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2604 dc_stream_release(dm_new_crtc_state->stream);
2605 dm_new_crtc_state->stream = NULL;
2606 }
2607 }
2608
a80aa93d 2609 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
fcb4019e
LSL
2610 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2611 if (dm_new_plane_state->dc_state) {
2612 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2613 dc_plane_state_release(dm_new_plane_state->dc_state);
2614 dm_new_plane_state->dc_state = NULL;
2615 }
2616 }
2617
2d1af6a1 2618 drm_atomic_helper_resume(ddev, dm->cached_state);
4562236b 2619
a80aa93d 2620 dm->cached_state = NULL;
0a214e2f 2621
9faa4237 2622 amdgpu_dm_irq_resume_late(adev);
4562236b 2623
9340dfd3
HW
2624 amdgpu_dm_smu_write_watermarks_table(adev);
2625
2d1af6a1 2626 return 0;
4562236b
HW
2627}
2628
b8592b48
LL
2629/**
2630 * DOC: DM Lifecycle
2631 *
2632 * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2633 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2634 * the base driver's device list to be initialized and torn down accordingly.
2635 *
2636 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2637 */
2638
4562236b
HW
2639static const struct amd_ip_funcs amdgpu_dm_funcs = {
2640 .name = "dm",
2641 .early_init = dm_early_init,
7abcf6b5 2642 .late_init = dm_late_init,
4562236b
HW
2643 .sw_init = dm_sw_init,
2644 .sw_fini = dm_sw_fini,
e9669fb7 2645 .early_fini = amdgpu_dm_early_fini,
4562236b
HW
2646 .hw_init = dm_hw_init,
2647 .hw_fini = dm_hw_fini,
2648 .suspend = dm_suspend,
2649 .resume = dm_resume,
2650 .is_idle = dm_is_idle,
2651 .wait_for_idle = dm_wait_for_idle,
2652 .check_soft_reset = dm_check_soft_reset,
2653 .soft_reset = dm_soft_reset,
2654 .set_clockgating_state = dm_set_clockgating_state,
2655 .set_powergating_state = dm_set_powergating_state,
2656};
2657
2658const struct amdgpu_ip_block_version dm_ip_block =
2659{
2660 .type = AMD_IP_BLOCK_TYPE_DCE,
2661 .major = 1,
2662 .minor = 0,
2663 .rev = 0,
2664 .funcs = &amdgpu_dm_funcs,
2665};
2666
ca3268c4 2667
b8592b48
LL
2668/**
2669 * DOC: atomic
2670 *
2671 * *WIP*
2672 */
0a323b84 2673
b3663f70 2674static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
4d4772f6 2675 .fb_create = amdgpu_display_user_framebuffer_create,
dfbbfe3c 2676 .get_format_info = amd_get_format_info,
366c1baa 2677 .output_poll_changed = drm_fb_helper_output_poll_changed,
4562236b 2678 .atomic_check = amdgpu_dm_atomic_check,
0269764a 2679 .atomic_commit = drm_atomic_helper_commit,
54f5499a
AG
2680};
2681
2682static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2683 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
4562236b
HW
2684};
2685
94562810
RS
2686static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2687{
2688 u32 max_cll, min_cll, max, min, q, r;
2689 struct amdgpu_dm_backlight_caps *caps;
2690 struct amdgpu_display_manager *dm;
2691 struct drm_connector *conn_base;
2692 struct amdgpu_device *adev;
ec11fe37 2693 struct dc_link *link = NULL;
94562810
RS
2694 static const u8 pre_computed_values[] = {
2695 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2696 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
7fd13bae 2697 int i;
94562810
RS
2698
2699 if (!aconnector || !aconnector->dc_link)
2700 return;
2701
ec11fe37 2702 link = aconnector->dc_link;
2703 if (link->connector_signal != SIGNAL_TYPE_EDP)
2704 return;
2705
94562810 2706 conn_base = &aconnector->base;
1348969a 2707 adev = drm_to_adev(conn_base->dev);
94562810 2708 dm = &adev->dm;
7fd13bae
AD
2709 for (i = 0; i < dm->num_of_edps; i++) {
2710 if (link == dm->backlight_link[i])
2711 break;
2712 }
2713 if (i >= dm->num_of_edps)
2714 return;
2715 caps = &dm->backlight_caps[i];
94562810
RS
2716 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2717 caps->aux_support = false;
2718 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2719 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2720
d0ae0b64 2721 if (caps->ext_caps->bits.oled == 1 /*||
94562810 2722 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
d0ae0b64 2723 caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
94562810
RS
2724 caps->aux_support = true;
2725
7a46f05e
TI
2726 if (amdgpu_backlight == 0)
2727 caps->aux_support = false;
2728 else if (amdgpu_backlight == 1)
2729 caps->aux_support = true;
2730
94562810
RS
2731 /* From the specification (CTA-861-G), for calculating the maximum
2732 * luminance we need to use:
2733 * Luminance = 50*2**(CV/32)
2734 * Where CV is a one-byte value.
2735 	 * Evaluating this expression would require floating-point precision;
2736 	 * to avoid that complexity, we take advantage of the fact that CV is
2737 	 * divided by a constant. From Euclid's division algorithm, we know that
2738 	 * CV can be written as: CV = 32*q + r. Substituting CV into the
2739 	 * Luminance expression gives 50*(2**q)*(2**(r/32)), so we only
2740 	 * need to pre-compute the values of 50*(2**(r/32)). For that we
2741 	 * used the following Ruby line:
2742 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2743 * The results of the above expressions can be verified at
2744 * pre_computed_values.
2745 */
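	/*
	 * Worked example (illustrative values): for max_cll = 152,
	 * q = 152 >> 5 = 4 and r = 152 % 32 = 24, so
	 * max = (1 << 4) * pre_computed_values[24] = 16 * 84 = 1344,
	 * versus round(50 * 2**(152/32)) = 1345 from a direct
	 * floating-point evaluation, i.e. within integer rounding.
	 */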
2746 q = max_cll >> 5;
2747 r = max_cll % 32;
2748 max = (1 << q) * pre_computed_values[r];
2749
2750 // min luminance: maxLum * (CV/255)^2 / 100
2751 q = DIV_ROUND_CLOSEST(min_cll, 255);
2752 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2753
2754 caps->aux_max_input_signal = max;
2755 caps->aux_min_input_signal = min;
2756}
2757
97e51c16
HW
2758void amdgpu_dm_update_connector_after_detect(
2759 struct amdgpu_dm_connector *aconnector)
4562236b
HW
2760{
2761 struct drm_connector *connector = &aconnector->base;
2762 struct drm_device *dev = connector->dev;
b73a22d3 2763 struct dc_sink *sink;
4562236b
HW
2764
2765 /* MST handled by drm_mst framework */
2766 if (aconnector->mst_mgr.mst_state == true)
2767 return;
2768
4562236b 2769 sink = aconnector->dc_link->local_sink;
dcd5fb82
MF
2770 if (sink)
2771 dc_sink_retain(sink);
4562236b 2772
1f6010a9
DF
2773 /*
2774 * Edid mgmt connector gets first update only in mode_valid hook and then
4562236b 2775 * the connector sink is set to either fake or physical sink depends on link status.
1f6010a9 2776 * Skip if already done during boot.
4562236b
HW
2777 */
2778 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2779 && aconnector->dc_em_sink) {
2780
1f6010a9
DF
2781 /*
2782 		 * For S3 resume with a headless setup, use the em_sink to fake a stream
2783 		 * because on resume connector->sink is set to NULL
4562236b
HW
2784 */
2785 mutex_lock(&dev->mode_config.mutex);
2786
2787 if (sink) {
922aa1e1 2788 if (aconnector->dc_sink) {
98e6436d 2789 amdgpu_dm_update_freesync_caps(connector, NULL);
1f6010a9
DF
2790 /*
2791 * retain and release below are used to
2792 * bump up refcount for sink because the link doesn't point
2793 * to it anymore after disconnect, so on next crtc to connector
922aa1e1
AG
2794 * reshuffle by UMD we will get into unwanted dc_sink release
2795 */
dcd5fb82 2796 dc_sink_release(aconnector->dc_sink);
922aa1e1 2797 }
4562236b 2798 aconnector->dc_sink = sink;
dcd5fb82 2799 dc_sink_retain(aconnector->dc_sink);
98e6436d
AK
2800 amdgpu_dm_update_freesync_caps(connector,
2801 aconnector->edid);
4562236b 2802 } else {
98e6436d 2803 amdgpu_dm_update_freesync_caps(connector, NULL);
dcd5fb82 2804 if (!aconnector->dc_sink) {
4562236b 2805 aconnector->dc_sink = aconnector->dc_em_sink;
922aa1e1 2806 dc_sink_retain(aconnector->dc_sink);
dcd5fb82 2807 }
4562236b
HW
2808 }
2809
2810 mutex_unlock(&dev->mode_config.mutex);
dcd5fb82
MF
2811
2812 if (sink)
2813 dc_sink_release(sink);
4562236b
HW
2814 return;
2815 }
2816
2817 /*
2818 	 * TODO: temporary guard until a proper fix is found;
2819 	 * if this sink is an MST sink, we should not do anything
2820 */
dcd5fb82
MF
2821 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2822 dc_sink_release(sink);
4562236b 2823 return;
dcd5fb82 2824 }
4562236b
HW
2825
2826 if (aconnector->dc_sink == sink) {
1f6010a9
DF
2827 /*
2828 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2829 * Do nothing!!
2830 */
f1ad2f5e 2831 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
4562236b 2832 aconnector->connector_id);
dcd5fb82
MF
2833 if (sink)
2834 dc_sink_release(sink);
4562236b
HW
2835 return;
2836 }
2837
f1ad2f5e 2838 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
4562236b
HW
2839 aconnector->connector_id, aconnector->dc_sink, sink);
2840
2841 mutex_lock(&dev->mode_config.mutex);
2842
1f6010a9
DF
2843 /*
2844 * 1. Update status of the drm connector
2845 * 2. Send an event and let userspace tell us what to do
2846 */
4562236b 2847 if (sink) {
1f6010a9
DF
2848 /*
2849 * TODO: check if we still need the S3 mode update workaround.
2850 * If yes, put it here.
2851 */
c64b0d6b 2852 if (aconnector->dc_sink) {
98e6436d 2853 amdgpu_dm_update_freesync_caps(connector, NULL);
c64b0d6b
VL
2854 dc_sink_release(aconnector->dc_sink);
2855 }
4562236b
HW
2856
2857 aconnector->dc_sink = sink;
dcd5fb82 2858 dc_sink_retain(aconnector->dc_sink);
900b3cb1 2859 if (sink->dc_edid.length == 0) {
4562236b 2860 aconnector->edid = NULL;
e6142dd5
AP
2861 if (aconnector->dc_link->aux_mode) {
2862 drm_dp_cec_unset_edid(
2863 &aconnector->dm_dp_aux.aux);
2864 }
900b3cb1 2865 } else {
4562236b 2866 aconnector->edid =
e6142dd5 2867 (struct edid *)sink->dc_edid.raw_edid;
4562236b 2868
c555f023 2869 drm_connector_update_edid_property(connector,
e6142dd5 2870 aconnector->edid);
e6142dd5
AP
2871 if (aconnector->dc_link->aux_mode)
2872 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2873 aconnector->edid);
4562236b 2874 }
e6142dd5 2875
98e6436d 2876 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
94562810 2877 update_connector_ext_caps(aconnector);
4562236b 2878 } else {
e86e8947 2879 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
98e6436d 2880 amdgpu_dm_update_freesync_caps(connector, NULL);
c555f023 2881 drm_connector_update_edid_property(connector, NULL);
4562236b 2882 aconnector->num_modes = 0;
dcd5fb82 2883 dc_sink_release(aconnector->dc_sink);
4562236b 2884 aconnector->dc_sink = NULL;
5326c452 2885 aconnector->edid = NULL;
0c8620d6
BL
2886#ifdef CONFIG_DRM_AMD_DC_HDCP
2887 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2888 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2889 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2890#endif
4562236b
HW
2891 }
2892
2893 mutex_unlock(&dev->mode_config.mutex);
dcd5fb82 2894
0f877894
OV
2895 update_subconnector_property(aconnector);
2896
dcd5fb82
MF
2897 if (sink)
2898 dc_sink_release(sink);
4562236b
HW
2899}
2900
e27c41d5 2901static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
4562236b 2902{
4562236b
HW
2903 struct drm_connector *connector = &aconnector->base;
2904 struct drm_device *dev = connector->dev;
fbbdadf2 2905 enum dc_connection_type new_connection_type = dc_connection_none;
1348969a 2906 struct amdgpu_device *adev = drm_to_adev(dev);
97f6c917 2907 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
035f5496 2908 struct dm_crtc_state *dm_crtc_state = NULL;
4562236b 2909
b972b4f9
HW
2910 if (adev->dm.disable_hpd_irq)
2911 return;
2912
035f5496
AP
2913 if (dm_con_state->base.state && dm_con_state->base.crtc)
2914 dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
2915 dm_con_state->base.state,
2916 dm_con_state->base.crtc));
1f6010a9
DF
2917 /*
2918 	 * In case of failure or MST, there is no need to update the connector status
2919 	 * or notify the OS since (in the MST case) MST does this in its own context.
4562236b
HW
2920 */
2921 mutex_lock(&aconnector->hpd_lock);
2e0ac3d6 2922
0c8620d6 2923#ifdef CONFIG_DRM_AMD_DC_HDCP
97f6c917 2924 if (adev->dm.hdcp_workqueue) {
96a3b32e 2925 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
97f6c917
BL
2926 dm_con_state->update_hdcp = true;
2927 }
0c8620d6 2928#endif
2e0ac3d6
HW
2929 if (aconnector->fake_enable)
2930 aconnector->fake_enable = false;
2931
fbbdadf2
BL
2932 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2933 DRM_ERROR("KMS: Failed to detect connector\n");
2934
2935 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2936 emulated_link_detect(aconnector->dc_link);
2937
fbbdadf2
BL
2938 drm_modeset_lock_all(dev);
2939 dm_restore_drm_connector_state(dev, connector);
2940 drm_modeset_unlock_all(dev);
2941
2942 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2943 drm_kms_helper_hotplug_event(dev);
2944
2945 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3c4d55c9 2946 if (new_connection_type == dc_connection_none &&
035f5496
AP
2947 aconnector->dc_link->type == dc_connection_none &&
2948 dm_crtc_state)
2949 dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
4562236b 2950
3c4d55c9 2951 amdgpu_dm_update_connector_after_detect(aconnector);
4562236b
HW
2952
2953 drm_modeset_lock_all(dev);
2954 dm_restore_drm_connector_state(dev, connector);
2955 drm_modeset_unlock_all(dev);
2956
2957 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2958 drm_kms_helper_hotplug_event(dev);
2959 }
2960 mutex_unlock(&aconnector->hpd_lock);
2961
2962}
2963
e27c41d5
JS
2964static void handle_hpd_irq(void *param)
2965{
2966 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2967
2968 handle_hpd_irq_helper(aconnector);
2969
2970}
2971
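/*
 * Drain MST sideband messages after a short-pulse (hpd_rx) interrupt: read
 * the DPCD sink-count/ESI registers, let the DRM MST manager handle the IRQ,
 * ACK it back at DPCD, and repeat until no new IRQ is raised or
 * max_process_count is reached.
 */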
8e794421 2972static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
4562236b
HW
2973{
2974 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2975 uint8_t dret;
2976 bool new_irq_handled = false;
2977 int dpcd_addr;
2978 int dpcd_bytes_to_read;
2979
2980 const int max_process_count = 30;
2981 int process_count = 0;
2982
2983 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2984
2985 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2986 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2987 /* DPCD 0x200 - 0x201 for downstream IRQ */
2988 dpcd_addr = DP_SINK_COUNT;
2989 } else {
2990 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2991 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2992 dpcd_addr = DP_SINK_COUNT_ESI;
2993 }
2994
2995 dret = drm_dp_dpcd_read(
2996 &aconnector->dm_dp_aux.aux,
2997 dpcd_addr,
2998 esi,
2999 dpcd_bytes_to_read);
3000
3001 while (dret == dpcd_bytes_to_read &&
3002 process_count < max_process_count) {
3003 uint8_t retry;
3004 dret = 0;
3005
3006 process_count++;
3007
f1ad2f5e 3008 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
4562236b
HW
3009 /* handle HPD short pulse irq */
3010 if (aconnector->mst_mgr.mst_state)
3011 drm_dp_mst_hpd_irq(
3012 &aconnector->mst_mgr,
3013 esi,
3014 &new_irq_handled);
4562236b
HW
3015
3016 if (new_irq_handled) {
3017 			/* ACK at DPCD to notify downstream */
3018 const int ack_dpcd_bytes_to_write =
3019 dpcd_bytes_to_read - 1;
3020
3021 for (retry = 0; retry < 3; retry++) {
3022 uint8_t wret;
3023
3024 wret = drm_dp_dpcd_write(
3025 &aconnector->dm_dp_aux.aux,
3026 dpcd_addr + 1,
3027 &esi[1],
3028 ack_dpcd_bytes_to_write);
3029 if (wret == ack_dpcd_bytes_to_write)
3030 break;
3031 }
3032
1f6010a9 3033 			/* check if there is a new irq to be handled */
4562236b
HW
3034 dret = drm_dp_dpcd_read(
3035 &aconnector->dm_dp_aux.aux,
3036 dpcd_addr,
3037 esi,
3038 dpcd_bytes_to_read);
3039
3040 new_irq_handled = false;
d4a6e8a9 3041 } else {
4562236b 3042 break;
d4a6e8a9 3043 }
4562236b
HW
3044 }
3045
3046 if (process_count == max_process_count)
f1ad2f5e 3047 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
4562236b
HW
3048}
3049
8e794421
WL
3050static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3051 union hpd_irq_data hpd_irq_data)
3052{
3053 struct hpd_rx_irq_offload_work *offload_work =
3054 kzalloc(sizeof(*offload_work), GFP_KERNEL);
3055
3056 if (!offload_work) {
3057 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3058 return;
3059 }
3060
3061 INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3062 offload_work->data = hpd_irq_data;
3063 offload_work->offload_wq = offload_wq;
3064
3065 queue_work(offload_wq->wq, &offload_work->work);
3066 	DRM_DEBUG_KMS("queued work to handle hpd_rx offload\n");
3067}
3068
4562236b
HW
3069static void handle_hpd_rx_irq(void *param)
3070{
c84dec2f 3071 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
4562236b
HW
3072 struct drm_connector *connector = &aconnector->base;
3073 struct drm_device *dev = connector->dev;
53cbf65c 3074 struct dc_link *dc_link = aconnector->dc_link;
4562236b 3075 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
c8ea79a8 3076 bool result = false;
fbbdadf2 3077 enum dc_connection_type new_connection_type = dc_connection_none;
c8ea79a8 3078 struct amdgpu_device *adev = drm_to_adev(dev);
2a0f9270 3079 union hpd_irq_data hpd_irq_data;
8e794421
WL
3080 bool link_loss = false;
3081 bool has_left_work = false;
3082 int idx = aconnector->base.index;
3083 struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
2a0f9270
BL
3084
3085 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
4562236b 3086
b972b4f9
HW
3087 if (adev->dm.disable_hpd_irq)
3088 return;
3089
1f6010a9
DF
3090 /*
3091 	 * TODO: temporary mutex to protect the hpd interrupt from a gpio
4562236b
HW
3092 	 * conflict; after the i2c helper is implemented, this mutex should be
3093 	 * retired.
3094 */
b86e7eef 3095 mutex_lock(&aconnector->hpd_lock);
4562236b 3096
8e794421
WL
3097 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3098 &link_loss, true, &has_left_work);
3083a984 3099
8e794421
WL
3100 if (!has_left_work)
3101 goto out;
3102
3103 if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3104 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3105 goto out;
3106 }
3107
3108 if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3109 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3110 hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3111 dm_handle_mst_sideband_msg(aconnector);
3083a984
QZ
3112 goto out;
3113 }
3083a984 3114
8e794421
WL
3115 if (link_loss) {
3116 bool skip = false;
d2aa1356 3117
8e794421
WL
3118 spin_lock(&offload_wq->offload_lock);
3119 skip = offload_wq->is_handling_link_loss;
3120
3121 if (!skip)
3122 offload_wq->is_handling_link_loss = true;
3123
3124 spin_unlock(&offload_wq->offload_lock);
3125
3126 if (!skip)
3127 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3128
3129 goto out;
3130 }
3131 }
c8ea79a8 3132
3083a984 3133out:
c8ea79a8 3134 if (result && !is_mst_root_connector) {
4562236b 3135 /* Downstream Port status changed. */
fbbdadf2
BL
3136 if (!dc_link_detect_sink(dc_link, &new_connection_type))
3137 DRM_ERROR("KMS: Failed to detect connector\n");
3138
3139 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3140 emulated_link_detect(dc_link);
3141
3142 if (aconnector->fake_enable)
3143 aconnector->fake_enable = false;
3144
3145 amdgpu_dm_update_connector_after_detect(aconnector);
3146
3147
3148 drm_modeset_lock_all(dev);
3149 dm_restore_drm_connector_state(dev, connector);
3150 drm_modeset_unlock_all(dev);
3151
3152 drm_kms_helper_hotplug_event(dev);
3153 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
88ac3dda
RL
3154
3155 if (aconnector->fake_enable)
3156 aconnector->fake_enable = false;
3157
4562236b
HW
3158 amdgpu_dm_update_connector_after_detect(aconnector);
3159
3160
3161 drm_modeset_lock_all(dev);
3162 dm_restore_drm_connector_state(dev, connector);
3163 drm_modeset_unlock_all(dev);
3164
3165 drm_kms_helper_hotplug_event(dev);
3166 }
3167 }
2a0f9270 3168#ifdef CONFIG_DRM_AMD_DC_HDCP
95f247e7
DC
3169 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3170 if (adev->dm.hdcp_workqueue)
3171 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
3172 }
2a0f9270 3173#endif
4562236b 3174
b86e7eef 3175 if (dc_link->type != dc_connection_mst_branch)
e86e8947 3176 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
b86e7eef
NC
3177
3178 mutex_unlock(&aconnector->hpd_lock);
4562236b
HW
3179}
3180
3181static void register_hpd_handlers(struct amdgpu_device *adev)
3182{
4a580877 3183 struct drm_device *dev = adev_to_drm(adev);
4562236b 3184 struct drm_connector *connector;
c84dec2f 3185 struct amdgpu_dm_connector *aconnector;
4562236b
HW
3186 const struct dc_link *dc_link;
3187 struct dc_interrupt_params int_params = {0};
3188
3189 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3190 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3191
3192 list_for_each_entry(connector,
3193 &dev->mode_config.connector_list, head) {
3194
c84dec2f 3195 aconnector = to_amdgpu_dm_connector(connector);
4562236b
HW
3196 dc_link = aconnector->dc_link;
3197
3198 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3199 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3200 int_params.irq_source = dc_link->irq_source_hpd;
3201
3202 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3203 handle_hpd_irq,
3204 (void *) aconnector);
3205 }
3206
3207 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3208
3209 /* Also register for DP short pulse (hpd_rx). */
3210 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3211 int_params.irq_source = dc_link->irq_source_hpd_rx;
3212
3213 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3214 handle_hpd_rx_irq,
3215 (void *) aconnector);
8e794421
WL
3216
3217 if (adev->dm.hpd_rx_offload_wq)
3218 adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3219 aconnector;
4562236b
HW
3220 }
3221 }
3222}
3223
55e56389
MR
3224#if defined(CONFIG_DRM_AMD_DC_SI)
3225/* Register IRQ sources and initialize IRQ callbacks */
3226static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3227{
3228 struct dc *dc = adev->dm.dc;
3229 struct common_irq_params *c_irq_params;
3230 struct dc_interrupt_params int_params = {0};
3231 int r;
3232 int i;
3233 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3234
3235 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3236 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3237
3238 /*
3239 * Actions of amdgpu_irq_add_id():
3240 * 1. Register a set() function with base driver.
3241 * Base driver will call set() function to enable/disable an
3242 * interrupt in DC hardware.
3243 * 2. Register amdgpu_dm_irq_handler().
3244 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3245 * coming from DC hardware.
3246 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3247 * for acknowledging and handling. */
3248
3249 /* Use VBLANK interrupt */
3250 for (i = 0; i < adev->mode_info.num_crtc; i++) {
3251 		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3252 if (r) {
3253 DRM_ERROR("Failed to add crtc irq id!\n");
3254 return r;
3255 }
3256
3257 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3258 int_params.irq_source =
3259 			dc_interrupt_to_irq_source(dc, i + 1, 0);
3260
3261 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3262
3263 c_irq_params->adev = adev;
3264 c_irq_params->irq_src = int_params.irq_source;
3265
3266 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3267 dm_crtc_high_irq, c_irq_params);
3268 }
3269
3270 /* Use GRPH_PFLIP interrupt */
3271 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3272 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3273 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3274 if (r) {
3275 DRM_ERROR("Failed to add page flip irq id!\n");
3276 return r;
3277 }
3278
3279 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3280 int_params.irq_source =
3281 dc_interrupt_to_irq_source(dc, i, 0);
3282
3283 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3284
3285 c_irq_params->adev = adev;
3286 c_irq_params->irq_src = int_params.irq_source;
3287
3288 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3289 dm_pflip_high_irq, c_irq_params);
3290
3291 }
3292
3293 /* HPD */
3294 r = amdgpu_irq_add_id(adev, client_id,
3295 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3296 if (r) {
3297 DRM_ERROR("Failed to add hpd irq id!\n");
3298 return r;
3299 }
3300
3301 register_hpd_handlers(adev);
3302
3303 return 0;
3304}
3305#endif
3306
3307/* Register IRQ sources and initialize IRQ callbacks */
3308static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3309{
3310 struct dc *dc = adev->dm.dc;
3311 struct common_irq_params *c_irq_params;
3312 struct dc_interrupt_params int_params = {0};
3313 int r;
3314 int i;
3315 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3316
3317 if (adev->family >= AMDGPU_FAMILY_AI)
3318 client_id = SOC15_IH_CLIENTID_DCE;
3319
3320 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3321 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3322
3323 /*
3324 * Actions of amdgpu_irq_add_id():
3325 * 1. Register a set() function with base driver.
3326 * Base driver will call set() function to enable/disable an
3327 * interrupt in DC hardware.
3328 * 2. Register amdgpu_dm_irq_handler().
3329 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3330 * coming from DC hardware.
3331 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3332 * for acknowledging and handling. */
3333
3334 /* Use VBLANK interrupt */
3335 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3336 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3337 if (r) {
3338 DRM_ERROR("Failed to add crtc irq id!\n");
3339 return r;
3340 }
3341
3342 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3343 int_params.irq_source =
3344 dc_interrupt_to_irq_source(dc, i, 0);
3345
3346 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3347
3348 c_irq_params->adev = adev;
3349 c_irq_params->irq_src = int_params.irq_source;
3350
3351 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3352 dm_crtc_high_irq, c_irq_params);
3353 }
3354
3355 /* Use VUPDATE interrupt */
3356 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3357 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3358 if (r) {
3359 DRM_ERROR("Failed to add vupdate irq id!\n");
3360 return r;
3361 }
3362
3363 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3364 int_params.irq_source =
3365 dc_interrupt_to_irq_source(dc, i, 0);
3366
3367 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3368
3369 c_irq_params->adev = adev;
3370 c_irq_params->irq_src = int_params.irq_source;
3371
3372 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3373 dm_vupdate_high_irq, c_irq_params);
3374 }
3375
3376 /* Use GRPH_PFLIP interrupt */
3377 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3378 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3379 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3380 if (r) {
3381 DRM_ERROR("Failed to add page flip irq id!\n");
3382 return r;
3383 }
3384
3385 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3386 int_params.irq_source =
3387 dc_interrupt_to_irq_source(dc, i, 0);
3388
3389 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3390
3391 c_irq_params->adev = adev;
3392 c_irq_params->irq_src = int_params.irq_source;
3393
3394 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3395 dm_pflip_high_irq, c_irq_params);
3396
3397 }
3398
3399 /* HPD */
3400 r = amdgpu_irq_add_id(adev, client_id,
3401 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3402 if (r) {
3403 DRM_ERROR("Failed to add hpd irq id!\n");
3404 return r;
3405 }
3406
3407 register_hpd_handlers(adev);
3408
3409 return 0;
3410}
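/*
 * Worked example (added for clarity, not in the original file): the
 * common_irq_params tables above are indexed by DC irq source relative
 * to the first source of that kind. For the second CRTC on DCE,
 * dc_interrupt_to_irq_source() yields DC_IRQ_SOURCE_VBLANK2, so its
 * state lands in vblank_params[DC_IRQ_SOURCE_VBLANK2 -
 * DC_IRQ_SOURCE_VBLANK1], i.e. slot 1.
 */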
3411
3412#if defined(CONFIG_DRM_AMD_DC_DCN)
3413/* Register IRQ sources and initialize IRQ callbacks */
3414static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3415{
3416 struct dc *dc = adev->dm.dc;
3417 struct common_irq_params *c_irq_params;
3418 struct dc_interrupt_params int_params = {0};
3419 int r;
3420 int i;
3421#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3422 static const unsigned int vrtl_int_srcid[] = {
3423 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3424 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3425 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3426 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3427 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3428 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3429 };
3430#endif
3431
3432 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3433 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3434
3435 /*
3436 * Actions of amdgpu_irq_add_id():
3437 * 1. Register a set() function with base driver.
3438 * Base driver will call set() function to enable/disable an
3439 * interrupt in DC hardware.
3440 * 2. Register amdgpu_dm_irq_handler().
3441 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3442 * coming from DC hardware.
3443 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3444 * for acknowledging and handling.
3445 */
3446
3447 /* Use VSTARTUP interrupt */
3448 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3449 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3450 i++) {
3451 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3452
3453 if (r) {
3454 DRM_ERROR("Failed to add crtc irq id!\n");
3455 return r;
3456 }
3457
3458 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3459 int_params.irq_source =
3460 dc_interrupt_to_irq_source(dc, i, 0);
3461
3462 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3463
3464 c_irq_params->adev = adev;
3465 c_irq_params->irq_src = int_params.irq_source;
3466
3467 amdgpu_dm_irq_register_interrupt(
3468 adev, &int_params, dm_crtc_high_irq, c_irq_params);
3469 }
3470
3471 /* Use otg vertical line interrupt */
3472#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3473 for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3474 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3475 vrtl_int_srcid[i], &adev->vline0_irq);
3476
3477 if (r) {
3478 DRM_ERROR("Failed to add vline0 irq id!\n");
3479 return r;
3480 }
3481
3482 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3483 int_params.irq_source =
3484 dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3485
3486 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3487 DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3488 break;
3489 }
3490
3491 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3492 - DC_IRQ_SOURCE_DC1_VLINE0];
3493
3494 c_irq_params->adev = adev;
3495 c_irq_params->irq_src = int_params.irq_source;
3496
3497 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3498 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3499 }
3500#endif
3501
3502 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3503 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3504 * to trigger at end of each vblank, regardless of state of the lock,
3505 * matching DCE behaviour.
3506 */
3507 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3508 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3509 i++) {
3510 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3511
3512 if (r) {
3513 DRM_ERROR("Failed to add vupdate irq id!\n");
3514 return r;
3515 }
3516
3517 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3518 int_params.irq_source =
3519 dc_interrupt_to_irq_source(dc, i, 0);
3520
3521 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3522
3523 c_irq_params->adev = adev;
3524 c_irq_params->irq_src = int_params.irq_source;
3525
3526 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3527 dm_vupdate_high_irq, c_irq_params);
3528 }
3529
3530 /* Use GRPH_PFLIP interrupt */
3531 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3532 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3533 i++) {
3534 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3535 if (r) {
3536 DRM_ERROR("Failed to add page flip irq id!\n");
3537 return r;
3538 }
3539
3540 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3541 int_params.irq_source =
3542 dc_interrupt_to_irq_source(dc, i, 0);
3543
3544 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3545
3546 c_irq_params->adev = adev;
3547 c_irq_params->irq_src = int_params.irq_source;
3548
3549 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3550 dm_pflip_high_irq, c_irq_params);
3551
3552 }
3553
3554 /* HPD */
3555 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3556 &adev->hpd_irq);
3557 if (r) {
3558 DRM_ERROR("Failed to add hpd irq id!\n");
3559 return r;
3560 }
3561
3562 register_hpd_handlers(adev);
3563
3564 return 0;
3565}
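/*
 * Worked example (added for clarity, not in the original file): on DCN
 * the per-CRTC sources are consecutive, so with num_crtc == 4 the
 * VSTARTUP loop above registers DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP
 * through DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + 3, one source per OTG.
 */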
3566/* Register Outbox IRQ sources and initialize IRQ callbacks */
3567static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3568{
3569 struct dc *dc = adev->dm.dc;
3570 struct common_irq_params *c_irq_params;
3571 struct dc_interrupt_params int_params = {0};
3572 int r, i;
3573
3574 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3575 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3576
3577 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3578 &adev->dmub_outbox_irq);
3579 if (r) {
3580 DRM_ERROR("Failed to add outbox irq id!\n");
3581 return r;
3582 }
3583
3584 if (dc->ctx->dmub_srv) {
3585 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3586 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3587 int_params.irq_source =
3588 dc_interrupt_to_irq_source(dc, i, 0);
3589
3590 c_irq_params = &adev->dm.dmub_outbox_params[0];
3591
3592 c_irq_params->adev = adev;
3593 c_irq_params->irq_src = int_params.irq_source;
3594
3595 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3596 dm_dmub_outbox1_low_irq, c_irq_params);
3597 }
3598
3599 return 0;
3600}
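/*
 * Note (added for clarity): unlike the high-context CRTC/pflip sources
 * above, the DMUB outbox source is registered with
 * INTERRUPT_LOW_IRQ_CONTEXT, like HPD, so dm_dmub_outbox1_low_irq is
 * expected to run from the deferred handling path rather than directly
 * from the hardirq handler.
 */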
3601#endif
3602
3603/*
3604 * Acquires the lock for the atomic state object and returns
3605 * the new atomic state.
3606 *
3607 * This should only be called during atomic check.
3608 */
3609static int dm_atomic_get_state(struct drm_atomic_state *state,
3610 struct dm_atomic_state **dm_state)
3611{
3612 struct drm_device *dev = state->dev;
3613 struct amdgpu_device *adev = drm_to_adev(dev);
3614 struct amdgpu_display_manager *dm = &adev->dm;
3615 struct drm_private_state *priv_state;
3616
3617 if (*dm_state)
3618 return 0;
3619
3620 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3621 if (IS_ERR(priv_state))
3622 return PTR_ERR(priv_state);
3623
3624 *dm_state = to_dm_atomic_state(priv_state);
3625
3626 return 0;
3627}
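/*
 * Illustrative sketch, not part of the original file: typical use of
 * dm_atomic_get_state() from an atomic-check helper. The caller starts
 * with a NULL dm_state pointer; the first call acquires the private
 * object lock and fills it in, later calls are no-ops. The function
 * name below is hypothetical.
 */
#if 0
static int example_check(struct drm_atomic_state *state)
{
	struct dm_atomic_state *dm_state = NULL;
	int ret;

	ret = dm_atomic_get_state(state, &dm_state);
	if (ret)
		return ret;

	/* dm_state->context is now safe to inspect or modify. */
	return 0;
}
#endif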
3628
3629static struct dm_atomic_state *
3630dm_atomic_get_new_state(struct drm_atomic_state *state)
3631{
3632 struct drm_device *dev = state->dev;
3633 struct amdgpu_device *adev = drm_to_adev(dev);
3634 struct amdgpu_display_manager *dm = &adev->dm;
3635 struct drm_private_obj *obj;
3636 struct drm_private_state *new_obj_state;
3637 int i;
3638
3639 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3640 if (obj->funcs == dm->atomic_obj.funcs)
3641 return to_dm_atomic_state(new_obj_state);
3642 }
3643
3644 return NULL;
3645}
3646
3647static struct drm_private_state *
3648dm_atomic_duplicate_state(struct drm_private_obj *obj)
3649{
3650 struct dm_atomic_state *old_state, *new_state;
3651
3652 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3653 if (!new_state)
3654 return NULL;
3655
3656 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3657
3658 old_state = to_dm_atomic_state(obj->state);
3659
3660 if (old_state && old_state->context)
3661 new_state->context = dc_copy_state(old_state->context);
3662
3663 if (!new_state->context) {
3664 kfree(new_state);
3665 return NULL;
3666 }
3667
3668 return &new_state->base;
3669}
3670
3671static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3672 struct drm_private_state *state)
3673{
3674 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3675
3676 if (dm_state && dm_state->context)
3677 dc_release_state(dm_state->context);
3678
3679 kfree(dm_state);
3680}
3681
3682static struct drm_private_state_funcs dm_atomic_state_funcs = {
3683 .atomic_duplicate_state = dm_atomic_duplicate_state,
3684 .atomic_destroy_state = dm_atomic_destroy_state,
3685};
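/*
 * Note (added for clarity): the duplicate/destroy pair above gives the
 * private object copy-on-write semantics: each duplicated state gets
 * its own DC context copy via dc_copy_state(), and dc_release_state()
 * drops that reference when the state is destroyed.
 */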
3686
3687static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3688{
3689 struct dm_atomic_state *state;
3690 int r;
3691
3692 adev->mode_info.mode_config_initialized = true;
3693
3694 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3695 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3696
3697 adev_to_drm(adev)->mode_config.max_width = 16384;
3698 adev_to_drm(adev)->mode_config.max_height = 16384;
3699
3700 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3701 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3702 /* indicates support for immediate flip */
3703 adev_to_drm(adev)->mode_config.async_page_flip = true;
3704
3705 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3706
3707 state = kzalloc(sizeof(*state), GFP_KERNEL);
3708 if (!state)
3709 return -ENOMEM;
3710
3711 state->context = dc_create_state(adev->dm.dc);
3712 if (!state->context) {
3713 kfree(state);
3714 return -ENOMEM;
3715 }
3716
3717 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3718
3719 drm_atomic_private_obj_init(adev_to_drm(adev),
3720 &adev->dm.atomic_obj,
3721 &state->base,
3722 &dm_atomic_state_funcs);
3723
3724 r = amdgpu_display_modeset_create_props(adev);
3725 if (r) {
3726 dc_release_state(state->context);
3727 kfree(state);
3728 return r;
3729 }
3730
3731 r = amdgpu_dm_audio_init(adev);
3732 if (r) {
3733 dc_release_state(state->context);
3734 kfree(state);
3735 return r;
3736 }
3737
3738 return 0;
3739}
3740
3741#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3742#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3743#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3744
3745#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3746 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3747
3748static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3749 int bl_idx)
3750{
3751#if defined(CONFIG_ACPI)
3752 struct amdgpu_dm_backlight_caps caps;
3753
3754 memset(&caps, 0, sizeof(caps));
3755
3756 if (dm->backlight_caps[bl_idx].caps_valid)
3757 return;
3758
3759 amdgpu_acpi_get_backlight_caps(&caps);
3760 if (caps.caps_valid) {
3761 dm->backlight_caps[bl_idx].caps_valid = true;
3762 if (caps.aux_support)
3763 return;
3764 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3765 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3766 } else {
3767 dm->backlight_caps[bl_idx].min_input_signal =
3768 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3769 dm->backlight_caps[bl_idx].max_input_signal =
3770 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3771 }
3772#else
3773 if (dm->backlight_caps[bl_idx].aux_support)
3774 return;
3775
3776 dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3777 dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3778#endif
3779}
3780
3781static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3782 unsigned *min, unsigned *max)
3783{
3784 if (!caps)
3785 return 0;
3786
3787 if (caps->aux_support) {
3788 // Firmware limits are in nits, DC API wants millinits.
3789 *max = 1000 * caps->aux_max_input_signal;
3790 *min = 1000 * caps->aux_min_input_signal;
3791 } else {
3792 // Firmware limits are 8-bit, PWM control is 16-bit.
3793 *max = 0x101 * caps->max_input_signal;
3794 *min = 0x101 * caps->min_input_signal;
3795 }
3796 return 1;
3797}
3798
3799static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3800 uint32_t brightness)
3801{
3802 unsigned min, max;
3803
3804 if (!get_brightness_range(caps, &min, &max))
3805 return brightness;
3806
3807 // Rescale 0..255 to min..max
3808 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3809 AMDGPU_MAX_BL_LEVEL);
3810}
3811
3812static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3813 uint32_t brightness)
3814{
3815 unsigned min, max;
3816
3817 if (!get_brightness_range(caps, &min, &max))
3818 return brightness;
3819
3820 if (brightness < min)
3821 return 0;
3822 // Rescale min..max to 0..255
3823 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3824 max - min);
3825}
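/*
 * Worked example (added for clarity, not in the original file): on the
 * PWM path with firmware limits min_input_signal = 12 and
 * max_input_signal = 255, the range becomes min = 0x101 * 12 = 3084 and
 * max = 0x101 * 255 = 65535. A user brightness of 128 then maps to
 * 3084 + DIV_ROUND_CLOSEST((65535 - 3084) * 128, 255) = 34432, and
 * convert_brightness_to_user() inverts that mapping back to ~128.
 */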
3826
3827static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3828 int bl_idx,
3829 u32 user_brightness)
3830{
3831 struct amdgpu_dm_backlight_caps caps;
3832 struct dc_link *link;
3833 u32 brightness;
3834 bool rc;
3835
3836 amdgpu_dm_update_backlight_caps(dm, bl_idx);
3837 caps = dm->backlight_caps[bl_idx];
3838
3839 dm->brightness[bl_idx] = user_brightness;
3840 brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3841 link = (struct dc_link *)dm->backlight_link[bl_idx];
3842
3843 /* Change brightness based on AUX property */
3844 if (caps.aux_support) {
3845 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3846 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3847 if (!rc)
3848 DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
3849 } else {
3850 rc = dc_link_set_backlight_level(link, brightness, 0);
3851 if (!rc)
3852 DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
3853 }
3854
3855 return rc ? 0 : 1;
3856}
3857
3858static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3859{
3860 struct amdgpu_display_manager *dm = bl_get_data(bd);
3861 int i;
3862
3863 for (i = 0; i < dm->num_of_edps; i++) {
3864 if (bd == dm->backlight_dev[i])
3865 break;
3866 }
3867 if (i >= AMDGPU_DM_MAX_NUM_EDP)
3868 i = 0;
3869 amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
3870
3871 return 0;
3872}
3873
3874static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
3875 int bl_idx)
3876{
3877 struct amdgpu_dm_backlight_caps caps;
3878 struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
3879
3880 amdgpu_dm_update_backlight_caps(dm, bl_idx);
3881 caps = dm->backlight_caps[bl_idx];
3882
3883 if (caps.aux_support) {
3884 u32 avg, peak;
3885 bool rc;
3886
3887 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3888 if (!rc)
3889 return dm->brightness[bl_idx];
3890 return convert_brightness_to_user(&caps, avg);
3891 } else {
3892 int ret = dc_link_get_backlight_level(link);
3893
3894 if (ret == DC_ERROR_UNEXPECTED)
3895 return dm->brightness[bl_idx];
3896 return convert_brightness_to_user(&caps, ret);
3897 }
3898}
3899
3900static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3901{
3902 struct amdgpu_display_manager *dm = bl_get_data(bd);
3903 int i;
3904
3905 for (i = 0; i < dm->num_of_edps; i++) {
3906 if (bd == dm->backlight_dev[i])
3907 break;
3908 }
3909 if (i >= AMDGPU_DM_MAX_NUM_EDP)
3910 i = 0;
3911 return amdgpu_dm_backlight_get_level(dm, i);
3912}
3913
3914static const struct backlight_ops amdgpu_dm_backlight_ops = {
3915 .options = BL_CORE_SUSPENDRESUME,
3916 .get_brightness = amdgpu_dm_backlight_get_brightness,
3917 .update_status = amdgpu_dm_backlight_update_status,
3918};
3919
3920static void
3921amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3922{
3923 char bl_name[16];
3924 struct backlight_properties props = { 0 };
3925
3926 amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
3927 dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
3928
3929 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3930 props.brightness = AMDGPU_MAX_BL_LEVEL;
3931 props.type = BACKLIGHT_RAW;
3932
3933 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3934 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
3935
3936 dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
3937 adev_to_drm(dm->adev)->dev,
3938 dm,
3939 &amdgpu_dm_backlight_ops,
3940 &props);
3941
3942 if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
3943 DRM_ERROR("DM: Backlight registration failed!\n");
3944 else
3945 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3946}
3947#endif
3948
3949static int initialize_plane(struct amdgpu_display_manager *dm,
3950 struct amdgpu_mode_info *mode_info, int plane_id,
3951 enum drm_plane_type plane_type,
3952 const struct dc_plane_cap *plane_cap)
3953{
3954 struct drm_plane *plane;
3955 unsigned long possible_crtcs;
3956 int ret = 0;
3957
3958 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3959 if (!plane) {
3960 DRM_ERROR("KMS: Failed to allocate plane\n");
3961 return -ENOMEM;
3962 }
3963 plane->type = plane_type;
3964
3965 /*
3966 * HACK: IGT tests expect that the primary plane for a CRTC
3967 * can only have one possible CRTC. Only expose support for
3968 * any CRTC if they're not going to be used as a primary plane
3969 * for a CRTC - like overlay or underlay planes.
3970 */
3971 possible_crtcs = 1 << plane_id;
3972 if (plane_id >= dm->dc->caps.max_streams)
3973 possible_crtcs = 0xff;
3974
3975 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3976
3977 if (ret) {
3978 DRM_ERROR("KMS: Failed to initialize plane\n");
3979 kfree(plane);
3980 return ret;
3981 }
3982
3983 if (mode_info)
3984 mode_info->planes[plane_id] = plane;
3985
3986 return ret;
3987}
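/*
 * Worked example (added for clarity, not in the original file): with
 * dc->caps.max_streams == 4, primary plane 0 gets possible_crtcs ==
 * 1 << 0 == 0x1 (bound to CRTC 0 only), while an overlay with
 * plane_id >= 4 gets 0xff and may be assigned to any CRTC, matching
 * the IGT expectation described in the comment above.
 */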
3988
3989
3990static void register_backlight_device(struct amdgpu_display_manager *dm,
3991 struct dc_link *link)
3992{
3993#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3994 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3995
3996 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3997 link->type != dc_connection_none) {
3998 /*
3999 * Even if registration fails, we should continue with
4000 * DM initialization because not having a backlight control
4001 * is better than a black screen.
4002 */
4003 if (!dm->backlight_dev[dm->num_of_edps])
4004 amdgpu_dm_register_backlight_device(dm);
4005
4006 if (dm->backlight_dev[dm->num_of_edps]) {
4007 dm->backlight_link[dm->num_of_edps] = link;
4008 dm->num_of_edps++;
4009 }
4010 }
4011#endif
4012}
4013
4014
4015/*
4016 * In this architecture, the association
4017 * connector -> encoder -> crtc
4018 * is not really required. The crtc and connector will hold the
4019 * display_index as an abstraction to use with the DAL component.
4020 *
4021 * Returns 0 on success
4022 */
4023static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4024{
4025 struct amdgpu_display_manager *dm = &adev->dm;
4026 int32_t i;
4027 struct amdgpu_dm_connector *aconnector = NULL;
4028 struct amdgpu_encoder *aencoder = NULL;
4029 struct amdgpu_mode_info *mode_info = &adev->mode_info;
4030 uint32_t link_cnt;
4031 int32_t primary_planes;
4032 enum dc_connection_type new_connection_type = dc_connection_none;
4033 const struct dc_plane_cap *plane;
4034
4035 dm->display_indexes_num = dm->dc->caps.max_streams;
4036 /* Update the actual used number of crtc */
4037 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4038
4039 link_cnt = dm->dc->caps.max_links;
4040 if (amdgpu_dm_mode_config_init(dm->adev)) {
4041 DRM_ERROR("DM: Failed to initialize mode config\n");
4042 return -EINVAL;
4043 }
4044
4045 /* There is one primary plane per CRTC */
4046 primary_planes = dm->dc->caps.max_streams;
4047 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4048
4049 /*
4050 * Initialize primary planes, implicit planes for legacy IOCTLS.
4051 * Order is reversed to match iteration order in atomic check.
4052 */
4053 for (i = (primary_planes - 1); i >= 0; i--) {
4054 plane = &dm->dc->caps.planes[i];
4055
4056 if (initialize_plane(dm, mode_info, i,
4057 DRM_PLANE_TYPE_PRIMARY, plane)) {
4058 DRM_ERROR("KMS: Failed to initialize primary plane\n");
4059 goto fail;
4060 }
4061 }
4062
4063 /*
4064 * Initialize overlay planes, index starting after primary planes.
4065 * These planes have a higher DRM index than the primary planes since
4066 * they should be considered as having a higher z-order.
4067 * Order is reversed to match iteration order in atomic check.
4068 *
4069 * Only support DCN for now, and only expose one so we don't encourage
4070 * userspace to use up all the pipes.
4071 */
4072 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4073 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4074
4075 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4076 continue;
4077
4078 if (!plane->blends_with_above || !plane->blends_with_below)
4079 continue;
4080
4081 if (!plane->pixel_format_support.argb8888)
4082 continue;
4083
4084 if (initialize_plane(dm, NULL, primary_planes + i,
4085 DRM_PLANE_TYPE_OVERLAY, plane)) {
4086 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4087 goto fail;
4088 }
4089
4090 /* Only create one overlay plane. */
4091 break;
4092 }
4093
4094 for (i = 0; i < dm->dc->caps.max_streams; i++)
4095 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4096 DRM_ERROR("KMS: Failed to initialize crtc\n");
4097 goto fail;
4098 }
4099
4100#if defined(CONFIG_DRM_AMD_DC_DCN)
4101 /* Use Outbox interrupt */
4102 switch (adev->ip_versions[DCE_HWIP][0]) {
4103 case IP_VERSION(3, 0, 0):
4104 case IP_VERSION(3, 1, 2):
4105 case IP_VERSION(3, 1, 3):
4106 case IP_VERSION(2, 1, 0):
4107 if (register_outbox_irq_handlers(dm->adev)) {
4108 DRM_ERROR("DM: Failed to initialize IRQ\n");
4109 goto fail;
4110 }
4111 break;
4112 default:
4113 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4114 adev->ip_versions[DCE_HWIP][0]);
4115 }
4116#endif
4117
4118 /* loops over all connectors on the board */
4119 for (i = 0; i < link_cnt; i++) {
4120 struct dc_link *link = NULL;
4121
4122 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4123 DRM_ERROR(
4124 "KMS: Cannot support more than %d display indexes\n",
4125 AMDGPU_DM_MAX_DISPLAY_INDEX);
4126 continue;
4127 }
4128
4129 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4130 if (!aconnector)
4131 goto fail;
4132
4133 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4134 if (!aencoder)
4135 goto fail;
4136
4137 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4138 DRM_ERROR("KMS: Failed to initialize encoder\n");
4139 goto fail;
4140 }
4141
4142 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4143 DRM_ERROR("KMS: Failed to initialize connector\n");
4144 goto fail;
4145 }
4146
4147 link = dc_get_link_at_index(dm->dc, i);
4148
4149 if (!dc_link_detect_sink(link, &new_connection_type))
4150 DRM_ERROR("KMS: Failed to detect connector\n");
4151
4152 if (aconnector->base.force && new_connection_type == dc_connection_none) {
4153 emulated_link_detect(link);
4154 amdgpu_dm_update_connector_after_detect(aconnector);
4155
4156 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4157 amdgpu_dm_update_connector_after_detect(aconnector);
4158 register_backlight_device(dm, link);
4159 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
4160 amdgpu_dm_set_psr_caps(link);
4161 }
4162
4163
4164 }
4165
4166 /* Software is initialized. Now we can register interrupt handlers. */
4167 switch (adev->asic_type) {
4168#if defined(CONFIG_DRM_AMD_DC_SI)
4169 case CHIP_TAHITI:
4170 case CHIP_PITCAIRN:
4171 case CHIP_VERDE:
4172 case CHIP_OLAND:
4173 if (dce60_register_irq_handlers(dm->adev)) {
4174 DRM_ERROR("DM: Failed to initialize IRQ\n");
4175 goto fail;
4176 }
4177 break;
4178#endif
4179 case CHIP_BONAIRE:
4180 case CHIP_HAWAII:
4181 case CHIP_KAVERI:
4182 case CHIP_KABINI:
4183 case CHIP_MULLINS:
4184 case CHIP_TONGA:
4185 case CHIP_FIJI:
4186 case CHIP_CARRIZO:
4187 case CHIP_STONEY:
4188 case CHIP_POLARIS11:
4189 case CHIP_POLARIS10:
4190 case CHIP_POLARIS12:
4191 case CHIP_VEGAM:
4192 case CHIP_VEGA10:
4193 case CHIP_VEGA12:
4194 case CHIP_VEGA20:
4195 if (dce110_register_irq_handlers(dm->adev)) {
4196 DRM_ERROR("DM: Failed to initialize IRQ\n");
4197 goto fail;
4198 }
4199 break;
4200 default:
4201#if defined(CONFIG_DRM_AMD_DC_DCN)
4202 switch (adev->ip_versions[DCE_HWIP][0]) {
4203 case IP_VERSION(1, 0, 0):
4204 case IP_VERSION(1, 0, 1):
4205 case IP_VERSION(2, 0, 2):
4206 case IP_VERSION(2, 0, 3):
4207 case IP_VERSION(2, 0, 0):
4208 case IP_VERSION(2, 1, 0):
4209 case IP_VERSION(3, 0, 0):
4210 case IP_VERSION(3, 0, 2):
4211 case IP_VERSION(3, 0, 3):
4212 case IP_VERSION(3, 0, 1):
4213 case IP_VERSION(3, 1, 2):
4214 case IP_VERSION(3, 1, 3):
4215 if (dcn10_register_irq_handlers(dm->adev)) {
4216 DRM_ERROR("DM: Failed to initialize IRQ\n");
4217 goto fail;
4218 }
4219 break;
4220 default:
4221 DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
4222 adev->ip_versions[DCE_HWIP][0]);
4223 goto fail;
4224 }
4225#endif
4226 break;
4227 }
4228
4229 return 0;
4230fail:
4231 kfree(aencoder);
4232 kfree(aconnector);
4233
4234 return -EINVAL;
4235}
4236
4237static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4238{
4239 drm_atomic_private_obj_fini(&dm->atomic_obj);
4240 return;
4241}
4242
4243/******************************************************************************
4244 * amdgpu_display_funcs functions
4245 *****************************************************************************/
4246
4247/*
4248 * dm_bandwidth_update - program display watermarks
4249 *
4250 * @adev: amdgpu_device pointer
4251 *
4252 * Calculate and program the display watermarks and line buffer allocation.
4253 */
4254static void dm_bandwidth_update(struct amdgpu_device *adev)
4255{
4256 /* TODO: implement later */
4257}
4258
4259static const struct amdgpu_display_funcs dm_display_funcs = {
4260 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4261 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
4262 .backlight_set_level = NULL, /* never called for DC */
4263 .backlight_get_level = NULL, /* never called for DC */
4264 .hpd_sense = NULL,/* called unconditionally */
4265 .hpd_set_polarity = NULL, /* called unconditionally */
4266 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4267 .page_flip_get_scanoutpos =
4268 dm_crtc_get_scanoutpos,/* called unconditionally */
4269 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4270 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4271};
4272
4273#if defined(CONFIG_DEBUG_KERNEL_DC)
4274
4275static ssize_t s3_debug_store(struct device *device,
4276 struct device_attribute *attr,
4277 const char *buf,
4278 size_t count)
4279{
4280 int ret;
4281 int s3_state;
4282 struct drm_device *drm_dev = dev_get_drvdata(device);
4283 struct amdgpu_device *adev = drm_to_adev(drm_dev);
4284
4285 ret = kstrtoint(buf, 0, &s3_state);
4286
4287 if (ret == 0) {
4288 if (s3_state) {
4289 dm_resume(adev);
4290 drm_kms_helper_hotplug_event(adev_to_drm(adev));
4291 } else
4292 dm_suspend(adev);
4293 }
4294
4295 return ret == 0 ? count : 0;
4296}
4297
4298DEVICE_ATTR_WO(s3_debug);
4299
4300#endif
4301
4302static int dm_early_init(void *handle)
4303{
4304 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4305
4306 switch (adev->asic_type) {
4307#if defined(CONFIG_DRM_AMD_DC_SI)
4308 case CHIP_TAHITI:
4309 case CHIP_PITCAIRN:
4310 case CHIP_VERDE:
4311 adev->mode_info.num_crtc = 6;
4312 adev->mode_info.num_hpd = 6;
4313 adev->mode_info.num_dig = 6;
4314 break;
4315 case CHIP_OLAND:
4316 adev->mode_info.num_crtc = 2;
4317 adev->mode_info.num_hpd = 2;
4318 adev->mode_info.num_dig = 2;
4319 break;
4320#endif
4321 case CHIP_BONAIRE:
4322 case CHIP_HAWAII:
4323 adev->mode_info.num_crtc = 6;
4324 adev->mode_info.num_hpd = 6;
4325 adev->mode_info.num_dig = 6;
4326 break;
4327 case CHIP_KAVERI:
4328 adev->mode_info.num_crtc = 4;
4329 adev->mode_info.num_hpd = 6;
4330 adev->mode_info.num_dig = 7;
4331 break;
4332 case CHIP_KABINI:
4333 case CHIP_MULLINS:
4334 adev->mode_info.num_crtc = 2;
4335 adev->mode_info.num_hpd = 6;
4336 adev->mode_info.num_dig = 6;
4337 break;
4338 case CHIP_FIJI:
4339 case CHIP_TONGA:
4340 adev->mode_info.num_crtc = 6;
4341 adev->mode_info.num_hpd = 6;
4342 adev->mode_info.num_dig = 7;
4343 break;
4344 case CHIP_CARRIZO:
4345 adev->mode_info.num_crtc = 3;
4346 adev->mode_info.num_hpd = 6;
4347 adev->mode_info.num_dig = 9;
4348 break;
4349 case CHIP_STONEY:
4350 adev->mode_info.num_crtc = 2;
4351 adev->mode_info.num_hpd = 6;
4352 adev->mode_info.num_dig = 9;
4353 break;
4354 case CHIP_POLARIS11:
4355 case CHIP_POLARIS12:
4356 adev->mode_info.num_crtc = 5;
4357 adev->mode_info.num_hpd = 5;
4358 adev->mode_info.num_dig = 5;
4359 break;
4360 case CHIP_POLARIS10:
4361 case CHIP_VEGAM:
4362 adev->mode_info.num_crtc = 6;
4363 adev->mode_info.num_hpd = 6;
4364 adev->mode_info.num_dig = 6;
4365 break;
4366 case CHIP_VEGA10:
4367 case CHIP_VEGA12:
4368 case CHIP_VEGA20:
4369 adev->mode_info.num_crtc = 6;
4370 adev->mode_info.num_hpd = 6;
4371 adev->mode_info.num_dig = 6;
4372 break;
4373 default:
4374#if defined(CONFIG_DRM_AMD_DC_DCN)
4375 switch (adev->ip_versions[DCE_HWIP][0]) {
4376 case IP_VERSION(2, 0, 2):
4377 case IP_VERSION(3, 0, 0):
4378 adev->mode_info.num_crtc = 6;
4379 adev->mode_info.num_hpd = 6;
4380 adev->mode_info.num_dig = 6;
4381 break;
4382 case IP_VERSION(2, 0, 0):
4383 case IP_VERSION(3, 0, 2):
4384 adev->mode_info.num_crtc = 5;
4385 adev->mode_info.num_hpd = 5;
4386 adev->mode_info.num_dig = 5;
4387 break;
4388 case IP_VERSION(2, 0, 3):
4389 case IP_VERSION(3, 0, 3):
4390 adev->mode_info.num_crtc = 2;
4391 adev->mode_info.num_hpd = 2;
4392 adev->mode_info.num_dig = 2;
4393 break;
4394 case IP_VERSION(1, 0, 0):
4395 case IP_VERSION(1, 0, 1):
4396 case IP_VERSION(3, 0, 1):
4397 case IP_VERSION(2, 1, 0):
4398 case IP_VERSION(3, 1, 2):
4399 case IP_VERSION(3, 1, 3):
4400 adev->mode_info.num_crtc = 4;
4401 adev->mode_info.num_hpd = 4;
4402 adev->mode_info.num_dig = 4;
4403 break;
4404 default:
4405 DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
4406 adev->ip_versions[DCE_HWIP][0]);
4407 return -EINVAL;
4408 }
4409#endif
4410 break;
4411 }
4412
4413 amdgpu_dm_set_irq_funcs(adev);
4414
4415 if (adev->mode_info.funcs == NULL)
4416 adev->mode_info.funcs = &dm_display_funcs;
4417
4418 /*
4419 * Note: Do NOT change adev->audio_endpt_rreg and
4420 * adev->audio_endpt_wreg because they are initialised in
4421 * amdgpu_device_init()
4422 */
4423#if defined(CONFIG_DEBUG_KERNEL_DC)
4424 device_create_file(
4425 adev_to_drm(adev)->dev,
4426 &dev_attr_s3_debug);
4427#endif
4428
4429 return 0;
4430}
4431
4432static bool modeset_required(struct drm_crtc_state *crtc_state,
4433 struct dc_stream_state *new_stream,
4434 struct dc_stream_state *old_stream)
4435{
4436 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4437}
4438
4439static bool modereset_required(struct drm_crtc_state *crtc_state)
4440{
4441 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4442}
4443
4444static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4445{
4446 drm_encoder_cleanup(encoder);
4447 kfree(encoder);
4448}
4449
4450static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4451 .destroy = amdgpu_dm_encoder_destroy,
4452};
4453
4454
4455static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4456 struct drm_framebuffer *fb,
4457 int *min_downscale, int *max_upscale)
4458{
4459 struct amdgpu_device *adev = drm_to_adev(dev);
4460 struct dc *dc = adev->dm.dc;
4461 /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4462 struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4463
4464 switch (fb->format->format) {
4465 case DRM_FORMAT_P010:
4466 case DRM_FORMAT_NV12:
4467 case DRM_FORMAT_NV21:
4468 *max_upscale = plane_cap->max_upscale_factor.nv12;
4469 *min_downscale = plane_cap->max_downscale_factor.nv12;
4470 break;
4471
4472 case DRM_FORMAT_XRGB16161616F:
4473 case DRM_FORMAT_ARGB16161616F:
4474 case DRM_FORMAT_XBGR16161616F:
4475 case DRM_FORMAT_ABGR16161616F:
4476 *max_upscale = plane_cap->max_upscale_factor.fp16;
4477 *min_downscale = plane_cap->max_downscale_factor.fp16;
4478 break;
4479
4480 default:
4481 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4482 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4483 break;
4484 }
4485
4486 /*
4487 * A factor of 1 in the plane_cap means to not allow scaling, ie. use a
4488 * scaling factor of 1.0 == 1000 units.
4489 */
4490 if (*max_upscale == 1)
4491 *max_upscale = 1000;
4492
4493 if (*min_downscale == 1)
4494 *min_downscale = 1000;
4495}
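/*
 * Worked example (added for clarity, not in the original file): the
 * caps are in units of 1/1000 of a scaling factor, e.g. a
 * max_downscale_factor of 250 means at most a 4:1 downscale (0.25x)
 * and a max_upscale_factor of 16000 means at most a 16x upscale; the
 * factor-of-1 special case above normalizes "no scaling" to 1000 (1.0x).
 */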
4496
4497
4498static int fill_dc_scaling_info(const struct drm_plane_state *state,
4499 struct dc_scaling_info *scaling_info)
4500{
4501 int scale_w, scale_h, min_downscale, max_upscale;
4502
4503 memset(scaling_info, 0, sizeof(*scaling_info));
4504
4505 /* Source is fixed 16.16 but we ignore mantissa for now... */
4506 scaling_info->src_rect.x = state->src_x >> 16;
4507 scaling_info->src_rect.y = state->src_y >> 16;
4508
4509 /*
4510 * For reasons we don't (yet) fully understand a non-zero
4511 * src_y coordinate into an NV12 buffer can cause a
4512 * system hang. To avoid hangs (and maybe be overly cautious)
4513 * let's reject both non-zero src_x and src_y.
4514 *
4515 * We currently know of only one use-case to reproduce a
4516 * scenario with non-zero src_x and src_y for NV12, which
4517 * is to gesture the YouTube Android app into full screen
4518 * on ChromeOS.
4519 */
4520 if (state->fb &&
4521 state->fb->format->format == DRM_FORMAT_NV12 &&
4522 (scaling_info->src_rect.x != 0 ||
4523 scaling_info->src_rect.y != 0))
4524 return -EINVAL;
4525
4526 scaling_info->src_rect.width = state->src_w >> 16;
4527 if (scaling_info->src_rect.width == 0)
4528 return -EINVAL;
4529
4530 scaling_info->src_rect.height = state->src_h >> 16;
4531 if (scaling_info->src_rect.height == 0)
4532 return -EINVAL;
4533
4534 scaling_info->dst_rect.x = state->crtc_x;
4535 scaling_info->dst_rect.y = state->crtc_y;
4536
4537 if (state->crtc_w == 0)
4538 return -EINVAL;
4539
4540 scaling_info->dst_rect.width = state->crtc_w;
4541
4542 if (state->crtc_h == 0)
4543 return -EINVAL;
4544
4545 scaling_info->dst_rect.height = state->crtc_h;
4546
4547 /* DRM doesn't specify clipping on destination output. */
4548 scaling_info->clip_rect = scaling_info->dst_rect;
4549
4550 /* Validate scaling per-format with DC plane caps */
4551 if (state->plane && state->plane->dev && state->fb) {
4552 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4553 &min_downscale, &max_upscale);
4554 } else {
4555 min_downscale = 250;
4556 max_upscale = 16000;
4557 }
4558
4559 scale_w = scaling_info->dst_rect.width * 1000 /
4560 scaling_info->src_rect.width;
4561
4562 if (scale_w < min_downscale || scale_w > max_upscale)
4563 return -EINVAL;
4564
4565 scale_h = scaling_info->dst_rect.height * 1000 /
4566 scaling_info->src_rect.height;
4567
4568 if (scale_h < min_downscale || scale_h > max_upscale)
4569 return -EINVAL;
4570
4571 /*
4572 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4573 * assume reasonable defaults based on the format.
4574 */
4575
4576 return 0;
4577}
4578
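/*
 * Worked example (added for clarity, not in the original file): a
 * 1920-pixel-wide source scanned out into a 960-pixel-wide destination
 * gives scale_w = 960 * 1000 / 1920 = 500, i.e. a 0.5x downscale,
 * which passes the default bounds of min_downscale = 250 and
 * max_upscale = 16000 used above when no framebuffer is attached.
 */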
4579static void
4580fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4581 uint64_t tiling_flags)
4582{
4583 /* Fill GFX8 params */
4584 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4585 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4586
4587 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4588 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4589 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4590 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4591 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4592
4593 /* XXX fix me for VI */
4594 tiling_info->gfx8.num_banks = num_banks;
4595 tiling_info->gfx8.array_mode =
4596 DC_ARRAY_2D_TILED_THIN1;
4597 tiling_info->gfx8.tile_split = tile_split;
4598 tiling_info->gfx8.bank_width = bankw;
4599 tiling_info->gfx8.bank_height = bankh;
4600 tiling_info->gfx8.tile_aspect = mtaspect;
4601 tiling_info->gfx8.tile_mode =
4602 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4603 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4604 == DC_ARRAY_1D_TILED_THIN1) {
4605 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4606 }
4607
4608 tiling_info->gfx8.pipe_config =
4609 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4610}
4611
4612static void
4613fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4614 union dc_tiling_info *tiling_info)
4615{
4616 tiling_info->gfx9.num_pipes =
4617 adev->gfx.config.gb_addr_config_fields.num_pipes;
4618 tiling_info->gfx9.num_banks =
4619 adev->gfx.config.gb_addr_config_fields.num_banks;
4620 tiling_info->gfx9.pipe_interleave =
4621 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4622 tiling_info->gfx9.num_shader_engines =
4623 adev->gfx.config.gb_addr_config_fields.num_se;
4624 tiling_info->gfx9.max_compressed_frags =
4625 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4626 tiling_info->gfx9.num_rb_per_se =
4627 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4628 tiling_info->gfx9.shaderEnable = 1;
4629 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4630 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4631}
4632
4633static int
4634validate_dcc(struct amdgpu_device *adev,
4635 const enum surface_pixel_format format,
4636 const enum dc_rotation_angle rotation,
4637 const union dc_tiling_info *tiling_info,
4638 const struct dc_plane_dcc_param *dcc,
4639 const struct dc_plane_address *address,
4640 const struct plane_size *plane_size)
7df7e505
NK
4641{
4642 struct dc *dc = adev->dm.dc;
4643 struct dc_dcc_surface_param input;
4644 struct dc_surface_dcc_cap output;
4645
4646 memset(&input, 0, sizeof(input));
4647 memset(&output, 0, sizeof(output));
4648
4649 if (!dcc->enable)
4650 return 0;
4651
4652 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4653 !dc->cap_funcs.get_dcc_compression_cap)
4654 return -EINVAL;
4655
4656 input.format = format;
4657 input.surface_size.width = plane_size->surface_size.width;
4658 input.surface_size.height = plane_size->surface_size.height;
4659 input.swizzle_mode = tiling_info->gfx9.swizzle;
4660
4661 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4662 input.scan = SCAN_DIRECTION_HORIZONTAL;
4663 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4664 input.scan = SCAN_DIRECTION_VERTICAL;
4665
4666 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4667 return -EINVAL;
4668
4669 if (!output.capable)
4670 return -EINVAL;
4671
4672 if (dcc->independent_64b_blks == 0 &&
4673 output.grph.rgb.independent_64b_blks != 0)
4674 return -EINVAL;
4675
4676 return 0;
4677}
4678
4679static bool
4680modifier_has_dcc(uint64_t modifier)
4681{
4682 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4683}
4684
4685static unsigned
4686modifier_gfx9_swizzle_mode(uint64_t modifier)
4687{
4688 if (modifier == DRM_FORMAT_MOD_LINEAR)
4689 return 0;
4690
4691 return AMD_FMT_MOD_GET(TILE, modifier);
4692}
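/*
 * Illustrative sketch, not part of the original file: decoding a
 * modifier with the same AMD_FMT_MOD_GET accessors used by the helpers
 * above. The function name is hypothetical.
 */
#if 0
static void example_decode_modifier(uint64_t modifier)
{
	if (modifier == DRM_FORMAT_MOD_LINEAR)
		return; /* no tiling, no DCC */

	if (modifier_has_dcc(modifier))
		DRM_DEBUG_KMS("DCC enabled, independent 64B blocks: %llu\n",
			      AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier));

	DRM_DEBUG_KMS("swizzle mode %u, tile version %llu\n",
		      modifier_gfx9_swizzle_mode(modifier),
		      AMD_FMT_MOD_GET(TILE_VERSION, modifier));
}
#endif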
4693
4694static const struct drm_format_info *
4695amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4696{
4697 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4698}
4699
4700static void
4701fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4702 union dc_tiling_info *tiling_info,
4703 uint64_t modifier)
4704{
4705 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4706 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4707 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4708 unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4709
4710 fill_gfx9_tiling_info_from_device(adev, tiling_info);
4711
4712 if (!IS_AMD_FMT_MOD(modifier))
4713 return;
4714
4715 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4716 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4717
4718 if (adev->family >= AMDGPU_FAMILY_NV) {
4719 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4720 } else {
4721 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4722
4723 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4724 }
4725}
4726
4727enum dm_micro_swizzle {
4728 MICRO_SWIZZLE_Z = 0,
4729 MICRO_SWIZZLE_S = 1,
4730 MICRO_SWIZZLE_D = 2,
4731 MICRO_SWIZZLE_R = 3
4732};
4733
4734static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4735 uint32_t format,
4736 uint64_t modifier)
4737{
4738 struct amdgpu_device *adev = drm_to_adev(plane->dev);
4739 const struct drm_format_info *info = drm_format_info(format);
4740 int i;
4741
4742 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4743
4744 if (!info)
4745 return false;
4746
4747 /*
4748 * We always have to allow these modifiers:
4749 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4750 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4751 */
4752 if (modifier == DRM_FORMAT_MOD_LINEAR ||
4753 modifier == DRM_FORMAT_MOD_INVALID) {
4754 return true;
4755 }
4756
4757 /* Check that the modifier is on the list of the plane's supported modifiers. */
4758 for (i = 0; i < plane->modifier_count; i++) {
4759 if (modifier == plane->modifiers[i])
4760 break;
4761 }
4762 if (i == plane->modifier_count)
4763 return false;
4764
4765 /*
4766 * For D swizzle the canonical modifier depends on the bpp, so check
4767 * it here.
4768 */
4769 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4770 adev->family >= AMDGPU_FAMILY_NV) {
4771 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4772 return false;
4773 }
4774
4775 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4776 info->cpp[0] < 8)
4777 return false;
4778
4779 if (modifier_has_dcc(modifier)) {
4780 /* Per radeonsi comments 16/64 bpp are more complicated. */
4781 if (info->cpp[0] != 4)
4782 return false;
4783 /* We support multi-planar formats, but not when combined with
4784 * additional DCC metadata planes. */
4785 if (info->num_planes > 1)
4786 return false;
4787 }
4788
4789 return true;
4790}
4791
4792static void
4793add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4794{
4795 if (!*mods)
4796 return;
4797
4798 if (*cap - *size < 1) {
4799 uint64_t new_cap = *cap * 2;
4800 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4801
4802 if (!new_mods) {
4803 kfree(*mods);
4804 *mods = NULL;
4805 return;
4806 }
4807
4808 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4809 kfree(*mods);
4810 *mods = new_mods;
4811 *cap = new_cap;
4812 }
4813
4814 (*mods)[*size] = mod;
4815 *size += 1;
4816}
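/*
 * Note (added for clarity): add_modifier() grows the array geometrically,
 * doubling the capacity on each reallocation, so building a modifier list
 * of n entries costs O(n) amortized copies. On allocation failure it frees
 * the list and NULLs *mods, which the initial "if (!*mods) return;" check
 * then turns into a silent no-op for all later calls.
 */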
4817
4818static void
4819add_gfx9_modifiers(const struct amdgpu_device *adev,
4820 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4821{
4822 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4823 int pipe_xor_bits = min(8, pipes +
4824 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4825 int bank_xor_bits = min(8 - pipe_xor_bits,
4826 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4827 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4828 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4829
4830
4831 if (adev->family == AMDGPU_FAMILY_RV) {
4832 /* Raven2 and later */
4833 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4834
4835 /*
4836 * No _D DCC swizzles yet because we only allow 32bpp, which
4837 * doesn't support _D on DCN
4838 */
4839
4840 if (has_constant_encode) {
4841 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4842 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4843 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4844 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4845 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4846 AMD_FMT_MOD_SET(DCC, 1) |
4847 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4848 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4849 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4850 }
4851
4852 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4853 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4854 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4855 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4856 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4857 AMD_FMT_MOD_SET(DCC, 1) |
4858 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4859 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4860 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4861
4862 if (has_constant_encode) {
4863 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4864 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4865 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4866 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4867 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4868 AMD_FMT_MOD_SET(DCC, 1) |
4869 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4870 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4871 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4872
4873 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4874 AMD_FMT_MOD_SET(RB, rb) |
4875 AMD_FMT_MOD_SET(PIPE, pipes));
4876 }
4877
4878 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4879 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4880 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4881 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4882 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4883 AMD_FMT_MOD_SET(DCC, 1) |
4884 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4885 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4886 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4887 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4888 AMD_FMT_MOD_SET(RB, rb) |
4889 AMD_FMT_MOD_SET(PIPE, pipes));
4890 }
4891
4892 /*
4893 * Only supported for 64bpp on Raven, will be filtered on format in
4894 * dm_plane_format_mod_supported.
4895 */
4896 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4897 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4898 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4899 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4900 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4901
4902 if (adev->family == AMDGPU_FAMILY_RV) {
4903 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4904 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4905 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4906 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4907 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4908 }
4909
4910 /*
4911 * Only supported for 64bpp on Raven, will be filtered on format in
4912 * dm_plane_format_mod_supported.
4913 */
4914 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4915 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4916 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4917
4918 if (adev->family == AMDGPU_FAMILY_RV) {
4919 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4920 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4921 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4922 }
4923}
4924
4925static void
4926add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4927 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4928{
4929 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4930
4931 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4932 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4933 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4934 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4935 AMD_FMT_MOD_SET(DCC, 1) |
4936 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4937 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4938 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4939
4940 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4941 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4942 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4943 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4944 AMD_FMT_MOD_SET(DCC, 1) |
4945 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4946 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4947 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4948 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4949
4950 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4951 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4952 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4953 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4954
4955 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4956 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4957 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4958 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4959
4960
4961 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4962 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4963 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4964 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4965
4966 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4967 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4968 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4969}
4970
4971static void
4972add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4973 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4974{
4975 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4976 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4977
4978 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4979 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4980 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4981 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4982 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4983 AMD_FMT_MOD_SET(DCC, 1) |
4984 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4985 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4986 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4987 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4988
4989 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4990 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4991 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4992 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4993 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4994 AMD_FMT_MOD_SET(DCC, 1) |
4995 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4996 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4997 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4998
4999 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5000 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5001 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5002 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5003 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5004 AMD_FMT_MOD_SET(DCC, 1) |
5005 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5006 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5007 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5008 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5009 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5010
5011 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5012 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5013 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5014 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5015 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5016 AMD_FMT_MOD_SET(DCC, 1) |
5017 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5018 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5019 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5020 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5021
5022 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5023 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5024 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5025 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5026 AMD_FMT_MOD_SET(PACKERS, pkrs));
5027
5028 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5029 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5030 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5031 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5032 AMD_FMT_MOD_SET(PACKERS, pkrs));
5033
5034 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5035 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5036 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5037 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5038
5039 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5040 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5041 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5042}
5043
5044static int
5045get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5046{
5047 uint64_t size = 0, capacity = 128;
5048 *mods = NULL;
5049
5050 /* We have not hooked up any pre-GFX9 modifiers. */
5051 if (adev->family < AMDGPU_FAMILY_AI)
5052 return 0;
5053
5054 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5055
5056 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5057 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5058 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5059 return *mods ? 0 : -ENOMEM;
5060 }
5061
5062 switch (adev->family) {
5063 case AMDGPU_FAMILY_AI:
5064 case AMDGPU_FAMILY_RV:
5065 add_gfx9_modifiers(adev, mods, &size, &capacity);
5066 break;
5067 case AMDGPU_FAMILY_NV:
5068 case AMDGPU_FAMILY_VGH:
5069 case AMDGPU_FAMILY_YC:
5070 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5071 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5072 else
5073 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5074 break;
5075 }
5076
5077 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5078
5079 /* INVALID marks the end of the list. */
5080 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5081
5082 if (!*mods)
5083 return -ENOMEM;
5084
5085 return 0;
5086}
5087
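/*
 * Editor's note: get_plane_modifiers() above relies on the add_modifier()
 * helper defined earlier in this file. For readers without that context, a
 * minimal sketch of such a growable-array append is shown below (hypothetical
 * naming; the real helper may differ in detail). It leaves *mods NULL on
 * allocation failure so the caller's "return *mods ? 0 : -ENOMEM" check works:
 *
 *	static void add_modifier_sketch(uint64_t **mods, uint64_t *size,
 *					uint64_t *capacity, uint64_t mod)
 *	{
 *		if (!*mods)
 *			return;
 *
 *		if (*size >= *capacity) {
 *			uint64_t *new_mods;
 *
 *			new_mods = krealloc(*mods,
 *					    *capacity * 2 * sizeof(uint64_t),
 *					    GFP_KERNEL);
 *			if (!new_mods) {
 *				kfree(*mods);
 *				*mods = NULL;
 *				return;
 *			}
 *			*capacity *= 2;
 *			*mods = new_mods;
 *		}
 *
 *		(*mods)[(*size)++] = mod;
 *	}
 */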
5088static int
5089fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5090 const struct amdgpu_framebuffer *afb,
5091 const enum surface_pixel_format format,
5092 const enum dc_rotation_angle rotation,
5093 const struct plane_size *plane_size,
5094 union dc_tiling_info *tiling_info,
5095 struct dc_plane_dcc_param *dcc,
5096 struct dc_plane_address *address,
5097 const bool force_disable_dcc)
5098{
5099 const uint64_t modifier = afb->base.modifier;
5100 int ret = 0;
5101
5102 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5103 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5104
5105 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5106 uint64_t dcc_address = afb->address + afb->base.offsets[1];
5107 bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5108 bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5109
5110 dcc->enable = 1;
5111 dcc->meta_pitch = afb->base.pitches[1];
5112 dcc->independent_64b_blks = independent_64b_blks;
5113 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5114 if (independent_64b_blks && independent_128b_blks)
5115 dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5116 else if (independent_128b_blks)
5117 dcc->dcc_ind_blk = hubp_ind_block_128b;
5118 else if (independent_64b_blks && !independent_128b_blks)
5119 dcc->dcc_ind_blk = hubp_ind_block_64b;
5120 else
5121 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5122 } else {
5123 if (independent_64b_blks)
5124 dcc->dcc_ind_blk = hubp_ind_block_64b;
5125 else
5126 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5127 }
5128
5129 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5130 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5131 }
5132
5133 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5134 if (ret)
5135 drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5136
5137 return ret;
5138}
5139
5140static int
5141 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5142 const struct amdgpu_framebuffer *afb,
5143 const enum surface_pixel_format format,
5144 const enum dc_rotation_angle rotation,
5145 const uint64_t tiling_flags,
5146 union dc_tiling_info *tiling_info,
5147 struct plane_size *plane_size,
5148 struct dc_plane_dcc_param *dcc,
5149 struct dc_plane_address *address,
5150 bool tmz_surface,
5151 bool force_disable_dcc)
5152 {
5153 const struct drm_framebuffer *fb = &afb->base;
5154 int ret;
5155
5156 memset(tiling_info, 0, sizeof(*tiling_info));
5157 memset(plane_size, 0, sizeof(*plane_size));
5158 memset(dcc, 0, sizeof(*dcc));
5159 memset(address, 0, sizeof(*address));
5160
5161 address->tmz_surface = tmz_surface;
5162
5163 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5164 uint64_t addr = afb->address + fb->offsets[0];
5165
5166 plane_size->surface_size.x = 0;
5167 plane_size->surface_size.y = 0;
5168 plane_size->surface_size.width = fb->width;
5169 plane_size->surface_size.height = fb->height;
5170 plane_size->surface_pitch =
5171 fb->pitches[0] / fb->format->cpp[0];
5172
5173 address->type = PLN_ADDR_TYPE_GRAPHICS;
5174 address->grph.addr.low_part = lower_32_bits(addr);
5175 address->grph.addr.high_part = upper_32_bits(addr);
5176 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5177 uint64_t luma_addr = afb->address + fb->offsets[0];
5178 uint64_t chroma_addr = afb->address + fb->offsets[1];
5179
5180 plane_size->surface_size.x = 0;
5181 plane_size->surface_size.y = 0;
5182 plane_size->surface_size.width = fb->width;
5183 plane_size->surface_size.height = fb->height;
5184 plane_size->surface_pitch =
5185 fb->pitches[0] / fb->format->cpp[0];
5186
5187 plane_size->chroma_size.x = 0;
5188 plane_size->chroma_size.y = 0;
5189 /* TODO: set these based on surface format */
5190 plane_size->chroma_size.width = fb->width / 2;
5191 plane_size->chroma_size.height = fb->height / 2;
5192
5193 plane_size->chroma_pitch =
5194 fb->pitches[1] / fb->format->cpp[1];
5195
5196 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5197 address->video_progressive.luma_addr.low_part =
5198 lower_32_bits(luma_addr);
5199 address->video_progressive.luma_addr.high_part =
5200 upper_32_bits(luma_addr);
5201 address->video_progressive.chroma_addr.low_part =
5202 lower_32_bits(chroma_addr);
5203 address->video_progressive.chroma_addr.high_part =
5204 upper_32_bits(chroma_addr);
5205 }
5206
5207 if (adev->family >= AMDGPU_FAMILY_AI) {
5208 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5209 rotation, plane_size,
5210 tiling_info, dcc,
5211 address,
5212 force_disable_dcc);
5213 if (ret)
5214 return ret;
5215 } else {
5216 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5217 }
5218
5219 return 0;
5220}
5221
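/*
 * Editor's note: a worked NV12 example for the video path above
 * (illustrative values). For a 1920x1080 NV12 framebuffer, the luma plane is
 * 1920x1080 and the chroma plane 960x540; with cpp[0] = 1 and cpp[1] = 2,
 * surface_pitch = pitches[0] / 1 = 1920 and chroma_pitch = pitches[1] / 2
 * = 960, assuming tightly packed pitches.
 */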
5222 static void
5223 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5224 bool *per_pixel_alpha, bool *global_alpha,
5225 int *global_alpha_value)
5226{
5227 *per_pixel_alpha = false;
5228 *global_alpha = false;
5229 *global_alpha_value = 0xff;
5230
5231 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5232 return;
5233
5234 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
5235 static const uint32_t alpha_formats[] = {
5236 DRM_FORMAT_ARGB8888,
5237 DRM_FORMAT_RGBA8888,
5238 DRM_FORMAT_ABGR8888,
5239 };
5240 uint32_t format = plane_state->fb->format->format;
5241 unsigned int i;
5242
5243 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5244 if (format == alpha_formats[i]) {
5245 *per_pixel_alpha = true;
5246 break;
5247 }
5248 }
5249 }
5250
5251 if (plane_state->alpha < 0xffff) {
5252 *global_alpha = true;
5253 *global_alpha_value = plane_state->alpha >> 8;
5254 }
5255}
5256
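/*
 * Editor's note: a worked example of the blending conversion above
 * (illustrative values). DRM stores plane alpha as a 16-bit value
 * (0x0000-0xffff) while DC takes an 8-bit global alpha, hence the ">> 8":
 *
 *	plane_state->alpha = 0x8000 (~50%)  -> *global_alpha = true,
 *	                                       *global_alpha_value = 0x80
 *	plane_state->alpha = 0xffff (opaque) -> *global_alpha stays false
 */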
5257static int
5258fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5259 const enum surface_pixel_format format,
5260 enum dc_color_space *color_space)
5261{
5262 bool full_range;
5263
5264 *color_space = COLOR_SPACE_SRGB;
5265
5266 /* DRM color properties only affect non-RGB formats. */
5267 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5268 return 0;
5269
5270 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5271
5272 switch (plane_state->color_encoding) {
5273 case DRM_COLOR_YCBCR_BT601:
5274 if (full_range)
5275 *color_space = COLOR_SPACE_YCBCR601;
5276 else
5277 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
5278 break;
5279
5280 case DRM_COLOR_YCBCR_BT709:
5281 if (full_range)
5282 *color_space = COLOR_SPACE_YCBCR709;
5283 else
5284 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
5285 break;
5286
5287 case DRM_COLOR_YCBCR_BT2020:
5288 if (full_range)
5289 *color_space = COLOR_SPACE_2020_YCBCR;
5290 else
5291 return -EINVAL;
5292 break;
5293
5294 default:
5295 return -EINVAL;
5296 }
5297
5298 return 0;
5299}
5300
5301static int
5302fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5303 const struct drm_plane_state *plane_state,
5304 const uint64_t tiling_flags,
5305 struct dc_plane_info *plane_info,
5306 struct dc_plane_address *address,
5307 bool tmz_surface,
5308 bool force_disable_dcc)
5309{
5310 const struct drm_framebuffer *fb = plane_state->fb;
5311 const struct amdgpu_framebuffer *afb =
5312 to_amdgpu_framebuffer(plane_state->fb);
5313 int ret;
5314
5315 memset(plane_info, 0, sizeof(*plane_info));
5316
5317 switch (fb->format->format) {
5318 case DRM_FORMAT_C8:
5319 plane_info->format =
5320 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5321 break;
5322 case DRM_FORMAT_RGB565:
5323 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5324 break;
5325 case DRM_FORMAT_XRGB8888:
5326 case DRM_FORMAT_ARGB8888:
5327 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5328 break;
5329 case DRM_FORMAT_XRGB2101010:
5330 case DRM_FORMAT_ARGB2101010:
5331 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5332 break;
5333 case DRM_FORMAT_XBGR2101010:
5334 case DRM_FORMAT_ABGR2101010:
5335 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5336 break;
5337 case DRM_FORMAT_XBGR8888:
5338 case DRM_FORMAT_ABGR8888:
5339 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5340 break;
5341 case DRM_FORMAT_NV21:
5342 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5343 break;
5344 case DRM_FORMAT_NV12:
5345 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5346 break;
5347 case DRM_FORMAT_P010:
5348 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5349 break;
5350 case DRM_FORMAT_XRGB16161616F:
5351 case DRM_FORMAT_ARGB16161616F:
5352 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5353 break;
5354 case DRM_FORMAT_XBGR16161616F:
5355 case DRM_FORMAT_ABGR16161616F:
5356 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5357 break;
5358 case DRM_FORMAT_XRGB16161616:
5359 case DRM_FORMAT_ARGB16161616:
5360 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5361 break;
5362 case DRM_FORMAT_XBGR16161616:
5363 case DRM_FORMAT_ABGR16161616:
5364 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5365 break;
5366 default:
5367 DRM_ERROR(
5368 "Unsupported screen format %p4cc\n",
5369 &fb->format->format);
5370 return -EINVAL;
5371 }
5372
5373 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5374 case DRM_MODE_ROTATE_0:
5375 plane_info->rotation = ROTATION_ANGLE_0;
5376 break;
5377 case DRM_MODE_ROTATE_90:
5378 plane_info->rotation = ROTATION_ANGLE_90;
5379 break;
5380 case DRM_MODE_ROTATE_180:
5381 plane_info->rotation = ROTATION_ANGLE_180;
5382 break;
5383 case DRM_MODE_ROTATE_270:
5384 plane_info->rotation = ROTATION_ANGLE_270;
5385 break;
5386 default:
5387 plane_info->rotation = ROTATION_ANGLE_0;
5388 break;
5389 }
5390
5391 plane_info->visible = true;
5392 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5393
5394 plane_info->layer_index = 0;
5395
5396 ret = fill_plane_color_attributes(plane_state, plane_info->format,
5397 &plane_info->color_space);
5398 if (ret)
5399 return ret;
5400
5401 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5402 plane_info->rotation, tiling_flags,
5403 &plane_info->tiling_info,
5404 &plane_info->plane_size,
5405 &plane_info->dcc, address, tmz_surface,
5406 force_disable_dcc);
5407 if (ret)
5408 return ret;
5409
5410 fill_blending_from_plane_state(
5411 plane_state, &plane_info->per_pixel_alpha,
5412 &plane_info->global_alpha, &plane_info->global_alpha_value);
5413
5414 return 0;
5415}
5416
5417static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5418 struct dc_plane_state *dc_plane_state,
5419 struct drm_plane_state *plane_state,
5420 struct drm_crtc_state *crtc_state)
5421 {
5422 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5423 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5424 struct dc_scaling_info scaling_info;
5425 struct dc_plane_info plane_info;
5426 int ret;
5427 bool force_disable_dcc = false;
5428
5429 ret = fill_dc_scaling_info(plane_state, &scaling_info);
5430 if (ret)
5431 return ret;
5432
5433 dc_plane_state->src_rect = scaling_info.src_rect;
5434 dc_plane_state->dst_rect = scaling_info.dst_rect;
5435 dc_plane_state->clip_rect = scaling_info.clip_rect;
5436 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5437
5438 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5439 ret = fill_dc_plane_info_and_addr(adev, plane_state,
5440 afb->tiling_flags,
5441 &plane_info,
5442 &dc_plane_state->address,
5443 afb->tmz_surface,
5444 force_disable_dcc);
5445 if (ret)
5446 return ret;
5447
5448 dc_plane_state->format = plane_info.format;
5449 dc_plane_state->color_space = plane_info.color_space;
5451 dc_plane_state->plane_size = plane_info.plane_size;
5452 dc_plane_state->rotation = plane_info.rotation;
5453 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5454 dc_plane_state->stereo_format = plane_info.stereo_format;
5455 dc_plane_state->tiling_info = plane_info.tiling_info;
5456 dc_plane_state->visible = plane_info.visible;
5457 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5458 dc_plane_state->global_alpha = plane_info.global_alpha;
5459 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5460 dc_plane_state->dcc = plane_info.dcc;
5461 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
5462 dc_plane_state->flip_int_enabled = true;
5463
5464 /*
5465 * Always set input transfer function, since plane state is refreshed
5466 * every time.
5467 */
5468 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5469 if (ret)
5470 return ret;
5471
5472 return 0;
5473}
5474
5475static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5476 const struct dm_connector_state *dm_state,
5477 struct dc_stream_state *stream)
5478{
5479 enum amdgpu_rmx_type rmx_type;
5480
5481 struct rect src = { 0 }; /* viewport in composition space*/
5482 struct rect dst = { 0 }; /* stream addressable area */
5483
5484 /* no mode. nothing to be done */
5485 if (!mode)
5486 return;
5487
5488 /* Full screen scaling by default */
5489 src.width = mode->hdisplay;
5490 src.height = mode->vdisplay;
5491 dst.width = stream->timing.h_addressable;
5492 dst.height = stream->timing.v_addressable;
5493
5494 if (dm_state) {
5495 rmx_type = dm_state->scaling;
5496 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5497 if (src.width * dst.height <
5498 src.height * dst.width) {
5499 /* height needs less upscaling/more downscaling */
5500 dst.width = src.width *
5501 dst.height / src.height;
5502 } else {
5503 /* width needs less upscaling/more downscaling */
5504 dst.height = src.height *
5505 dst.width / src.width;
5506 }
5507 } else if (rmx_type == RMX_CENTER) {
5508 dst = src;
5509 }
5510
5511 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5512 dst.y = (stream->timing.v_addressable - dst.height) / 2;
5513
5514 if (dm_state->underscan_enable) {
5515 dst.x += dm_state->underscan_hborder / 2;
5516 dst.y += dm_state->underscan_vborder / 2;
5517 dst.width -= dm_state->underscan_hborder;
5518 dst.height -= dm_state->underscan_vborder;
5519 }
5520 }
5521
5522 stream->src = src;
5523 stream->dst = dst;
5524
5525 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
5526 dst.x, dst.y, dst.width, dst.height);
5527
5528}
5529
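/*
 * Editor's note: a worked RMX_ASPECT example for the scaler math above
 * (illustrative numbers). A 1280x720 source on a 1920x1200 stream gives
 * src.width * dst.height = 1536000, which is not less than
 * src.height * dst.width = 1382400, so the else branch runs and
 * dst.height = 720 * 1920 / 1280 = 1080. The result is then centered:
 * dst.x = (1920 - 1920) / 2 = 0, dst.y = (1200 - 1080) / 2 = 60.
 */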
5530 static enum dc_color_depth
5531 convert_color_depth_from_display_info(const struct drm_connector *connector,
5532 bool is_y420, int requested_bpc)
5533 {
5534 uint8_t bpc;
5535
5536 if (is_y420) {
5537 bpc = 8;
5538
5539 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5540 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5541 bpc = 16;
5542 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5543 bpc = 12;
5544 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5545 bpc = 10;
5546 } else {
5547 bpc = (uint8_t)connector->display_info.bpc;
5548 /* Assume 8 bpc by default if no bpc is specified. */
5549 bpc = bpc ? bpc : 8;
5550 }
5551
5552 if (requested_bpc > 0) {
5553 /*
5554 * Cap display bpc based on the user requested value.
5555 *
5556 * The value for state->max_bpc may not correctly updated
5557 * depending on when the connector gets added to the state
5558 * or if this was called outside of atomic check, so it
5559 * can't be used directly.
5560 */
cbd14ae7 5561 bpc = min_t(u8, bpc, requested_bpc);
01c22997 5562
1825fd34
NK
5563 /* Round down to the nearest even number. */
5564 bpc = bpc - (bpc & 1);
5565 }
5566
5567 switch (bpc) {
5568 case 0:
5569 /*
5570 * Temporary Work around, DRM doesn't parse color depth for
5571 * EDID revision before 1.4
5572 * TODO: Fix edid parsing
5573 */
5574 return COLOR_DEPTH_888;
5575 case 6:
5576 return COLOR_DEPTH_666;
5577 case 8:
5578 return COLOR_DEPTH_888;
5579 case 10:
5580 return COLOR_DEPTH_101010;
5581 case 12:
5582 return COLOR_DEPTH_121212;
5583 case 14:
5584 return COLOR_DEPTH_141414;
5585 case 16:
5586 return COLOR_DEPTH_161616;
5587 default:
5588 return COLOR_DEPTH_UNDEFINED;
5589 }
5590}
5591
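/*
 * Editor's note: worked examples of the bpc capping above (illustrative).
 * A 16 bpc panel with requested_bpc = 11 yields min(16, 11) = 11, which the
 * even-rounding step (bpc - (bpc & 1)) reduces to 10 -> COLOR_DEPTH_101010.
 * With requested_bpc = 0 the EDID-derived value is used unmodified.
 */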
5592static enum dc_aspect_ratio
5593get_aspect_ratio(const struct drm_display_mode *mode_in)
5594 {
5595 /* 1-1 mapping, since both enums follow the HDMI spec. */
5596 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5597}
5598
5599static enum dc_color_space
5600get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5601{
5602 enum dc_color_space color_space = COLOR_SPACE_SRGB;
5603
5604 switch (dc_crtc_timing->pixel_encoding) {
5605 case PIXEL_ENCODING_YCBCR422:
5606 case PIXEL_ENCODING_YCBCR444:
5607 case PIXEL_ENCODING_YCBCR420:
5608 {
5609 /*
5610 * 27030 kHz is the separation point between HDTV and SDTV
5611 * according to the HDMI spec; we use YCbCr709 and YCbCr601,
5612 * respectively.
5613 */
5614 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5615 if (dc_crtc_timing->flags.Y_ONLY)
5616 color_space =
5617 COLOR_SPACE_YCBCR709_LIMITED;
5618 else
5619 color_space = COLOR_SPACE_YCBCR709;
5620 } else {
5621 if (dc_crtc_timing->flags.Y_ONLY)
5622 color_space =
5623 COLOR_SPACE_YCBCR601_LIMITED;
5624 else
5625 color_space = COLOR_SPACE_YCBCR601;
5626 }
5627
5628 }
5629 break;
5630 case PIXEL_ENCODING_RGB:
5631 color_space = COLOR_SPACE_SRGB;
5632 break;
5633
5634 default:
5635 WARN_ON(1);
5636 break;
5637 }
5638
5639 return color_space;
5640}
5641
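/*
 * Editor's note: examples of the HDTV/SDTV split above (illustrative).
 * 480p at a 27.00 MHz pixel clock (pix_clk_100hz = 270000) is not above
 * the 27.03 MHz threshold and maps to YCbCr601, while 720p60 at 74.25 MHz
 * (pix_clk_100hz = 742500) maps to YCbCr709.
 */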
5642static bool adjust_colour_depth_from_display_info(
5643 struct dc_crtc_timing *timing_out,
5644 const struct drm_display_info *info)
5645 {
5646 enum dc_color_depth depth = timing_out->display_color_depth;
5647 int normalized_clk;
5648 do {
5649 normalized_clk = timing_out->pix_clk_100hz / 10;
5650 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5651 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5652 normalized_clk /= 2;
5653 /* Adjust the pixel clock per the HDMI spec, based on colour depth */
5654 switch (depth) {
5655 case COLOR_DEPTH_888:
5656 break;
5657 case COLOR_DEPTH_101010:
5658 normalized_clk = (normalized_clk * 30) / 24;
5659 break;
5660 case COLOR_DEPTH_121212:
5661 normalized_clk = (normalized_clk * 36) / 24;
5662 break;
5663 case COLOR_DEPTH_161616:
5664 normalized_clk = (normalized_clk * 48) / 24;
5665 break;
5666 default:
5667 /* The above depths are the only ones valid for HDMI. */
5668 return false;
5669 }
5670 if (normalized_clk <= info->max_tmds_clock) {
5671 timing_out->display_color_depth = depth;
5672 return true;
5673 }
5674 } while (--depth > COLOR_DEPTH_666);
5675 return false;
5676 }
5677
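/*
 * Editor's note: a worked pass through the loop above (illustrative numbers,
 * assuming an HDMI 2.0 sink with max_tmds_clock = 600000 kHz). For 4k60 RGB
 * (pix_clk_100hz = 5940000 -> normalized_clk = 594000 kHz):
 *
 *	12 bpc: 594000 * 36 / 24 = 891000 kHz  -> too high, try 10 bpc
 *	10 bpc: 594000 * 30 / 24 = 742500 kHz  -> too high, try 8 bpc
 *	 8 bpc: 594000 kHz                     -> fits, keep COLOR_DEPTH_888
 */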
5678static void fill_stream_properties_from_drm_display_mode(
5679 struct dc_stream_state *stream,
5680 const struct drm_display_mode *mode_in,
5681 const struct drm_connector *connector,
5682 const struct drm_connector_state *connector_state,
5683 const struct dc_stream_state *old_stream,
5684 int requested_bpc)
5685{
5686 struct dc_crtc_timing *timing_out = &stream->timing;
5687 const struct drm_display_info *info = &connector->display_info;
5688 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5689 struct hdmi_vendor_infoframe hv_frame;
5690 struct hdmi_avi_infoframe avi_frame;
5691
5692 memset(&hv_frame, 0, sizeof(hv_frame));
5693 memset(&avi_frame, 0, sizeof(avi_frame));
5694
5695 timing_out->h_border_left = 0;
5696 timing_out->h_border_right = 0;
5697 timing_out->v_border_top = 0;
5698 timing_out->v_border_bottom = 0;
5699 /* TODO: un-hardcode */
5700 if (drm_mode_is_420_only(info, mode_in)
5701 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5702 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5703 else if (drm_mode_is_420_also(info, mode_in)
5704 && aconnector->force_yuv420_output)
5705 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5706 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5707 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5708 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5709 else
5710 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5711
5712 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5713 timing_out->display_color_depth = convert_color_depth_from_display_info(
5714 connector,
5715 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5716 requested_bpc);
5717 timing_out->scan_type = SCANNING_TYPE_NODATA;
5718 timing_out->hdmi_vic = 0;
b333730d
BL
5719
5720 if (old_stream) {
5721 timing_out->vic = old_stream->timing.vic;
5722 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5723 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5724 } else {
5725 timing_out->vic = drm_match_cea_mode(mode_in);
5726 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5727 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5728 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5729 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5730 }
5731
5732 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5733 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5734 timing_out->vic = avi_frame.video_code;
5735 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5736 timing_out->hdmi_vic = hv_frame.vic;
5737 }
5738
5739 if (is_freesync_video_mode(mode_in, aconnector)) {
5740 timing_out->h_addressable = mode_in->hdisplay;
5741 timing_out->h_total = mode_in->htotal;
5742 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5743 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5744 timing_out->v_total = mode_in->vtotal;
5745 timing_out->v_addressable = mode_in->vdisplay;
5746 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5747 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5748 timing_out->pix_clk_100hz = mode_in->clock * 10;
5749 } else {
5750 timing_out->h_addressable = mode_in->crtc_hdisplay;
5751 timing_out->h_total = mode_in->crtc_htotal;
5752 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5753 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5754 timing_out->v_total = mode_in->crtc_vtotal;
5755 timing_out->v_addressable = mode_in->crtc_vdisplay;
5756 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5757 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5758 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5759 }
5760
5761 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5762
5763 stream->output_color_space = get_output_color_space(timing_out);
5764
5765 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5766 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5767 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5768 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5769 drm_mode_is_420_also(info, mode_in) &&
5770 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5771 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5772 adjust_colour_depth_from_display_info(timing_out, info);
5773 }
5774 }
5775}
5776
5777static void fill_audio_info(struct audio_info *audio_info,
5778 const struct drm_connector *drm_connector,
5779 const struct dc_sink *dc_sink)
5780{
5781 int i = 0;
5782 int cea_revision = 0;
5783 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5784
5785 audio_info->manufacture_id = edid_caps->manufacturer_id;
5786 audio_info->product_id = edid_caps->product_id;
5787
5788 cea_revision = drm_connector->display_info.cea_rev;
5789
5790 strscpy(audio_info->display_name,
5791 edid_caps->display_name,
5792 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5793
5794 if (cea_revision >= 3) {
5795 audio_info->mode_count = edid_caps->audio_mode_count;
5796
5797 for (i = 0; i < audio_info->mode_count; ++i) {
5798 audio_info->modes[i].format_code =
5799 (enum audio_format_code)
5800 (edid_caps->audio_modes[i].format_code);
5801 audio_info->modes[i].channel_count =
5802 edid_caps->audio_modes[i].channel_count;
5803 audio_info->modes[i].sample_rates.all =
5804 edid_caps->audio_modes[i].sample_rate;
5805 audio_info->modes[i].sample_size =
5806 edid_caps->audio_modes[i].sample_size;
5807 }
5808 }
5809
5810 audio_info->flags.all = edid_caps->speaker_flags;
5811
5812 /* TODO: We only check for the progressive mode, check for interlace mode too */
5813 if (drm_connector->latency_present[0]) {
5814 audio_info->video_latency = drm_connector->video_latency[0];
5815 audio_info->audio_latency = drm_connector->audio_latency[0];
5816 }
5817
5818 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5819
5820}
5821
5822static void
5823copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5824 struct drm_display_mode *dst_mode)
5825{
5826 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5827 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5828 dst_mode->crtc_clock = src_mode->crtc_clock;
5829 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5830 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5831 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
5832 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5833 dst_mode->crtc_htotal = src_mode->crtc_htotal;
5834 dst_mode->crtc_hskew = src_mode->crtc_hskew;
5835 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5836 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5837 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5838 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5839 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5840}
5841
5842static void
5843decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5844 const struct drm_display_mode *native_mode,
5845 bool scale_enabled)
5846{
5847 if (scale_enabled) {
5848 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5849 } else if (native_mode->clock == drm_mode->clock &&
5850 native_mode->htotal == drm_mode->htotal &&
5851 native_mode->vtotal == drm_mode->vtotal) {
5852 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5853 } else {
5854 /* no scaling nor amdgpu inserted, no need to patch */
5855 }
5856}
5857
5858static struct dc_sink *
5859create_fake_sink(struct amdgpu_dm_connector *aconnector)
5860 {
5861 struct dc_sink_init_data sink_init_data = { 0 };
5862 struct dc_sink *sink = NULL;
5863 sink_init_data.link = aconnector->dc_link;
5864 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5865
5866 sink = dc_sink_create(&sink_init_data);
5867 if (!sink) {
5868 DRM_ERROR("Failed to create sink!\n");
5869 return NULL;
5870 }
5871 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5872
5873 return sink;
5874}
5875
5876static void set_multisync_trigger_params(
5877 struct dc_stream_state *stream)
5878{
5879 struct dc_stream_state *master = NULL;
5880
5881 if (stream->triggered_crtc_reset.enabled) {
5882 master = stream->triggered_crtc_reset.event_source;
5883 stream->triggered_crtc_reset.event =
5884 master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5885 CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5886 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5887 }
5888}
5889
5890static void set_master_stream(struct dc_stream_state *stream_set[],
5891 int stream_count)
5892{
5893 int j, highest_rfr = 0, master_stream = 0;
5894
5895 for (j = 0; j < stream_count; j++) {
5896 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5897 int refresh_rate = 0;
5898
5899 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5900 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5901 if (refresh_rate > highest_rfr) {
5902 highest_rfr = refresh_rate;
5903 master_stream = j;
5904 }
5905 }
5906 }
5907 for (j = 0; j < stream_count; j++) {
5908 if (stream_set[j])
5909 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5910 }
5911}
5912
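/*
 * Editor's note: the refresh-rate expression above in concrete numbers
 * (illustrative). For 1080p60, pix_clk_100hz = 1485000 (148.5 MHz) with
 * h_total = 2200 and v_total = 1125:
 *
 *	(1485000 * 100) / (2200 * 1125) = 148500000 / 2475000 = 60 Hz
 *
 * and the stream with the highest such value is picked as the master.
 */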
5913static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5914{
5915 int i = 0;
5916 struct dc_stream_state *stream;
5917
5918 if (context->stream_count < 2)
5919 return;
5920 for (i = 0; i < context->stream_count ; i++) {
5921 if (!context->streams[i])
5922 continue;
5923 /*
5924 * TODO: add a function to read AMD VSDB bits and set
5925 * crtc_sync_master.multi_sync_enabled flag
5926 * For now it's set to false
5927 */
5928 }
5929
5930 set_master_stream(context->streams, context->stream_count);
5931
5932 for (i = 0; i < context->stream_count ; i++) {
5933 stream = context->streams[i];
5934
5935 if (!stream)
5936 continue;
5937
5938 set_multisync_trigger_params(stream);
5939 }
5940}
5941
5942 #if defined(CONFIG_DRM_AMD_DC_DCN)
5943static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
5944 struct dc_sink *sink, struct dc_stream_state *stream,
5945 struct dsc_dec_dpcd_caps *dsc_caps)
5946{
5947 stream->timing.flags.DSC = 0;
5948
5949 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5950 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5951 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5952 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5953 dsc_caps);
5954 }
5955}
5956
5957static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
5958 struct dc_sink *sink, struct dc_stream_state *stream,
5959 struct dsc_dec_dpcd_caps *dsc_caps)
5960{
5961 struct drm_connector *drm_connector = &aconnector->base;
5962 uint32_t link_bandwidth_kbps;
5963 uint32_t max_dsc_target_bpp_limit_override = 0;
5964
5965 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5966 dc_link_get_link_cap(aconnector->dc_link));
5967
5968 if (stream->link && stream->link->local_sink)
5969 max_dsc_target_bpp_limit_override =
5970 stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
5971
5972 /* Set DSC policy according to dsc_clock_en */
5973 dc_dsc_policy_set_enable_dsc_when_not_needed(
5974 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5975
5976 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5977
5978 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5979 dsc_caps,
5980 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5981 max_dsc_target_bpp_limit_override,
5982 link_bandwidth_kbps,
5983 &stream->timing,
5984 &stream->timing.dsc_cfg)) {
5985 stream->timing.flags.DSC = 1;
5986 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
5987 }
5988 }
5989
5990 /* Overwrite the stream flag if DSC is enabled through debugfs */
5991 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5992 stream->timing.flags.DSC = 1;
5993
5994 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5995 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5996
5997 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5998 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5999
6000 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6001 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6002 }
6003 #endif
6004
6005/**
6006 * DOC: FreeSync Video
6007 *
6008 * When a userspace application wants to play a video, the content follows a
6009 * standard format definition that usually specifies the FPS for that format.
6010 * The list below illustrates some common video formats and their
6011 * expected FPS:
6012 *
6013 * - TV/NTSC (23.976 FPS)
6014 * - Cinema (24 FPS)
6015 * - TV/PAL (25 FPS)
6016 * - TV/NTSC (29.97 FPS)
6017 * - TV/NTSC (30 FPS)
6018 * - Cinema HFR (48 FPS)
6019 * - TV/PAL (50 FPS)
6020 * - Commonly used (60 FPS)
6021 * - Multiples of 24 (48,72,96 FPS)
6022 *
6023 * The list of standard video formats is not huge and can be added to the
6024 * connector modeset list beforehand. With that, userspace can leverage
6025 * FreeSync to extend the front porch in order to attain the target refresh
6026 * rate. Such a switch will happen seamlessly, without screen blanking or
6027 * reprogramming of the output in any other way. If userspace requests a
6028 * modesetting change compatible with FreeSync modes that only differ in the
6029 * refresh rate, DC will skip the full update and avoid blink during the
6030 * transition. For example, the video player can change the modesetting from
6031 * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6032 * causing any display blink. This same concept can be applied to a mode
6033 * setting change.
6034 */
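/*
 * Editor's note: a sketch of the front-porch arithmetic described above
 * (illustrative numbers). Starting from a 1080p60 base mode with
 * vtotal = 1125 and a 148.5 MHz pixel clock, a 30 FPS target can be reached
 * by extending v_front_porch so that vtotal doubles:
 *
 *	refresh = pixel_clock / (htotal * vtotal)
 *	60 Hz = 148500000 / (2200 * 1125)
 *	30 Hz = 148500000 / (2200 * 2250)
 *
 * Only the blanking grows; the active timing and pixel clock are untouched,
 * which is why the switch needs no full modeset.
 */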
6035static struct drm_display_mode *
6036get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6037 bool use_probed_modes)
6038{
6039 struct drm_display_mode *m, *m_pref = NULL;
6040 u16 current_refresh, highest_refresh;
6041 struct list_head *list_head = use_probed_modes ?
6042 &aconnector->base.probed_modes :
6043 &aconnector->base.modes;
6044
6045 if (aconnector->freesync_vid_base.clock != 0)
6046 return &aconnector->freesync_vid_base;
6047
6048 /* Find the preferred mode */
6049 list_for_each_entry(m, list_head, head) {
6050 if (m->type & DRM_MODE_TYPE_PREFERRED) {
6051 m_pref = m;
6052 break;
6053 }
6054 }
6055
6056 if (!m_pref) {
6057 /* Probably an EDID with no preferred mode. Fallback to first entry */
6058 m_pref = list_first_entry_or_null(
6059 &aconnector->base.modes, struct drm_display_mode, head);
6060 if (!m_pref) {
6061 DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6062 return NULL;
6063 }
6064 }
6065
6066 highest_refresh = drm_mode_vrefresh(m_pref);
6067
6068 /*
6069 * Find the mode with highest refresh rate with same resolution.
6070 * For some monitors, preferred mode is not the mode with highest
6071 * supported refresh rate.
6072 */
6073 list_for_each_entry(m, list_head, head) {
6074 current_refresh = drm_mode_vrefresh(m);
6075
6076 if (m->hdisplay == m_pref->hdisplay &&
6077 m->vdisplay == m_pref->vdisplay &&
6078 highest_refresh < current_refresh) {
6079 highest_refresh = current_refresh;
6080 m_pref = m;
6081 }
6082 }
6083
6084 aconnector->freesync_vid_base = *m_pref;
6085 return m_pref;
6086}
6087
fe8858bb 6088static bool is_freesync_video_mode(const struct drm_display_mode *mode,
a85ba005
NC
6089 struct amdgpu_dm_connector *aconnector)
6090{
6091 struct drm_display_mode *high_mode;
6092 int timing_diff;
6093
6094 high_mode = get_highest_refresh_rate_mode(aconnector, false);
6095 if (!high_mode || !mode)
6096 return false;
6097
6098 timing_diff = high_mode->vtotal - mode->vtotal;
6099
6100 if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6101 high_mode->hdisplay != mode->hdisplay ||
6102 high_mode->vdisplay != mode->vdisplay ||
6103 high_mode->hsync_start != mode->hsync_start ||
6104 high_mode->hsync_end != mode->hsync_end ||
6105 high_mode->htotal != mode->htotal ||
6106 high_mode->hskew != mode->hskew ||
6107 high_mode->vscan != mode->vscan ||
6108 high_mode->vsync_start - mode->vsync_start != timing_diff ||
6109 high_mode->vsync_end - mode->vsync_end != timing_diff)
6110 return false;
6111 else
6112 return true;
6113}
6114
6115static struct dc_stream_state *
6116create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6117 const struct drm_display_mode *drm_mode,
b333730d 6118 const struct dm_connector_state *dm_state,
cbd14ae7
SW
6119 const struct dc_stream_state *old_stream,
6120 int requested_bpc)
6121{
6122 struct drm_display_mode *preferred_mode = NULL;
6123 struct drm_connector *drm_connector;
6124 const struct drm_connector_state *con_state =
6125 dm_state ? &dm_state->base : NULL;
6126 struct dc_stream_state *stream = NULL;
6127 struct drm_display_mode mode = *drm_mode;
6128 struct drm_display_mode saved_mode;
6129 struct drm_display_mode *freesync_mode = NULL;
6130 bool native_mode_found = false;
6131 bool recalculate_timing = false;
6132 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6133 int mode_refresh;
6134 int preferred_refresh = 0;
6135 #if defined(CONFIG_DRM_AMD_DC_DCN)
6136 struct dsc_dec_dpcd_caps dsc_caps;
6137 #endif
6138 struct dc_sink *sink = NULL;
6139
6140 memset(&saved_mode, 0, sizeof(saved_mode));
6141
6142 if (aconnector == NULL) {
6143 DRM_ERROR("aconnector is NULL!\n");
6144 return stream;
6145 }
6146
6147 drm_connector = &aconnector->base;
6148
6149 if (!aconnector->dc_sink) {
6150 sink = create_fake_sink(aconnector);
6151 if (!sink)
6152 return stream;
6153 } else {
6154 sink = aconnector->dc_sink;
6155 dc_sink_retain(sink);
6156 }
6157
6158 stream = dc_create_stream_for_sink(sink);
6159
6160 if (stream == NULL) {
6161 DRM_ERROR("Failed to create stream for sink!\n");
6162 goto finish;
6163 }
6164
6165 stream->dm_stream_context = aconnector;
6166
6167 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6168 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6169
6170 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6171 /* Search for preferred mode */
6172 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6173 native_mode_found = true;
6174 break;
6175 }
6176 }
6177 if (!native_mode_found)
6178 preferred_mode = list_first_entry_or_null(
6179 &aconnector->base.modes,
6180 struct drm_display_mode,
6181 head);
6182
6183 mode_refresh = drm_mode_vrefresh(&mode);
6184
6185 if (preferred_mode == NULL) {
6186 /*
6187 * This may not be an error, the use case is when we have no
6188 * usermode calls to reset and set mode upon hotplug. In this
6189 * case, we call set mode ourselves to restore the previous mode
6190 * and the modelist may not be filled in in time.
6191 */
6192 DRM_DEBUG_DRIVER("No preferred mode found\n");
6193 } else {
6194 recalculate_timing = amdgpu_freesync_vid_mode &&
6195 is_freesync_video_mode(&mode, aconnector);
6196 if (recalculate_timing) {
6197 freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6198 saved_mode = mode;
6199 mode = *freesync_mode;
6200 } else {
6201 decide_crtc_timing_for_drm_display_mode(
6202 &mode, preferred_mode, scale);
6203
6204 preferred_refresh = drm_mode_vrefresh(preferred_mode);
6205 }
6206 }
6207
6208 if (recalculate_timing)
6209 drm_mode_set_crtcinfo(&saved_mode, 0);
6210 else if (!dm_state)
6211 drm_mode_set_crtcinfo(&mode, 0);
6212
6213 /*
6214 * If scaling is enabled and refresh rate didn't change
6215 * we copy the vic and polarities of the old timings
6216 */
6217 if (!scale || mode_refresh != preferred_refresh)
6218 fill_stream_properties_from_drm_display_mode(
6219 stream, &mode, &aconnector->base, con_state, NULL,
6220 requested_bpc);
6221
6222 fill_stream_properties_from_drm_display_mode(
6223 stream, &mode, &aconnector->base, con_state, old_stream,
6224 requested_bpc);
6225
6226 #if defined(CONFIG_DRM_AMD_DC_DCN)
6227 /* SST DSC determination policy */
6228 update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6229 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6230 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6231#endif
6232
6233 update_stream_scaling_settings(&mode, dm_state, stream);
6234
6235 fill_audio_info(
6236 &stream->audio_info,
6237 drm_connector,
6238 sink);
6239
6240 update_stream_signal(stream, sink);
6241
6242 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6243 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6244
6245 if (stream->link->psr_settings.psr_feature_enabled) {
6246 /*
6247 * Decide whether the stream supports VSC SDP colorimetry
6248 * before building the VSC info packet.
6249 */
6250 stream->use_vsc_sdp_for_colorimetry = false;
6251 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6252 stream->use_vsc_sdp_for_colorimetry =
6253 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6254 } else {
6255 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6256 stream->use_vsc_sdp_for_colorimetry = true;
6257 }
6258 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
6259 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6260
6261 }
6262 finish:
6263 dc_sink_release(sink);
6264
6265 return stream;
6266}
6267
6268 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6269{
6270 drm_crtc_cleanup(crtc);
6271 kfree(crtc);
6272}
6273
6274static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6275 struct drm_crtc_state *state)
6276{
6277 struct dm_crtc_state *cur = to_dm_crtc_state(state);
6278
6279 /* TODO: Destroy dc_stream objects once the stream object is flattened */
6280 if (cur->stream)
6281 dc_stream_release(cur->stream);
6282
6283
6284 __drm_atomic_helper_crtc_destroy_state(state);
6285
6286
6287 kfree(state);
6288}
6289
6290static void dm_crtc_reset_state(struct drm_crtc *crtc)
6291{
6292 struct dm_crtc_state *state;
6293
6294 if (crtc->state)
6295 dm_crtc_destroy_state(crtc, crtc->state);
6296
6297 state = kzalloc(sizeof(*state), GFP_KERNEL);
6298 if (WARN_ON(!state))
6299 return;
6300
6301 __drm_atomic_helper_crtc_reset(crtc, &state->base);
6302}
6303
6304static struct drm_crtc_state *
6305dm_crtc_duplicate_state(struct drm_crtc *crtc)
6306{
6307 struct dm_crtc_state *state, *cur;
6308
6309 cur = to_dm_crtc_state(crtc->state);
6310
6311 if (WARN_ON(!crtc->state))
6312 return NULL;
6313
6314 state = kzalloc(sizeof(*state), GFP_KERNEL);
6315 if (!state)
6316 return NULL;
6317
6318 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6319
6320 if (cur->stream) {
6321 state->stream = cur->stream;
6322 dc_stream_retain(state->stream);
6323 }
6324
6325 state->active_planes = cur->active_planes;
6326 state->vrr_infopacket = cur->vrr_infopacket;
6327 state->abm_level = cur->abm_level;
6328 state->vrr_supported = cur->vrr_supported;
6329 state->freesync_config = cur->freesync_config;
6330 state->cm_has_degamma = cur->cm_has_degamma;
6331 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6332 state->force_dpms_off = cur->force_dpms_off;
6333 /* TODO: Duplicate dc_stream once the stream object is flattened */
6334
6335 return &state->base;
6336}
6337
6338 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6339 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6340{
6341 crtc_debugfs_init(crtc);
6342
6343 return 0;
6344}
6345#endif
6346
6347static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6348{
6349 enum dc_irq_source irq_source;
6350 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6351 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6352 int rc;
6353
6354 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6355
6356 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6357
6358 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6359 acrtc->crtc_id, enable ? "en" : "dis", rc);
6360 return rc;
6361}
6362
6363static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6364{
6365 enum dc_irq_source irq_source;
6366 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6367 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6368 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6369 #if defined(CONFIG_DRM_AMD_DC_DCN)
6370 struct amdgpu_display_manager *dm = &adev->dm;
6371 struct vblank_control_work *work;
6372 #endif
6373 int rc = 0;
6374
6375 if (enable) {
6376 /* vblank irq on -> Only need vupdate irq in vrr mode */
6377 if (amdgpu_dm_vrr_active(acrtc_state))
6378 rc = dm_set_vupdate_irq(crtc, true);
6379 } else {
6380 /* vblank irq off -> vupdate irq off */
6381 rc = dm_set_vupdate_irq(crtc, false);
6382 }
6383
6384 if (rc)
6385 return rc;
6386
6387 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6388
6389 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6390 return -EBUSY;
6391
6392 if (amdgpu_in_reset(adev))
6393 return 0;
6394
6395 #if defined(CONFIG_DRM_AMD_DC_DCN)
6396 if (dm->vblank_control_workqueue) {
6397 work = kzalloc(sizeof(*work), GFP_ATOMIC);
6398 if (!work)
6399 return -ENOMEM;
6400
6401 INIT_WORK(&work->work, vblank_control_worker);
6402 work->dm = dm;
6403 work->acrtc = acrtc;
6404 work->enable = enable;
6405
6406 if (acrtc_state->stream) {
6407 dc_stream_retain(acrtc_state->stream);
6408 work->stream = acrtc_state->stream;
6409 }
6410
6411 queue_work(dm->vblank_control_workqueue, &work->work);
6412 }
6413 #endif
6414
6415 return 0;
6416}
6417
6418static int dm_enable_vblank(struct drm_crtc *crtc)
6419{
6420 return dm_set_vblank(crtc, true);
6421}
6422
6423static void dm_disable_vblank(struct drm_crtc *crtc)
6424{
6425 dm_set_vblank(crtc, false);
6426}
6427
6428 /* Only the options currently available for the driver are implemented */
6429static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6430 .reset = dm_crtc_reset_state,
6431 .destroy = amdgpu_dm_crtc_destroy,
6432 .set_config = drm_atomic_helper_set_config,
6433 .page_flip = drm_atomic_helper_page_flip,
6434 .atomic_duplicate_state = dm_crtc_duplicate_state,
6435 .atomic_destroy_state = dm_crtc_destroy_state,
6436 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
6437 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6438 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6439 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
6440 .enable_vblank = dm_enable_vblank,
6441 .disable_vblank = dm_disable_vblank,
6442 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6443#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6444 .late_register = amdgpu_dm_crtc_late_register,
6445#endif
6446};
6447
6448static enum drm_connector_status
6449amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6450{
6451 bool connected;
6452 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6453
6454 /*
6455 * Notes:
6456 * 1. This interface is NOT called in context of HPD irq.
6457 * 2. This interface *is called* in context of user-mode ioctl. Which
1f6010a9
DF
6458 * makes it a bad place for *any* MST-related activity.
6459 */
e7b07cee 6460
8580d60b
HW
6461 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6462 !aconnector->fake_enable)
e7b07cee
HW
6463 connected = (aconnector->dc_sink != NULL);
6464 else
6465 connected = (aconnector->base.force == DRM_FORCE_ON);
6466
0f877894
OV
6467 update_subconnector_property(aconnector);
6468
e7b07cee
HW
6469 return (connected ? connector_status_connected :
6470 connector_status_disconnected);
6471}
6472
3ee6b26b
AD
6473int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6474 struct drm_connector_state *connector_state,
6475 struct drm_property *property,
6476 uint64_t val)
e7b07cee
HW
6477{
6478 struct drm_device *dev = connector->dev;
1348969a 6479 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
6480 struct dm_connector_state *dm_old_state =
6481 to_dm_connector_state(connector->state);
6482 struct dm_connector_state *dm_new_state =
6483 to_dm_connector_state(connector_state);
6484
6485 int ret = -EINVAL;
6486
6487 if (property == dev->mode_config.scaling_mode_property) {
6488 enum amdgpu_rmx_type rmx_type;
6489
6490 switch (val) {
6491 case DRM_MODE_SCALE_CENTER:
6492 rmx_type = RMX_CENTER;
6493 break;
6494 case DRM_MODE_SCALE_ASPECT:
6495 rmx_type = RMX_ASPECT;
6496 break;
6497 case DRM_MODE_SCALE_FULLSCREEN:
6498 rmx_type = RMX_FULL;
6499 break;
6500 case DRM_MODE_SCALE_NONE:
6501 default:
6502 rmx_type = RMX_OFF;
6503 break;
6504 }
6505
6506 if (dm_old_state->scaling == rmx_type)
6507 return 0;
6508
6509 dm_new_state->scaling = rmx_type;
6510 ret = 0;
6511 } else if (property == adev->mode_info.underscan_hborder_property) {
6512 dm_new_state->underscan_hborder = val;
6513 ret = 0;
6514 } else if (property == adev->mode_info.underscan_vborder_property) {
6515 dm_new_state->underscan_vborder = val;
6516 ret = 0;
6517 } else if (property == adev->mode_info.underscan_property) {
6518 dm_new_state->underscan_enable = val;
6519 ret = 0;
c1ee92f9
DF
6520 } else if (property == adev->mode_info.abm_level_property) {
6521 dm_new_state->abm_level = val;
6522 ret = 0;
e7b07cee
HW
6523 }
6524
6525 return ret;
6526}
6527
3ee6b26b
AD
6528int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6529 const struct drm_connector_state *state,
6530 struct drm_property *property,
6531 uint64_t *val)
e7b07cee
HW
6532{
6533 struct drm_device *dev = connector->dev;
1348969a 6534 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
6535 struct dm_connector_state *dm_state =
6536 to_dm_connector_state(state);
6537 int ret = -EINVAL;
6538
6539 if (property == dev->mode_config.scaling_mode_property) {
6540 switch (dm_state->scaling) {
6541 case RMX_CENTER:
6542 *val = DRM_MODE_SCALE_CENTER;
6543 break;
6544 case RMX_ASPECT:
6545 *val = DRM_MODE_SCALE_ASPECT;
6546 break;
6547 case RMX_FULL:
6548 *val = DRM_MODE_SCALE_FULLSCREEN;
6549 break;
6550 case RMX_OFF:
6551 default:
6552 *val = DRM_MODE_SCALE_NONE;
6553 break;
6554 }
6555 ret = 0;
6556 } else if (property == adev->mode_info.underscan_hborder_property) {
6557 *val = dm_state->underscan_hborder;
6558 ret = 0;
6559 } else if (property == adev->mode_info.underscan_vborder_property) {
6560 *val = dm_state->underscan_vborder;
6561 ret = 0;
6562 } else if (property == adev->mode_info.underscan_property) {
6563 *val = dm_state->underscan_enable;
6564 ret = 0;
c1ee92f9
DF
6565 } else if (property == adev->mode_info.abm_level_property) {
6566 *val = dm_state->abm_level;
6567 ret = 0;
e7b07cee 6568 }
c1ee92f9 6569
e7b07cee
HW
6570 return ret;
6571}
6572
526c654a
ED
6573static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6574{
6575 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6576
6577 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6578}
6579
7578ecda 6580static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
e7b07cee 6581{
c84dec2f 6582 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 6583 const struct dc_link *link = aconnector->dc_link;
1348969a 6584 struct amdgpu_device *adev = drm_to_adev(connector->dev);
e7b07cee 6585 struct amdgpu_display_manager *dm = &adev->dm;
7fd13bae 6586 int i;
ada8ce15 6587
5dff80bd
AG
6588 /*
6589 * Call only if mst_mgr was initialized before, since it's not done
6590 * for all connector types.
6591 */
6592 if (aconnector->mst_mgr.dev)
6593 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6594
e7b07cee
HW
6595#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6596 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
7fd13bae
AD
6597 for (i = 0; i < dm->num_of_edps; i++) {
6598 if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6599 backlight_device_unregister(dm->backlight_dev[i]);
6600 dm->backlight_dev[i] = NULL;
6601 }
e7b07cee
HW
6602 }
6603#endif
dcd5fb82
MF
6604
6605 if (aconnector->dc_em_sink)
6606 dc_sink_release(aconnector->dc_em_sink);
6607 aconnector->dc_em_sink = NULL;
6608 if (aconnector->dc_sink)
6609 dc_sink_release(aconnector->dc_sink);
6610 aconnector->dc_sink = NULL;
6611
e86e8947 6612 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
e7b07cee
HW
6613 drm_connector_unregister(connector);
6614 drm_connector_cleanup(connector);
526c654a
ED
6615 if (aconnector->i2c) {
6616 i2c_del_adapter(&aconnector->i2c->base);
6617 kfree(aconnector->i2c);
6618 }
7daec99f 6619 kfree(aconnector->dm_dp_aux.aux.name);
526c654a 6620
e7b07cee
HW
6621 kfree(connector);
6622}
6623
6624void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6625{
6626 struct dm_connector_state *state =
6627 to_dm_connector_state(connector->state);
6628
df099b9b
LSL
6629 if (connector->state)
6630 __drm_atomic_helper_connector_destroy_state(connector->state);
6631
e7b07cee
HW
6632 kfree(state);
6633
6634 state = kzalloc(sizeof(*state), GFP_KERNEL);
6635
6636 if (state) {
6637 state->scaling = RMX_OFF;
6638 state->underscan_enable = false;
6639 state->underscan_hborder = 0;
6640 state->underscan_vborder = 0;
01933ba4 6641 state->base.max_requested_bpc = 8;
3261e013
ML
6642 state->vcpi_slots = 0;
6643 state->pbn = 0;
c3e50f89
NK
6644 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6645 state->abm_level = amdgpu_dm_abm_level;
6646
df099b9b 6647 __drm_atomic_helper_connector_reset(connector, &state->base);
e7b07cee
HW
6648 }
6649}
6650
3ee6b26b
AD
6651struct drm_connector_state *
6652amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
e7b07cee
HW
6653{
6654 struct dm_connector_state *state =
6655 to_dm_connector_state(connector->state);
6656
6657 struct dm_connector_state *new_state =
6658 kmemdup(state, sizeof(*state), GFP_KERNEL);
6659
98e6436d
AK
6660 if (!new_state)
6661 return NULL;
e7b07cee 6662
98e6436d
AK
6663 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6664
6665 new_state->freesync_capable = state->freesync_capable;
c1ee92f9 6666 new_state->abm_level = state->abm_level;
922454c2
NK
6667 new_state->scaling = state->scaling;
6668 new_state->underscan_enable = state->underscan_enable;
6669 new_state->underscan_hborder = state->underscan_hborder;
6670 new_state->underscan_vborder = state->underscan_vborder;
3261e013
ML
6671 new_state->vcpi_slots = state->vcpi_slots;
6672 new_state->pbn = state->pbn;
98e6436d 6673 return &new_state->base;
e7b07cee
HW
6674}
6675
14f04fa4
AD
6676static int
6677amdgpu_dm_connector_late_register(struct drm_connector *connector)
6678{
6679 struct amdgpu_dm_connector *amdgpu_dm_connector =
6680 to_amdgpu_dm_connector(connector);
00a8037e 6681 int r;
14f04fa4 6682
00a8037e
AD
6683 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6684 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6685 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6686 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6687 if (r)
6688 return r;
6689 }
6690
6691#if defined(CONFIG_DEBUG_FS)
14f04fa4
AD
6692 connector_debugfs_init(amdgpu_dm_connector);
6693#endif
6694
6695 return 0;
6696}
6697
e7b07cee
HW
6698static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6699 .reset = amdgpu_dm_connector_funcs_reset,
6700 .detect = amdgpu_dm_connector_detect,
6701 .fill_modes = drm_helper_probe_single_connector_modes,
6702 .destroy = amdgpu_dm_connector_destroy,
6703 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6704 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6705 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
526c654a 6706 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
14f04fa4 6707 .late_register = amdgpu_dm_connector_late_register,
526c654a 6708 .early_unregister = amdgpu_dm_connector_unregister
e7b07cee
HW
6709};
6710
e7b07cee
HW
6711static int get_modes(struct drm_connector *connector)
6712{
6713 return amdgpu_dm_connector_get_modes(connector);
6714}
6715
c84dec2f 6716static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
6717{
6718 struct dc_sink_init_data init_params = {
6719 .link = aconnector->dc_link,
6720 .sink_signal = SIGNAL_TYPE_VIRTUAL
6721 };
70e8ffc5 6722 struct edid *edid;
e7b07cee 6723
a89ff457 6724 if (!aconnector->base.edid_blob_ptr) {
e7b07cee
HW
6725 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6726 aconnector->base.name);
6727
6728 aconnector->base.force = DRM_FORCE_OFF;
6729 aconnector->base.override_edid = false;
6730 return;
6731 }
6732
70e8ffc5
HW
6733 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6734
e7b07cee
HW
6735 aconnector->edid = edid;
6736
6737 aconnector->dc_em_sink = dc_link_add_remote_sink(
6738 aconnector->dc_link,
6739 (uint8_t *)edid,
6740 (edid->extensions + 1) * EDID_LENGTH,
6741 &init_params);
6742
dcd5fb82 6743 if (aconnector->base.force == DRM_FORCE_ON) {
e7b07cee
HW
6744 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6745 aconnector->dc_link->local_sink :
6746 aconnector->dc_em_sink;
dcd5fb82
MF
6747 dc_sink_retain(aconnector->dc_sink);
6748 }
e7b07cee
HW
6749}
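/*
 * Sizing note for the emulated sink above: EDID_LENGTH is 128 bytes per
 * block and edid->extensions counts extension blocks only, so
 * (edid->extensions + 1) * EDID_LENGTH covers the base block plus all
 * extensions. For example, a base block with one CEA extension passes
 * (1 + 1) * 128 = 256 bytes to dc_link_add_remote_sink().
 */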
6750
c84dec2f 6751static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
6752{
6753 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6754
1f6010a9
DF
6755 /*
6756 * In case of a headless boot with force-on for a DP managed connector,
e7b07cee
HW
6757 * these settings have to be != 0 to get an initial modeset
6758 */
6759 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6760 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6761 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6762 }
6763
6764
6765 aconnector->base.override_edid = true;
6766 create_eml_sink(aconnector);
6767}
6768
cbd14ae7
SW
6769static struct dc_stream_state *
6770create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6771 const struct drm_display_mode *drm_mode,
6772 const struct dm_connector_state *dm_state,
6773 const struct dc_stream_state *old_stream)
6774{
6775 struct drm_connector *connector = &aconnector->base;
1348969a 6776 struct amdgpu_device *adev = drm_to_adev(connector->dev);
cbd14ae7 6777 struct dc_stream_state *stream;
4b7da34b
SW
6778 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6779 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
cbd14ae7
SW
6780 enum dc_status dc_result = DC_OK;
6781
6782 do {
6783 stream = create_stream_for_sink(aconnector, drm_mode,
6784 dm_state, old_stream,
6785 requested_bpc);
6786 if (stream == NULL) {
6787 DRM_ERROR("Failed to create stream for sink!\n");
6788 break;
6789 }
6790
6791 dc_result = dc_validate_stream(adev->dm.dc, stream);
6792
6793 if (dc_result != DC_OK) {
74a16675 6794 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
cbd14ae7
SW
6795 drm_mode->hdisplay,
6796 drm_mode->vdisplay,
6797 drm_mode->clock,
74a16675
RS
6798 dc_result,
6799 dc_status_to_str(dc_result));
cbd14ae7
SW
6800
6801 dc_stream_release(stream);
6802 stream = NULL;
6803 requested_bpc -= 2; /* lower bpc to retry validation */
6804 }
6805
6806 } while (stream == NULL && requested_bpc >= 6);
6807
68eb3ae3
WS
6808 if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6809 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6810
6811 aconnector->force_yuv420_output = true;
6812 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6813 dm_state, old_stream);
6814 aconnector->force_yuv420_output = false;
6815 }
6816
cbd14ae7
SW
6817 return stream;
6818}
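/*
 * Worked example of the retry ladder above, assuming a connector state
 * with max_requested_bpc = 10 on a link that can only carry 8bpc:
 *
 *   try 10bpc -> dc_validate_stream() fails -> requested_bpc = 8
 *   try  8bpc -> DC_OK                      -> stream returned
 *
 * If every step down to 6bpc fails and the last status is
 * DC_FAIL_ENC_VALIDATE, the function recurses once with
 * force_yuv420_output set, repeating the same ladder with YCbCr 4:2:0
 * to reduce the required link bandwidth.
 */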
6819
ba9ca088 6820enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
3ee6b26b 6821 struct drm_display_mode *mode)
e7b07cee
HW
6822{
6823 int result = MODE_ERROR;
6824 struct dc_sink *dc_sink;
e7b07cee 6825 /* TODO: Unhardcode stream count */
0971c40e 6826 struct dc_stream_state *stream;
c84dec2f 6827 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
6828
6829 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6830 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6831 return result;
6832
1f6010a9
DF
6833 /*
6834 * Only run this the first time mode_valid is called, to initialize
e7b07cee
HW
6835 * EDID mgmt
6836 */
6837 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6838 !aconnector->dc_em_sink)
6839 handle_edid_mgmt(aconnector);
6840
c84dec2f 6841 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
e7b07cee 6842
ad975f44
VL
6843 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6844 aconnector->base.force != DRM_FORCE_ON) {
e7b07cee
HW
6845 DRM_ERROR("dc_sink is NULL!\n");
6846 goto fail;
6847 }
6848
cbd14ae7
SW
6849 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6850 if (stream) {
6851 dc_stream_release(stream);
e7b07cee 6852 result = MODE_OK;
cbd14ae7 6853 }
e7b07cee
HW
6854
6855fail:
6856 /* TODO: error handling */
6857 return result;
6858}
6859
88694af9
NK
6860static int fill_hdr_info_packet(const struct drm_connector_state *state,
6861 struct dc_info_packet *out)
6862{
6863 struct hdmi_drm_infoframe frame;
6864 unsigned char buf[30]; /* 26 + 4 */
6865 ssize_t len;
6866 int ret, i;
6867
6868 memset(out, 0, sizeof(*out));
6869
6870 if (!state->hdr_output_metadata)
6871 return 0;
6872
6873 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6874 if (ret)
6875 return ret;
6876
6877 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6878 if (len < 0)
6879 return (int)len;
6880
6881 /* Static metadata is a fixed 26 bytes + 4 byte header. */
6882 if (len != 30)
6883 return -EINVAL;
6884
6885 /* Prepare the infopacket for DC. */
6886 switch (state->connector->connector_type) {
6887 case DRM_MODE_CONNECTOR_HDMIA:
6888 out->hb0 = 0x87; /* type */
6889 out->hb1 = 0x01; /* version */
6890 out->hb2 = 0x1A; /* length */
6891 out->sb[0] = buf[3]; /* checksum */
6892 i = 1;
6893 break;
6894
6895 case DRM_MODE_CONNECTOR_DisplayPort:
6896 case DRM_MODE_CONNECTOR_eDP:
6897 out->hb0 = 0x00; /* sdp id, zero */
6898 out->hb1 = 0x87; /* type */
6899 out->hb2 = 0x1D; /* payload len - 1 */
6900 out->hb3 = (0x13 << 2); /* sdp version */
6901 out->sb[0] = 0x01; /* version */
6902 out->sb[1] = 0x1A; /* length */
6903 i = 2;
6904 break;
6905
6906 default:
6907 return -EINVAL;
6908 }
6909
6910 memcpy(&out->sb[i], &buf[4], 26);
6911 out->valid = true;
6912
6913 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6914 sizeof(out->sb), false);
6915
6916 return 0;
6917}
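/*
 * Resulting packet layout, as assembled above from the 30-byte packed
 * infoframe (4-byte header in buf[0..3], 26-byte payload in buf[4..29]):
 *
 *   HDMI:   hb0 = 0x87 (type), hb1 = 0x01 (version), hb2 = 0x1A (length),
 *           sb[0] = checksum (buf[3]), sb[1..26] = payload
 *   DP/eDP: hb0 = 0x00 (SDP id), hb1 = 0x87 (type), hb2 = 0x1D
 *           (payload length - 1), hb3 = 0x13 << 2 (SDP version),
 *           sb[0] = 0x01, sb[1] = 0x1A, sb[2..27] = payload
 */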
6918
88694af9
NK
6919static int
6920amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
51e857af 6921 struct drm_atomic_state *state)
88694af9 6922{
51e857af
SP
6923 struct drm_connector_state *new_con_state =
6924 drm_atomic_get_new_connector_state(state, conn);
88694af9
NK
6925 struct drm_connector_state *old_con_state =
6926 drm_atomic_get_old_connector_state(state, conn);
6927 struct drm_crtc *crtc = new_con_state->crtc;
6928 struct drm_crtc_state *new_crtc_state;
6929 int ret;
6930
e8a98235
RS
6931 trace_amdgpu_dm_connector_atomic_check(new_con_state);
6932
88694af9
NK
6933 if (!crtc)
6934 return 0;
6935
72921cdf 6936 if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
88694af9
NK
6937 struct dc_info_packet hdr_infopacket;
6938
6939 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6940 if (ret)
6941 return ret;
6942
6943 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6944 if (IS_ERR(new_crtc_state))
6945 return PTR_ERR(new_crtc_state);
6946
6947 /*
6948 * DC considers the stream backends changed if the
6949 * static metadata changes. Forcing the modeset also
6950 * gives a simple way for userspace to switch from
b232d4ed
NK
6951 * 8bpc to 10bpc when setting the metadata to enter
6952 * or exit HDR.
6953 *
6954 * Changing the static metadata after it's been
6955 * set is permissible, however. So only force a
6956 * modeset if we're entering or exiting HDR.
88694af9 6957 */
b232d4ed
NK
6958 new_crtc_state->mode_changed =
6959 !old_con_state->hdr_output_metadata ||
6960 !new_con_state->hdr_output_metadata;
88694af9
NK
6961 }
6962
6963 return 0;
6964}
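/*
 * Decision table for the mode_changed logic above:
 *
 *   old metadata   new metadata    result
 *   NULL           set             full modeset (entering HDR)
 *   set            NULL            full modeset (exiting HDR)
 *   set            set, changed    fast update, infopacket refreshed
 *
 * Only the HDR on/off transition forces a modeset; in-place metadata
 * changes ride along as a non-modeset update.
 */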
6965
e7b07cee
HW
6966static const struct drm_connector_helper_funcs
6967amdgpu_dm_connector_helper_funcs = {
6968 /*
1f6010a9 6969 * If hotplugging a second bigger display in FB Con mode, bigger resolution
b830ebc9 6970 * modes will be filtered by drm_mode_validate_size(), and those modes
1f6010a9 6971 * are missing after the user starts lightdm. So we need to renew the
b830ebc9
HW
6972 * modes list in the get_modes callback, not just return the modes count.
6973 */
e7b07cee
HW
6974 .get_modes = get_modes,
6975 .mode_valid = amdgpu_dm_connector_mode_valid,
88694af9 6976 .atomic_check = amdgpu_dm_connector_atomic_check,
e7b07cee
HW
6977};
6978
6979static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6980{
6981}
6982
d6ef9b41 6983static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
c14a005c
NK
6984{
6985 struct drm_atomic_state *state = new_crtc_state->state;
6986 struct drm_plane *plane;
6987 int num_active = 0;
6988
6989 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6990 struct drm_plane_state *new_plane_state;
6991
6992 /* Cursor planes are "fake". */
6993 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6994 continue;
6995
6996 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6997
6998 if (!new_plane_state) {
6999 /*
7000 * The plane is enabled on the CRTC and hasn't changed
7001 * state. This means that it previously passed
7002 * validation and is therefore enabled.
7003 */
7004 num_active += 1;
7005 continue;
7006 }
7007
7008 /* We need a framebuffer to be considered enabled. */
7009 num_active += (new_plane_state->fb != NULL);
7010 }
7011
d6ef9b41
NK
7012 return num_active;
7013}
7014
8fe684e9
NK
7015static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7016 struct drm_crtc_state *new_crtc_state)
d6ef9b41
NK
7017{
7018 struct dm_crtc_state *dm_new_crtc_state =
7019 to_dm_crtc_state(new_crtc_state);
7020
7021 dm_new_crtc_state->active_planes = 0;
d6ef9b41
NK
7022
7023 if (!dm_new_crtc_state->stream)
7024 return;
7025
7026 dm_new_crtc_state->active_planes =
7027 count_crtc_active_planes(new_crtc_state);
c14a005c
NK
7028}
7029
3ee6b26b 7030static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
29b77ad7 7031 struct drm_atomic_state *state)
e7b07cee 7032{
29b77ad7
MR
7033 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7034 crtc);
1348969a 7035 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
e7b07cee 7036 struct dc *dc = adev->dm.dc;
29b77ad7 7037 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
e7b07cee
HW
7038 int ret = -EINVAL;
7039
5b8c5969 7040 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
e8a98235 7041
29b77ad7 7042 dm_update_crtc_active_planes(crtc, crtc_state);
d6ef9b41 7043
bcd74374
ND
7044 if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7045 modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
e7b07cee
HW
7046 return ret;
7047 }
7048
bc92c065 7049 /*
b836a274
MD
7050 * We require the primary plane to be enabled whenever the CRTC is, otherwise
7051 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7052 * planes are disabled, which is not supported by the hardware. And there is legacy
7053 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
bc92c065 7054 */
29b77ad7 7055 if (crtc_state->enable &&
ea9522f5
SS
7056 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7057 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
c14a005c 7058 return -EINVAL;
ea9522f5 7059 }
c14a005c 7060
b836a274
MD
7061 /* In some use cases, like reset, no stream is attached */
7062 if (!dm_crtc_state->stream)
7063 return 0;
7064
62c933f9 7065 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
e7b07cee
HW
7066 return 0;
7067
ea9522f5 7068 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
e7b07cee
HW
7069 return ret;
7070}
7071
3ee6b26b
AD
7072static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7073 const struct drm_display_mode *mode,
7074 struct drm_display_mode *adjusted_mode)
e7b07cee
HW
7075{
7076 return true;
7077}
7078
7079static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7080 .disable = dm_crtc_helper_disable,
7081 .atomic_check = dm_crtc_helper_atomic_check,
ea702333
TZ
7082 .mode_fixup = dm_crtc_helper_mode_fixup,
7083 .get_scanout_position = amdgpu_crtc_get_scanout_position,
e7b07cee
HW
7084};
7085
7086static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7087{
7088
7089}
7090
3261e013
ML
7091static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
7092{
7093 switch (display_color_depth) {
7094 case COLOR_DEPTH_666:
7095 return 6;
7096 case COLOR_DEPTH_888:
7097 return 8;
7098 case COLOR_DEPTH_101010:
7099 return 10;
7100 case COLOR_DEPTH_121212:
7101 return 12;
7102 case COLOR_DEPTH_141414:
7103 return 14;
7104 case COLOR_DEPTH_161616:
7105 return 16;
7106 default:
7107 break;
7108 }
7109 return 0;
7110}
7111
3ee6b26b
AD
7112static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7113 struct drm_crtc_state *crtc_state,
7114 struct drm_connector_state *conn_state)
e7b07cee 7115{
3261e013
ML
7116 struct drm_atomic_state *state = crtc_state->state;
7117 struct drm_connector *connector = conn_state->connector;
7118 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7119 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7120 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7121 struct drm_dp_mst_topology_mgr *mst_mgr;
7122 struct drm_dp_mst_port *mst_port;
7123 enum dc_color_depth color_depth;
7124 int clock, bpp = 0;
1bc22f20 7125 bool is_y420 = false;
3261e013
ML
7126
7127 if (!aconnector->port || !aconnector->dc_sink)
7128 return 0;
7129
7130 mst_port = aconnector->port;
7131 mst_mgr = &aconnector->mst_port->mst_mgr;
7132
7133 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7134 return 0;
7135
7136 if (!state->duplicated) {
cbd14ae7 7137 int max_bpc = conn_state->max_requested_bpc;
1bc22f20
SW
7138 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7139 aconnector->force_yuv420_output;
cbd14ae7
SW
7140 color_depth = convert_color_depth_from_display_info(connector,
7141 is_y420,
7142 max_bpc);
3261e013
ML
7143 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7144 clock = adjusted_mode->clock;
dc48529f 7145 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
3261e013
ML
7146 }
7147 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7148 mst_mgr,
7149 mst_port,
1c6c1cb5 7150 dm_new_connector_state->pbn,
03ca9600 7151 dm_mst_get_pbn_divider(aconnector->dc_link));
3261e013
ML
7152 if (dm_new_connector_state->vcpi_slots < 0) {
7153 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7154 return dm_new_connector_state->vcpi_slots;
7155 }
e7b07cee
HW
7156 return 0;
7157}
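/*
 * Rough worked example for the PBN computation above, assuming
 * drm_dp_calc_pbn_mode() uses its usual formula of
 * ceil(clock_khz * bpp * 64 * 1006 / (8 * 54 * 1000 * 1000)),
 * i.e. the peak byte rate in 54/64 MBps units with a ~0.6% margin:
 *
 *   1080p60: clock = 148500 kHz, 8bpc RGB -> bpp = 8 * 3 = 24
 *   PBN = ceil(148500 * 24 * 64 * 1006 / 432000000) = 532
 *
 * drm_dp_atomic_find_vcpi_slots() then converts that PBN into timeslots
 * using the link's pbn_div.
 */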
7158
7159const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7160 .disable = dm_encoder_helper_disable,
7161 .atomic_check = dm_encoder_helper_atomic_check
7162};
7163
d9fe1a4c 7164#if defined(CONFIG_DRM_AMD_DC_DCN)
29b9ba74 7165static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6513104b
HW
7166 struct dc_state *dc_state,
7167 struct dsc_mst_fairness_vars *vars)
29b9ba74
ML
7168{
7169 struct dc_stream_state *stream = NULL;
7170 struct drm_connector *connector;
5760dcb9 7171 struct drm_connector_state *new_con_state;
29b9ba74
ML
7172 struct amdgpu_dm_connector *aconnector;
7173 struct dm_connector_state *dm_conn_state;
6513104b 7174 int i, j, clock;
29b9ba74
ML
7175 int vcpi, pbn_div, pbn = 0;
7176
5760dcb9 7177 for_each_new_connector_in_state(state, connector, new_con_state, i) {
29b9ba74
ML
7178
7179 aconnector = to_amdgpu_dm_connector(connector);
7180
7181 if (!aconnector->port)
7182 continue;
7183
7184 if (!new_con_state || !new_con_state->crtc)
7185 continue;
7186
7187 dm_conn_state = to_dm_connector_state(new_con_state);
7188
7189 for (j = 0; j < dc_state->stream_count; j++) {
7190 stream = dc_state->streams[j];
7191 if (!stream)
7192 continue;
7193
7194 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
7195 break;
7196
7197 stream = NULL;
7198 }
7199
7200 if (!stream)
7201 continue;
7202
7203 if (stream->timing.flags.DSC != 1) {
7204 drm_dp_mst_atomic_enable_dsc(state,
7205 aconnector->port,
7206 dm_conn_state->pbn,
7207 0,
7208 false);
7209 continue;
7210 }
7211
7212 pbn_div = dm_mst_get_pbn_divider(stream->link);
29b9ba74 7213 clock = stream->timing.pix_clk_100hz / 10;
6513104b
HW
7214 /* pbn is calculated by compute_mst_dsc_configs_for_state */
7215 for (j = 0; j < dc_state->stream_count; j++) {
7216 if (vars[j].aconnector == aconnector) {
7217 pbn = vars[j].pbn;
7218 break;
7219 }
7220 }
7221
29b9ba74
ML
7222 vcpi = drm_dp_mst_atomic_enable_dsc(state,
7223 aconnector->port,
7224 pbn, pbn_div,
7225 true);
7226 if (vcpi < 0)
7227 return vcpi;
7228
7229 dm_conn_state->pbn = pbn;
7230 dm_conn_state->vcpi_slots = vcpi;
7231 }
7232 return 0;
7233}
d9fe1a4c 7234#endif
29b9ba74 7235
e7b07cee
HW
7236static void dm_drm_plane_reset(struct drm_plane *plane)
7237{
7238 struct dm_plane_state *amdgpu_state = NULL;
7239
7240 if (plane->state)
7241 plane->funcs->atomic_destroy_state(plane, plane->state);
7242
7243 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
f922237d 7244 WARN_ON(amdgpu_state == NULL);
1f6010a9 7245
7ddaef96
NK
7246 if (amdgpu_state)
7247 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
e7b07cee
HW
7248}
7249
7250static struct drm_plane_state *
7251dm_drm_plane_duplicate_state(struct drm_plane *plane)
7252{
7253 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7254
7255 old_dm_plane_state = to_dm_plane_state(plane->state);
7256 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7257 if (!dm_plane_state)
7258 return NULL;
7259
7260 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7261
3be5262e
HW
7262 if (old_dm_plane_state->dc_state) {
7263 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7264 dc_plane_state_retain(dm_plane_state->dc_state);
e7b07cee
HW
7265 }
7266
7267 return &dm_plane_state->base;
7268}
7269
dfd84d90 7270static void dm_drm_plane_destroy_state(struct drm_plane *plane,
3ee6b26b 7271 struct drm_plane_state *state)
e7b07cee
HW
7272{
7273 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7274
3be5262e
HW
7275 if (dm_plane_state->dc_state)
7276 dc_plane_state_release(dm_plane_state->dc_state);
e7b07cee 7277
0627bbd3 7278 drm_atomic_helper_plane_destroy_state(plane, state);
e7b07cee
HW
7279}
7280
7281static const struct drm_plane_funcs dm_plane_funcs = {
7282 .update_plane = drm_atomic_helper_update_plane,
7283 .disable_plane = drm_atomic_helper_disable_plane,
02680efb 7284 .destroy = drm_primary_helper_destroy,
e7b07cee
HW
7285 .reset = dm_drm_plane_reset,
7286 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
7287 .atomic_destroy_state = dm_drm_plane_destroy_state,
faa37f54 7288 .format_mod_supported = dm_plane_format_mod_supported,
e7b07cee
HW
7289};
7290
3ee6b26b
AD
7291static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7292 struct drm_plane_state *new_state)
e7b07cee
HW
7293{
7294 struct amdgpu_framebuffer *afb;
7295 struct drm_gem_object *obj;
5d43be0c 7296 struct amdgpu_device *adev;
e7b07cee 7297 struct amdgpu_bo *rbo;
e7b07cee 7298 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
0f257b09
CZ
7299 struct list_head list;
7300 struct ttm_validate_buffer tv;
7301 struct ww_acquire_ctx ticket;
5d43be0c
CK
7302 uint32_t domain;
7303 int r;
e7b07cee
HW
7304
7305 if (!new_state->fb) {
4711c033 7306 DRM_DEBUG_KMS("No FB bound\n");
e7b07cee
HW
7307 return 0;
7308 }
7309
7310 afb = to_amdgpu_framebuffer(new_state->fb);
e68d14dd 7311 obj = new_state->fb->obj[0];
e7b07cee 7312 rbo = gem_to_amdgpu_bo(obj);
5d43be0c 7313 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
0f257b09
CZ
7314 INIT_LIST_HEAD(&list);
7315
7316 tv.bo = &rbo->tbo;
7317 tv.num_shared = 1;
7318 list_add(&tv.head, &list);
7319
9165fb87 7320 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
0f257b09
CZ
7321 if (r) {
7322 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
e7b07cee 7323 return r;
0f257b09 7324 }
e7b07cee 7325
5d43be0c 7326 if (plane->type != DRM_PLANE_TYPE_CURSOR)
f2bd8a0e 7327 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5d43be0c
CK
7328 else
7329 domain = AMDGPU_GEM_DOMAIN_VRAM;
e7b07cee 7330
7b7c6c81 7331 r = amdgpu_bo_pin(rbo, domain);
e7b07cee 7332 if (unlikely(r != 0)) {
30b7c614
HW
7333 if (r != -ERESTARTSYS)
7334 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
0f257b09 7335 ttm_eu_backoff_reservation(&ticket, &list);
e7b07cee
HW
7336 return r;
7337 }
7338
bb812f1e
JZ
7339 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7340 if (unlikely(r != 0)) {
7341 amdgpu_bo_unpin(rbo);
0f257b09 7342 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 7343 DRM_ERROR("%p bind failed\n", rbo);
e7b07cee
HW
7344 return r;
7345 }
7df7e505 7346
0f257b09 7347 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 7348
7b7c6c81 7349 afb->address = amdgpu_bo_gpu_offset(rbo);
e7b07cee
HW
7350
7351 amdgpu_bo_ref(rbo);
7352
cf322b49
NK
7353 /**
7354 * We don't do surface updates on planes that have been newly created,
7355 * but we also don't have the afb->address during atomic check.
7356 *
7357 * Fill in buffer attributes depending on the address here, but only on
7358 * newly created planes since they're not being used by DC yet and this
7359 * won't modify global state.
7360 */
7361 dm_plane_state_old = to_dm_plane_state(plane->state);
7362 dm_plane_state_new = to_dm_plane_state(new_state);
7363
3be5262e 7364 if (dm_plane_state_new->dc_state &&
cf322b49
NK
7365 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7366 struct dc_plane_state *plane_state =
7367 dm_plane_state_new->dc_state;
7368 bool force_disable_dcc = !plane_state->dcc.enable;
e7b07cee 7369
320932bf 7370 fill_plane_buffer_attributes(
695af5f9 7371 adev, afb, plane_state->format, plane_state->rotation,
6eed95b0 7372 afb->tiling_flags,
cf322b49
NK
7373 &plane_state->tiling_info, &plane_state->plane_size,
7374 &plane_state->dcc, &plane_state->address,
6eed95b0 7375 afb->tmz_surface, force_disable_dcc);
e7b07cee
HW
7376 }
7377
e7b07cee
HW
7378 return 0;
7379}
7380
3ee6b26b
AD
7381static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7382 struct drm_plane_state *old_state)
e7b07cee
HW
7383{
7384 struct amdgpu_bo *rbo;
e7b07cee
HW
7385 int r;
7386
7387 if (!old_state->fb)
7388 return;
7389
e68d14dd 7390 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
e7b07cee
HW
7391 r = amdgpu_bo_reserve(rbo, false);
7392 if (unlikely(r)) {
7393 DRM_ERROR("failed to reserve rbo before unpin\n");
7394 return;
b830ebc9
HW
7395 }
7396
7397 amdgpu_bo_unpin(rbo);
7398 amdgpu_bo_unreserve(rbo);
7399 amdgpu_bo_unref(&rbo);
e7b07cee
HW
7400}
7401
8c44515b
AP
7402static int dm_plane_helper_check_state(struct drm_plane_state *state,
7403 struct drm_crtc_state *new_crtc_state)
7404{
6300b3bd
MK
7405 struct drm_framebuffer *fb = state->fb;
7406 int min_downscale, max_upscale;
7407 int min_scale = 0;
7408 int max_scale = INT_MAX;
7409
40d916a2 7410 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6300b3bd 7411 if (fb && state->crtc) {
40d916a2
NC
7412 /* Validate viewport to cover the case when only the position changes */
7413 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7414 int viewport_width = state->crtc_w;
7415 int viewport_height = state->crtc_h;
7416
7417 if (state->crtc_x < 0)
7418 viewport_width += state->crtc_x;
7419 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7420 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7421
7422 if (state->crtc_y < 0)
7423 viewport_height += state->crtc_y;
7424 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7425 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7426
4abdb72b
NC
7427 if (viewport_width < 0 || viewport_height < 0) {
7428 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7429 return -EINVAL;
7430 } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7431 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
40d916a2 7432 return -EINVAL;
4abdb72b
NC
7433 } else if (viewport_height < MIN_VIEWPORT_SIZE) {
7434 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
40d916a2 7435 return -EINVAL;
4abdb72b
NC
7436 }
7437
40d916a2
NC
7438 }
7439
7440 /* Get min/max allowed scaling factors from plane caps. */
6300b3bd
MK
7441 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7442 &min_downscale, &max_upscale);
7443 /*
7444 * Convert to drm convention: 16.16 fixed point, instead of dc's
7445 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7446 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7447 */
7448 min_scale = (1000 << 16) / max_upscale;
7449 max_scale = (1000 << 16) / min_downscale;
7450 }
8c44515b 7451
8c44515b 7452 return drm_atomic_helper_check_plane_state(
6300b3bd 7453 state, new_crtc_state, min_scale, max_scale, true, true);
8c44515b
AP
7454}
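/*
 * Worked example for the 16.16 conversion above, assuming a plane cap
 * of max_upscale = 16000 (16.0x in dc's 1.0 == 1000 convention) and
 * min_downscale = 250 (0.25x):
 *
 *   min_scale = (1000 << 16) / 16000 = 4096   (1/16 in 16.16)
 *   max_scale = (1000 << 16) / 250   = 262144 (4.0  in 16.16)
 *
 * The inversion happens because drm expresses scaling as src/dst while
 * dc expresses it as dst/src.
 */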
7455
7578ecda 7456static int dm_plane_atomic_check(struct drm_plane *plane,
7c11b99a 7457 struct drm_atomic_state *state)
cbd19488 7458{
7c11b99a
MR
7459 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7460 plane);
1348969a 7461 struct amdgpu_device *adev = drm_to_adev(plane->dev);
cbd19488 7462 struct dc *dc = adev->dm.dc;
78171832 7463 struct dm_plane_state *dm_plane_state;
695af5f9 7464 struct dc_scaling_info scaling_info;
8c44515b 7465 struct drm_crtc_state *new_crtc_state;
695af5f9 7466 int ret;
78171832 7467
ba5c1649 7468 trace_amdgpu_dm_plane_atomic_check(new_plane_state);
e8a98235 7469
ba5c1649 7470 dm_plane_state = to_dm_plane_state(new_plane_state);
cbd19488 7471
3be5262e 7472 if (!dm_plane_state->dc_state)
9a3329b1 7473 return 0;
cbd19488 7474
8c44515b 7475 new_crtc_state =
dec92020 7476 drm_atomic_get_new_crtc_state(state,
ba5c1649 7477 new_plane_state->crtc);
8c44515b
AP
7478 if (!new_crtc_state)
7479 return -EINVAL;
7480
ba5c1649 7481 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8c44515b
AP
7482 if (ret)
7483 return ret;
7484
ba5c1649 7485 ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
695af5f9
NK
7486 if (ret)
7487 return ret;
a05bcff1 7488
62c933f9 7489 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
cbd19488
AG
7490 return 0;
7491
7492 return -EINVAL;
7493}
7494
674e78ac 7495static int dm_plane_atomic_async_check(struct drm_plane *plane,
5ddb0bd4 7496 struct drm_atomic_state *state)
674e78ac
NK
7497{
7498 /* Only support async updates on cursor planes. */
7499 if (plane->type != DRM_PLANE_TYPE_CURSOR)
7500 return -EINVAL;
7501
7502 return 0;
7503}
7504
7505static void dm_plane_atomic_async_update(struct drm_plane *plane,
5ddb0bd4 7506 struct drm_atomic_state *state)
674e78ac 7507{
5ddb0bd4
MR
7508 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7509 plane);
674e78ac 7510 struct drm_plane_state *old_state =
5ddb0bd4 7511 drm_atomic_get_old_plane_state(state, plane);
674e78ac 7512
e8a98235
RS
7513 trace_amdgpu_dm_atomic_update_cursor(new_state);
7514
332af874 7515 swap(plane->state->fb, new_state->fb);
674e78ac
NK
7516
7517 plane->state->src_x = new_state->src_x;
7518 plane->state->src_y = new_state->src_y;
7519 plane->state->src_w = new_state->src_w;
7520 plane->state->src_h = new_state->src_h;
7521 plane->state->crtc_x = new_state->crtc_x;
7522 plane->state->crtc_y = new_state->crtc_y;
7523 plane->state->crtc_w = new_state->crtc_w;
7524 plane->state->crtc_h = new_state->crtc_h;
7525
7526 handle_cursor_update(plane, old_state);
7527}
7528
e7b07cee
HW
7529static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7530 .prepare_fb = dm_plane_helper_prepare_fb,
7531 .cleanup_fb = dm_plane_helper_cleanup_fb,
cbd19488 7532 .atomic_check = dm_plane_atomic_check,
674e78ac
NK
7533 .atomic_async_check = dm_plane_atomic_async_check,
7534 .atomic_async_update = dm_plane_atomic_async_update
e7b07cee
HW
7535};
7536
7537/*
7538 * TODO: these are currently initialized to rgb formats only.
7539 * For future use cases we should either initialize them dynamically based on
7540 * plane capabilities, or initialize this array to all formats, so internal drm
1f6010a9 7541 * check will succeed, and let DC implement the proper check
e7b07cee 7542 */
d90371b0 7543static const uint32_t rgb_formats[] = {
e7b07cee
HW
7544 DRM_FORMAT_XRGB8888,
7545 DRM_FORMAT_ARGB8888,
7546 DRM_FORMAT_RGBA8888,
7547 DRM_FORMAT_XRGB2101010,
7548 DRM_FORMAT_XBGR2101010,
7549 DRM_FORMAT_ARGB2101010,
7550 DRM_FORMAT_ABGR2101010,
58020403
MK
7551 DRM_FORMAT_XRGB16161616,
7552 DRM_FORMAT_XBGR16161616,
7553 DRM_FORMAT_ARGB16161616,
7554 DRM_FORMAT_ABGR16161616,
bcd47f60
MR
7555 DRM_FORMAT_XBGR8888,
7556 DRM_FORMAT_ABGR8888,
46dd9ff7 7557 DRM_FORMAT_RGB565,
e7b07cee
HW
7558};
7559
0d579c7e
NK
7560static const uint32_t overlay_formats[] = {
7561 DRM_FORMAT_XRGB8888,
7562 DRM_FORMAT_ARGB8888,
7563 DRM_FORMAT_RGBA8888,
7564 DRM_FORMAT_XBGR8888,
7565 DRM_FORMAT_ABGR8888,
7267a1a9 7566 DRM_FORMAT_RGB565
e7b07cee
HW
7567};
7568
7569static const u32 cursor_formats[] = {
7570 DRM_FORMAT_ARGB8888
7571};
7572
37c6a93b
NK
7573static int get_plane_formats(const struct drm_plane *plane,
7574 const struct dc_plane_cap *plane_cap,
7575 uint32_t *formats, int max_formats)
e7b07cee 7576{
37c6a93b
NK
7577 int i, num_formats = 0;
7578
7579 /*
7580 * TODO: Query support for each group of formats directly from
7581 * DC plane caps. This will require adding more formats to the
7582 * caps list.
7583 */
e7b07cee 7584
f180b4bc 7585 switch (plane->type) {
e7b07cee 7586 case DRM_PLANE_TYPE_PRIMARY:
37c6a93b
NK
7587 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7588 if (num_formats >= max_formats)
7589 break;
7590
7591 formats[num_formats++] = rgb_formats[i];
7592 }
7593
ea36ad34 7594 if (plane_cap && plane_cap->pixel_format_support.nv12)
37c6a93b 7595 formats[num_formats++] = DRM_FORMAT_NV12;
cbec6477
SW
7596 if (plane_cap && plane_cap->pixel_format_support.p010)
7597 formats[num_formats++] = DRM_FORMAT_P010;
492548dc
SW
7598 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7599 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7600 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
2a5195dc
MK
7601 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7602 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
492548dc 7603 }
e7b07cee 7604 break;
37c6a93b 7605
e7b07cee 7606 case DRM_PLANE_TYPE_OVERLAY:
37c6a93b
NK
7607 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7608 if (num_formats >= max_formats)
7609 break;
7610
7611 formats[num_formats++] = overlay_formats[i];
7612 }
e7b07cee 7613 break;
37c6a93b 7614
e7b07cee 7615 case DRM_PLANE_TYPE_CURSOR:
37c6a93b
NK
7616 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7617 if (num_formats >= max_formats)
7618 break;
7619
7620 formats[num_formats++] = cursor_formats[i];
7621 }
e7b07cee
HW
7622 break;
7623 }
7624
37c6a93b
NK
7625 return num_formats;
7626}
7627
7628static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7629 struct drm_plane *plane,
7630 unsigned long possible_crtcs,
7631 const struct dc_plane_cap *plane_cap)
7632{
7633 uint32_t formats[32];
7634 int num_formats;
7635 int res = -EPERM;
ecc874a6 7636 unsigned int supported_rotations;
faa37f54 7637 uint64_t *modifiers = NULL;
37c6a93b
NK
7638
7639 num_formats = get_plane_formats(plane, plane_cap, formats,
7640 ARRAY_SIZE(formats));
7641
faa37f54
BN
7642 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7643 if (res)
7644 return res;
7645
4a580877 7646 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
37c6a93b 7647 &dm_plane_funcs, formats, num_formats,
faa37f54
BN
7648 modifiers, plane->type, NULL);
7649 kfree(modifiers);
37c6a93b
NK
7650 if (res)
7651 return res;
7652
cc1fec57
NK
7653 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7654 plane_cap && plane_cap->per_pixel_alpha) {
d74004b6
NK
7655 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7656 BIT(DRM_MODE_BLEND_PREMULTI);
7657
7658 drm_plane_create_alpha_property(plane);
7659 drm_plane_create_blend_mode_property(plane, blend_caps);
7660 }
7661
fc8e5230 7662 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
00755bb7
SW
7663 plane_cap &&
7664 (plane_cap->pixel_format_support.nv12 ||
7665 plane_cap->pixel_format_support.p010)) {
fc8e5230
NK
7666 /* This only affects YUV formats. */
7667 drm_plane_create_color_properties(
7668 plane,
7669 BIT(DRM_COLOR_YCBCR_BT601) |
00755bb7
SW
7670 BIT(DRM_COLOR_YCBCR_BT709) |
7671 BIT(DRM_COLOR_YCBCR_BT2020),
fc8e5230
NK
7672 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7673 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7674 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7675 }
7676
ecc874a6
PLG
7677 supported_rotations =
7678 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7679 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7680
1347385f
SS
7681 if (dm->adev->asic_type >= CHIP_BONAIRE &&
7682 plane->type != DRM_PLANE_TYPE_CURSOR)
f784112f
MR
7683 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7684 supported_rotations);
ecc874a6 7685
f180b4bc 7686 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
e7b07cee 7687
96719c54 7688 /* Create (reset) the plane state */
f180b4bc
HW
7689 if (plane->funcs->reset)
7690 plane->funcs->reset(plane);
96719c54 7691
37c6a93b 7692 return 0;
e7b07cee
HW
7693}
7694
7578ecda
AD
7695static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7696 struct drm_plane *plane,
7697 uint32_t crtc_index)
e7b07cee
HW
7698{
7699 struct amdgpu_crtc *acrtc = NULL;
f180b4bc 7700 struct drm_plane *cursor_plane;
e7b07cee
HW
7701
7702 int res = -ENOMEM;
7703
7704 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7705 if (!cursor_plane)
7706 goto fail;
7707
f180b4bc 7708 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
cc1fec57 7709 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
e7b07cee
HW
7710
7711 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7712 if (!acrtc)
7713 goto fail;
7714
7715 res = drm_crtc_init_with_planes(
7716 dm->ddev,
7717 &acrtc->base,
7718 plane,
f180b4bc 7719 cursor_plane,
e7b07cee
HW
7720 &amdgpu_dm_crtc_funcs, NULL);
7721
7722 if (res)
7723 goto fail;
7724
7725 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7726
96719c54
HW
7727 /* Create (reset) the plane state */
7728 if (acrtc->base.funcs->reset)
7729 acrtc->base.funcs->reset(&acrtc->base);
7730
e7b07cee
HW
7731 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7732 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7733
7734 acrtc->crtc_id = crtc_index;
7735 acrtc->base.enabled = false;
c37e2d29 7736 acrtc->otg_inst = -1;
e7b07cee
HW
7737
7738 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
236d0e4f
LSL
7739 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7740 true, MAX_COLOR_LUT_ENTRIES);
086247a4 7741 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
e2881d6d 7742
e7b07cee
HW
7743 return 0;
7744
7745fail:
b830ebc9
HW
7746 kfree(acrtc);
7747 kfree(cursor_plane);
e7b07cee
HW
7748 return res;
7749}
7750
7751
7752static int to_drm_connector_type(enum signal_type st)
7753{
7754 switch (st) {
7755 case SIGNAL_TYPE_HDMI_TYPE_A:
7756 return DRM_MODE_CONNECTOR_HDMIA;
7757 case SIGNAL_TYPE_EDP:
7758 return DRM_MODE_CONNECTOR_eDP;
11c3ee48
AD
7759 case SIGNAL_TYPE_LVDS:
7760 return DRM_MODE_CONNECTOR_LVDS;
e7b07cee
HW
7761 case SIGNAL_TYPE_RGB:
7762 return DRM_MODE_CONNECTOR_VGA;
7763 case SIGNAL_TYPE_DISPLAY_PORT:
7764 case SIGNAL_TYPE_DISPLAY_PORT_MST:
7765 return DRM_MODE_CONNECTOR_DisplayPort;
7766 case SIGNAL_TYPE_DVI_DUAL_LINK:
7767 case SIGNAL_TYPE_DVI_SINGLE_LINK:
7768 return DRM_MODE_CONNECTOR_DVID;
7769 case SIGNAL_TYPE_VIRTUAL:
7770 return DRM_MODE_CONNECTOR_VIRTUAL;
7771
7772 default:
7773 return DRM_MODE_CONNECTOR_Unknown;
7774 }
7775}
7776
2b4c1c05
DV
7777static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7778{
62afb4ad
JRS
7779 struct drm_encoder *encoder;
7780
7781 /* There is only one encoder per connector */
7782 drm_connector_for_each_possible_encoder(connector, encoder)
7783 return encoder;
7784
7785 return NULL;
2b4c1c05
DV
7786}
7787
e7b07cee
HW
7788static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7789{
e7b07cee
HW
7790 struct drm_encoder *encoder;
7791 struct amdgpu_encoder *amdgpu_encoder;
7792
2b4c1c05 7793 encoder = amdgpu_dm_connector_to_encoder(connector);
e7b07cee
HW
7794
7795 if (encoder == NULL)
7796 return;
7797
7798 amdgpu_encoder = to_amdgpu_encoder(encoder);
7799
7800 amdgpu_encoder->native_mode.clock = 0;
7801
7802 if (!list_empty(&connector->probed_modes)) {
7803 struct drm_display_mode *preferred_mode = NULL;
b830ebc9 7804
e7b07cee 7805 list_for_each_entry(preferred_mode,
b830ebc9
HW
7806 &connector->probed_modes,
7807 head) {
7808 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7809 amdgpu_encoder->native_mode = *preferred_mode;
7810
e7b07cee
HW
7811 break;
7812 }
7813
7814 }
7815}
7816
3ee6b26b
AD
7817static struct drm_display_mode *
7818amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7819 char *name,
7820 int hdisplay, int vdisplay)
e7b07cee
HW
7821{
7822 struct drm_device *dev = encoder->dev;
7823 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7824 struct drm_display_mode *mode = NULL;
7825 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7826
7827 mode = drm_mode_duplicate(dev, native_mode);
7828
b830ebc9 7829 if (mode == NULL)
e7b07cee
HW
7830 return NULL;
7831
7832 mode->hdisplay = hdisplay;
7833 mode->vdisplay = vdisplay;
7834 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
090afc1e 7835 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
e7b07cee
HW
7836
7837 return mode;
7838
7839}
7840
7841static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
3ee6b26b 7842 struct drm_connector *connector)
e7b07cee
HW
7843{
7844 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7845 struct drm_display_mode *mode = NULL;
7846 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
c84dec2f
HW
7847 struct amdgpu_dm_connector *amdgpu_dm_connector =
7848 to_amdgpu_dm_connector(connector);
e7b07cee
HW
7849 int i;
7850 int n;
7851 struct mode_size {
7852 char name[DRM_DISPLAY_MODE_LEN];
7853 int w;
7854 int h;
b830ebc9 7855 } common_modes[] = {
e7b07cee
HW
7856 { "640x480", 640, 480},
7857 { "800x600", 800, 600},
7858 { "1024x768", 1024, 768},
7859 { "1280x720", 1280, 720},
7860 { "1280x800", 1280, 800},
7861 {"1280x1024", 1280, 1024},
7862 { "1440x900", 1440, 900},
7863 {"1680x1050", 1680, 1050},
7864 {"1600x1200", 1600, 1200},
7865 {"1920x1080", 1920, 1080},
7866 {"1920x1200", 1920, 1200}
7867 };
7868
b830ebc9 7869 n = ARRAY_SIZE(common_modes);
e7b07cee
HW
7870
7871 for (i = 0; i < n; i++) {
7872 struct drm_display_mode *curmode = NULL;
7873 bool mode_existed = false;
7874
7875 if (common_modes[i].w > native_mode->hdisplay ||
b830ebc9
HW
7876 common_modes[i].h > native_mode->vdisplay ||
7877 (common_modes[i].w == native_mode->hdisplay &&
7878 common_modes[i].h == native_mode->vdisplay))
7879 continue;
e7b07cee
HW
7880
7881 list_for_each_entry(curmode, &connector->probed_modes, head) {
7882 if (common_modes[i].w == curmode->hdisplay &&
b830ebc9 7883 common_modes[i].h == curmode->vdisplay) {
e7b07cee
HW
7884 mode_existed = true;
7885 break;
7886 }
7887 }
7888
7889 if (mode_existed)
7890 continue;
7891
7892 mode = amdgpu_dm_create_common_mode(encoder,
7893 common_modes[i].name, common_modes[i].w,
7894 common_modes[i].h);
7895 drm_mode_probed_add(connector, mode);
c84dec2f 7896 amdgpu_dm_connector->num_modes++;
e7b07cee
HW
7897 }
7898}
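/*
 * Example of the filtering above, assuming a native 1920x1200 panel:
 * every common mode that fits the native mode in both dimensions
 * (1920x1080, 1680x1050, 1600x1200, ... down to 640x480) is added
 * unless an identical mode was already probed; 1920x1200 itself is
 * skipped because it matches the native resolution exactly.
 */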
7899
d77de788
SS
7900static void amdgpu_set_panel_orientation(struct drm_connector *connector)
7901{
7902 struct drm_encoder *encoder;
7903 struct amdgpu_encoder *amdgpu_encoder;
7904 const struct drm_display_mode *native_mode;
7905
7906 if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
7907 connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
7908 return;
7909
7910 encoder = amdgpu_dm_connector_to_encoder(connector);
7911 if (!encoder)
7912 return;
7913
7914 amdgpu_encoder = to_amdgpu_encoder(encoder);
7915
7916 native_mode = &amdgpu_encoder->native_mode;
7917 if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
7918 return;
7919
7920 drm_connector_set_panel_orientation_with_quirk(connector,
7921 DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
7922 native_mode->hdisplay,
7923 native_mode->vdisplay);
7924}
7925
3ee6b26b
AD
7926static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7927 struct edid *edid)
e7b07cee 7928{
c84dec2f
HW
7929 struct amdgpu_dm_connector *amdgpu_dm_connector =
7930 to_amdgpu_dm_connector(connector);
e7b07cee
HW
7931
7932 if (edid) {
7933 /* empty probed_modes */
7934 INIT_LIST_HEAD(&connector->probed_modes);
c84dec2f 7935 amdgpu_dm_connector->num_modes =
e7b07cee
HW
7936 drm_add_edid_modes(connector, edid);
7937
f1e5e913
YMM
7938 /* Sort the probed modes before calling
7939 * amdgpu_dm_get_native_mode(), since an EDID can have
7940 * more than one preferred mode. Modes later in the
7941 * probed list can have a higher preferred resolution.
7942 * For example, 3840x2160 in the base EDID preferred
7943 * timing, with a 4096x2160 preferred resolution in a
7944 * DID extension block later.
7945 */
7946 drm_mode_sort(&connector->probed_modes);
e7b07cee 7947 amdgpu_dm_get_native_mode(connector);
f9b4f20c
SW
7948
7949 /* Freesync capabilities are reset by calling
7950 * drm_add_edid_modes() and need to be
7951 * restored here.
7952 */
7953 amdgpu_dm_update_freesync_caps(connector, edid);
d77de788
SS
7954
7955 amdgpu_set_panel_orientation(connector);
a8d8d3dc 7956 } else {
c84dec2f 7957 amdgpu_dm_connector->num_modes = 0;
a8d8d3dc 7958 }
e7b07cee
HW
7959}
7960
a85ba005
NC
7961static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7962 struct drm_display_mode *mode)
7963{
7964 struct drm_display_mode *m;
7965
7966 list_for_each_entry (m, &aconnector->base.probed_modes, head) {
7967 if (drm_mode_equal(m, mode))
7968 return true;
7969 }
7970
7971 return false;
7972}
7973
7974static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7975{
7976 const struct drm_display_mode *m;
7977 struct drm_display_mode *new_mode;
7978 uint i;
7979 uint32_t new_modes_count = 0;
7980
7981 /* Standard FPS values
7982 *
7983 * 23.976 - TV/NTSC
7984 * 24 - Cinema
7985 * 25 - TV/PAL
7986 * 29.97 - TV/NTSC
7987 * 30 - TV/NTSC
7988 * 48 - Cinema HFR
7989 * 50 - TV/PAL
7990 * 60 - Commonly used
7991 * 48,72,96 - Multiples of 24
7992 */
9ce5ed6e
CIK
7993 static const uint32_t common_rates[] = {
7994 23976, 24000, 25000, 29970, 30000,
7995 48000, 50000, 60000, 72000, 96000
7996 };
a85ba005
NC
7997
7998 /*
7999 * Find mode with highest refresh rate with the same resolution
8000 * as the preferred mode. Some monitors report a preferred mode whose
8001 * refresh rate is lower than the highest supported at that resolution.
8002 */
8003
8004 m = get_highest_refresh_rate_mode(aconnector, true);
8005 if (!m)
8006 return 0;
8007
8008 for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8009 uint64_t target_vtotal, target_vtotal_diff;
8010 uint64_t num, den;
8011
8012 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8013 continue;
8014
8015 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8016 common_rates[i] > aconnector->max_vfreq * 1000)
8017 continue;
8018
8019 num = (unsigned long long)m->clock * 1000 * 1000;
8020 den = common_rates[i] * (unsigned long long)m->htotal;
8021 target_vtotal = div_u64(num, den);
8022 target_vtotal_diff = target_vtotal - m->vtotal;
8023
8024 /* Check for illegal modes */
8025 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8026 m->vsync_end + target_vtotal_diff < m->vsync_start ||
8027 m->vtotal + target_vtotal_diff < m->vsync_end)
8028 continue;
8029
8030 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8031 if (!new_mode)
8032 goto out;
8033
8034 new_mode->vtotal += (u16)target_vtotal_diff;
8035 new_mode->vsync_start += (u16)target_vtotal_diff;
8036 new_mode->vsync_end += (u16)target_vtotal_diff;
8037 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8038 new_mode->type |= DRM_MODE_TYPE_DRIVER;
8039
8040 if (!is_duplicate_mode(aconnector, new_mode)) {
8041 drm_mode_probed_add(&aconnector->base, new_mode);
8042 new_modes_count += 1;
8043 } else
8044 drm_mode_destroy(aconnector->base.dev, new_mode);
8045 }
8046 out:
8047 return new_modes_count;
8048}
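/*
 * Worked example for the vtotal stretch above, using a hypothetical
 * 1920x1080@60 base mode with clock = 148500 kHz, htotal = 2200 and
 * vtotal = 1125, targeting the 48 Hz entry (common_rates[] = 48000):
 *
 *   target_vtotal      = 148500000000 / (48000 * 2200) = 1406 (truncated)
 *   target_vtotal_diff = 1406 - 1125 = 281
 *
 * vsync_start, vsync_end and vtotal each grow by 281 lines, producing a
 * front-porch-stretched variant that refreshes at roughly 48 Hz.
 */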
8049
8050static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8051 struct edid *edid)
8052{
8053 struct amdgpu_dm_connector *amdgpu_dm_connector =
8054 to_amdgpu_dm_connector(connector);
8055
8056 if (!(amdgpu_freesync_vid_mode && edid))
8057 return;
fe8858bb 8058
a85ba005
NC
8059 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8060 amdgpu_dm_connector->num_modes +=
8061 add_fs_modes(amdgpu_dm_connector);
8062}
8063
7578ecda 8064static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
e7b07cee 8065{
c84dec2f
HW
8066 struct amdgpu_dm_connector *amdgpu_dm_connector =
8067 to_amdgpu_dm_connector(connector);
e7b07cee 8068 struct drm_encoder *encoder;
c84dec2f 8069 struct edid *edid = amdgpu_dm_connector->edid;
e7b07cee 8070
2b4c1c05 8071 encoder = amdgpu_dm_connector_to_encoder(connector);
3e332d3a 8072
5c0e6840 8073 if (!drm_edid_is_valid(edid)) {
1b369d3c
ML
8074 amdgpu_dm_connector->num_modes =
8075 drm_add_modes_noedid(connector, 640, 480);
85ee15d6
ML
8076 } else {
8077 amdgpu_dm_connector_ddc_get_modes(connector, edid);
8078 amdgpu_dm_connector_add_common_modes(encoder, connector);
a85ba005 8079 amdgpu_dm_connector_add_freesync_modes(connector, edid);
85ee15d6 8080 }
3e332d3a 8081 amdgpu_dm_fbc_init(connector);
5099114b 8082
c84dec2f 8083 return amdgpu_dm_connector->num_modes;
e7b07cee
HW
8084}
8085
3ee6b26b
AD
8086void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8087 struct amdgpu_dm_connector *aconnector,
8088 int connector_type,
8089 struct dc_link *link,
8090 int link_index)
e7b07cee 8091{
1348969a 8092 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
e7b07cee 8093
f04bee34
NK
8094 /*
8095 * Some of the properties below require access to state, like bpc.
8096 * Allocate some default initial connector state with our reset helper.
8097 */
8098 if (aconnector->base.funcs->reset)
8099 aconnector->base.funcs->reset(&aconnector->base);
8100
e7b07cee
HW
8101 aconnector->connector_id = link_index;
8102 aconnector->dc_link = link;
8103 aconnector->base.interlace_allowed = false;
8104 aconnector->base.doublescan_allowed = false;
8105 aconnector->base.stereo_allowed = false;
8106 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8107 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6ce8f316 8108 aconnector->audio_inst = -1;
e7b07cee
HW
8109 mutex_init(&aconnector->hpd_lock);
8110
1f6010a9
DF
8111 /*
8112 * Configure HPD hot plug support. connector->polled defaults to 0,
b830ebc9
HW
8113 * which means HPD hot plug is not supported
8114 */
e7b07cee
HW
8115 switch (connector_type) {
8116 case DRM_MODE_CONNECTOR_HDMIA:
8117 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
e7baae1c 8118 aconnector->base.ycbcr_420_allowed =
9ea59d5a 8119 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
e7b07cee
HW
8120 break;
8121 case DRM_MODE_CONNECTOR_DisplayPort:
8122 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
f6e03f80
JS
8123 if (link->is_dig_mapping_flexible &&
8124 link->dc->res_pool->funcs->link_encs_assign) {
8125 link->link_enc =
8126 link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
8127 if (!link->link_enc)
8128 link->link_enc =
8129 link_enc_cfg_get_next_avail_link_enc(link->ctx->dc);
8130 }
8131
8132 if (link->link_enc)
8133 aconnector->base.ycbcr_420_allowed =
9ea59d5a 8134 link->link_enc->features.dp_ycbcr420_supported ? true : false;
e7b07cee
HW
8135 break;
8136 case DRM_MODE_CONNECTOR_DVID:
8137 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8138 break;
8139 default:
8140 break;
8141 }
8142
8143 drm_object_attach_property(&aconnector->base.base,
8144 dm->ddev->mode_config.scaling_mode_property,
8145 DRM_MODE_SCALE_NONE);
8146
8147 drm_object_attach_property(&aconnector->base.base,
8148 adev->mode_info.underscan_property,
8149 UNDERSCAN_OFF);
8150 drm_object_attach_property(&aconnector->base.base,
8151 adev->mode_info.underscan_hborder_property,
8152 0);
8153 drm_object_attach_property(&aconnector->base.base,
8154 adev->mode_info.underscan_vborder_property,
8155 0);
1825fd34 8156
8c61b31e
JFZ
8157 if (!aconnector->mst_port)
8158 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
1825fd34 8159
4a8ca46b
RL
8160 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
8161 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8162 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
e7b07cee 8163
c1ee92f9 8164 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
5cb32419 8165 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
c1ee92f9
DF
8166 drm_object_attach_property(&aconnector->base.base,
8167 adev->mode_info.abm_level_property, 0);
8168 }
bb47de73
NK
8169
8170 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7fad8da1
NK
8171 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8172 connector_type == DRM_MODE_CONNECTOR_eDP) {
e057b52c 8173 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
88694af9 8174
8c61b31e
JFZ
8175 if (!aconnector->mst_port)
8176 drm_connector_attach_vrr_capable_property(&aconnector->base);
8177
0c8620d6 8178#ifdef CONFIG_DRM_AMD_DC_HDCP
e22bb562 8179 if (adev->dm.hdcp_workqueue)
53e108aa 8180 drm_connector_attach_content_protection_property(&aconnector->base, true);
0c8620d6 8181#endif
bb47de73 8182 }
e7b07cee
HW
8183}
8184
7578ecda
AD
8185static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8186 struct i2c_msg *msgs, int num)
e7b07cee
HW
8187{
8188 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8189 struct ddc_service *ddc_service = i2c->ddc_service;
8190 struct i2c_command cmd;
8191 int i;
8192 int result = -EIO;
8193
b830ebc9 8194 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
e7b07cee
HW
8195
8196 if (!cmd.payloads)
8197 return result;
8198
8199 cmd.number_of_payloads = num;
8200 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8201 cmd.speed = 100;
8202
8203 for (i = 0; i < num; i++) {
8204 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8205 cmd.payloads[i].address = msgs[i].addr;
8206 cmd.payloads[i].length = msgs[i].len;
8207 cmd.payloads[i].data = msgs[i].buf;
8208 }
8209
c85e6e54
DF
8210 if (dc_submit_i2c(
8211 ddc_service->ctx->dc,
8212 ddc_service->ddc_pin->hw_info.ddc_channel,
e7b07cee
HW
8213 &cmd))
8214 result = num;
8215
8216 kfree(cmd.payloads);
8217 return result;
8218}
8219
7578ecda 8220static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
e7b07cee
HW
8221{
8222 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8223}
8224
8225static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8226 .master_xfer = amdgpu_dm_i2c_xfer,
8227 .functionality = amdgpu_dm_i2c_func,
8228};
8229
3ee6b26b
AD
8230static struct amdgpu_i2c_adapter *
8231create_i2c(struct ddc_service *ddc_service,
8232 int link_index,
8233 int *res)
e7b07cee
HW
8234{
8235 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8236 struct amdgpu_i2c_adapter *i2c;
8237
b830ebc9 8238 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
2a55f096
ES
8239 if (!i2c)
8240 return NULL;
e7b07cee
HW
8241 i2c->base.owner = THIS_MODULE;
8242 i2c->base.class = I2C_CLASS_DDC;
8243 i2c->base.dev.parent = &adev->pdev->dev;
8244 i2c->base.algo = &amdgpu_dm_i2c_algo;
b830ebc9 8245 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
e7b07cee
HW
8246 i2c_set_adapdata(&i2c->base, i2c);
8247 i2c->ddc_service = ddc_service;
f6e03f80
JS
8248 if (i2c->ddc_service->ddc_pin)
8249 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
e7b07cee
HW
8250
8251 return i2c;
8252}
8253
89fc8d4e 8254
1f6010a9
DF
8255/*
8256 * Note: this function assumes that dc_link_detect() was called for the
b830ebc9
HW
8257 * dc_link which will be represented by this aconnector.
8258 */
7578ecda
AD
8259static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8260 struct amdgpu_dm_connector *aconnector,
8261 uint32_t link_index,
8262 struct amdgpu_encoder *aencoder)
e7b07cee
HW
8263{
8264 int res = 0;
8265 int connector_type;
8266 struct dc *dc = dm->dc;
8267 struct dc_link *link = dc_get_link_at_index(dc, link_index);
8268 struct amdgpu_i2c_adapter *i2c;
9a227d26
TSD
8269
8270 link->priv = aconnector;
e7b07cee 8271
f1ad2f5e 8272 DRM_DEBUG_DRIVER("%s()\n", __func__);
e7b07cee
HW
8273
8274 i2c = create_i2c(link->ddc, link->link_index, &res);
2a55f096
ES
8275 if (!i2c) {
8276 DRM_ERROR("Failed to create i2c adapter data\n");
8277 return -ENOMEM;
8278 }
8279
e7b07cee
HW
8280 aconnector->i2c = i2c;
8281 res = i2c_add_adapter(&i2c->base);
8282
8283 if (res) {
8284 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8285 goto out_free;
8286 }
8287
8288 connector_type = to_drm_connector_type(link->connector_signal);
8289
17165de2 8290 res = drm_connector_init_with_ddc(
e7b07cee
HW
8291 dm->ddev,
8292 &aconnector->base,
8293 &amdgpu_dm_connector_funcs,
17165de2
AP
8294 connector_type,
8295 &i2c->base);
e7b07cee
HW
8296
8297 if (res) {
8298 DRM_ERROR("connector_init failed\n");
8299 aconnector->connector_id = -1;
8300 goto out_free;
8301 }
8302
8303 drm_connector_helper_add(
8304 &aconnector->base,
8305 &amdgpu_dm_connector_helper_funcs);
8306
8307 amdgpu_dm_connector_init_helper(
8308 dm,
8309 aconnector,
8310 connector_type,
8311 link,
8312 link_index);
8313
cde4c44d 8314 drm_connector_attach_encoder(
e7b07cee
HW
8315 &aconnector->base, &aencoder->base);
8316
e7b07cee
HW
8317 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8318 || connector_type == DRM_MODE_CONNECTOR_eDP)
7daec99f 8319 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
e7b07cee 8320
e7b07cee
HW
8321out_free:
8322 if (res) {
8323 kfree(i2c);
8324 aconnector->i2c = NULL;
8325 }
8326 return res;
8327}
8328
8329int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8330{
8331 switch (adev->mode_info.num_crtc) {
8332 case 1:
8333 return 0x1;
8334 case 2:
8335 return 0x3;
8336 case 3:
8337 return 0x7;
8338 case 4:
8339 return 0xf;
8340 case 5:
8341 return 0x1f;
8342 case 6:
8343 default:
8344 return 0x3f;
8345 }
8346}
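/*
 * Equivalent closed form (illustrative sketch, not from the original
 * source): the switch above sets the low num_crtc bits of the mask,
 * saturating at six CRTCs for anything outside the 1..5 range.
 */
static inline u32 encoder_crtc_mask_sketch(int num_crtc)
{
	int n = (num_crtc >= 1 && num_crtc <= 5) ? num_crtc : 6;

	return (1u << n) - 1;	/* e.g. n == 4 gives 0xf */
}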
8347
7578ecda
AD
8348static int amdgpu_dm_encoder_init(struct drm_device *dev,
8349 struct amdgpu_encoder *aencoder,
8350 uint32_t link_index)
e7b07cee 8351{
1348969a 8352 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
8353
8354 int res = drm_encoder_init(dev,
8355 &aencoder->base,
8356 &amdgpu_dm_encoder_funcs,
8357 DRM_MODE_ENCODER_TMDS,
8358 NULL);
8359
8360 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8361
8362 if (!res)
8363 aencoder->encoder_id = link_index;
8364 else
8365 aencoder->encoder_id = -1;
8366
8367 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8368
8369 return res;
8370}
8371
3ee6b26b
AD
8372static void manage_dm_interrupts(struct amdgpu_device *adev,
8373 struct amdgpu_crtc *acrtc,
8374 bool enable)
e7b07cee
HW
8375{
8376 /*
8fe684e9
NK
8377 * We have no guarantee that the frontend index maps to the same
8378 * backend index - some even map to more than one.
8379 *
8380 * TODO: Use a different interrupt or check DC itself for the mapping.
e7b07cee
HW
8381 */
8382 int irq_type =
734dd01d 8383 amdgpu_display_crtc_idx_to_irq_type(
e7b07cee
HW
8384 adev,
8385 acrtc->crtc_id);
8386
8387 if (enable) {
8388 drm_crtc_vblank_on(&acrtc->base);
8389 amdgpu_irq_get(
8390 adev,
8391 &adev->pageflip_irq,
8392 irq_type);
86bc2219
WL
8393#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8394 amdgpu_irq_get(
8395 adev,
8396 &adev->vline0_irq,
8397 irq_type);
8398#endif
e7b07cee 8399 } else {
86bc2219
WL
8400#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8401 amdgpu_irq_put(
8402 adev,
8403 &adev->vline0_irq,
8404 irq_type);
8405#endif
e7b07cee
HW
8406 amdgpu_irq_put(
8407 adev,
8408 &adev->pageflip_irq,
8409 irq_type);
8410 drm_crtc_vblank_off(&acrtc->base);
8411 }
8412}
8413
8fe684e9
NK
8414static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8415 struct amdgpu_crtc *acrtc)
8416{
8417 int irq_type =
8418 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8419
8420 /**
 8421	 * This reads the current state for the IRQ and forcibly reapplies
 8422	 * the setting to hardware.
8423 */
8424 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8425}
8426
3ee6b26b
AD
8427static bool
8428is_scaling_state_different(const struct dm_connector_state *dm_state,
8429 const struct dm_connector_state *old_dm_state)
e7b07cee
HW
8430{
8431 if (dm_state->scaling != old_dm_state->scaling)
8432 return true;
8433 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8434 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8435 return true;
8436 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8437 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8438 return true;
b830ebc9
HW
8439 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8440 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8441 return true;
e7b07cee
HW
8442 return false;
8443}
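/*
 * Illustrative note: a toggle of underscan_enable only counts as a
 * scaling change when the relevant borders are non-zero; with zero
 * borders the displayed image is identical either way, so no stream
 * update is needed.
 */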
8444
0c8620d6
BL
8445#ifdef CONFIG_DRM_AMD_DC_HDCP
8446static bool is_content_protection_different(struct drm_connector_state *state,
8447 const struct drm_connector_state *old_state,
8448 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8449{
8450 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
97f6c917 8451 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
0c8620d6 8452
31c0ed90 8453	/* Handles: Type0/1 change */
53e108aa
BL
8454 if (old_state->hdcp_content_type != state->hdcp_content_type &&
8455 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8456 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8457 return true;
8458 }
8459
31c0ed90
BL
 8460	/* CP is being re-enabled, ignore this
8461 *
8462 * Handles: ENABLED -> DESIRED
8463 */
0c8620d6
BL
8464 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8465 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8466 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8467 return false;
8468 }
8469
31c0ed90
BL
8470 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8471 *
8472 * Handles: UNDESIRED -> ENABLED
8473 */
0c8620d6
BL
8474 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8475 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8476 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8477
0d9a947b
QZ
8478 /* Stream removed and re-enabled
8479 *
8480 * Can sometimes overlap with the HPD case,
8481 * thus set update_hdcp to false to avoid
8482 * setting HDCP multiple times.
8483 *
8484 * Handles: DESIRED -> DESIRED (Special case)
8485 */
8486 if (!(old_state->crtc && old_state->crtc->enabled) &&
8487 state->crtc && state->crtc->enabled &&
8488 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8489 dm_con_state->update_hdcp = false;
8490 return true;
8491 }
8492
8493 /* Hot-plug, headless s3, dpms
8494 *
8495 * Only start HDCP if the display is connected/enabled.
8496 * update_hdcp flag will be set to false until the next
8497 * HPD comes in.
31c0ed90
BL
8498 *
8499 * Handles: DESIRED -> DESIRED (Special case)
0c8620d6 8500 */
97f6c917
BL
8501 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8502 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8503 dm_con_state->update_hdcp = false;
0c8620d6 8504 return true;
97f6c917 8505 }
0c8620d6 8506
31c0ed90
BL
8507 /*
8508 * Handles: UNDESIRED -> UNDESIRED
8509 * DESIRED -> DESIRED
8510 * ENABLED -> ENABLED
8511 */
0c8620d6
BL
8512 if (old_state->content_protection == state->content_protection)
8513 return false;
8514
31c0ed90
BL
8515 /*
8516 * Handles: UNDESIRED -> DESIRED
8517 * DESIRED -> UNDESIRED
8518 * ENABLED -> UNDESIRED
8519 */
97f6c917 8520 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
0c8620d6
BL
8521 return true;
8522
31c0ed90
BL
8523 /*
8524 * Handles: DESIRED -> ENABLED
8525 */
0c8620d6
BL
8526 return false;
8527}
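/*
 * Decision table (illustrative recap of the checks above; "yes" means
 * the caller goes on to invoke hdcp_update_display()):
 *
 *	UNDESIRED -> DESIRED		yes
 *	DESIRED   -> UNDESIRED		yes
 *	ENABLED   -> UNDESIRED		yes
 *	UNDESIRED -> ENABLED		yes (first downgraded to DESIRED)
 *	ENABLED   -> DESIRED		no  (restored to ENABLED)
 *	DESIRED   -> ENABLED		no  (driver moves to ENABLED itself)
 *	DESIRED   -> DESIRED		only for the hotplug / stream
 *					re-enable special cases
 *	same      -> same (others)	no
 */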
8528
0c8620d6 8529#endif
3ee6b26b
AD
8530static void remove_stream(struct amdgpu_device *adev,
8531 struct amdgpu_crtc *acrtc,
8532 struct dc_stream_state *stream)
e7b07cee
HW
8533{
8534 /* this is the update mode case */
e7b07cee
HW
8535
8536 acrtc->otg_inst = -1;
8537 acrtc->enabled = false;
8538}
8539
7578ecda
AD
8540static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8541 struct dc_cursor_position *position)
2a8f6ccb 8542{
f4c2cc43 8543 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2a8f6ccb
HW
8544 int x, y;
8545 int xorigin = 0, yorigin = 0;
8546
e371e19c 8547 if (!crtc || !plane->state->fb)
2a8f6ccb 8548 return 0;
2a8f6ccb
HW
8549
8550 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8551 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8552 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8553 __func__,
8554 plane->state->crtc_w,
8555 plane->state->crtc_h);
8556 return -EINVAL;
8557 }
8558
8559 x = plane->state->crtc_x;
8560 y = plane->state->crtc_y;
c14a005c 8561
e371e19c
NK
8562 if (x <= -amdgpu_crtc->max_cursor_width ||
8563 y <= -amdgpu_crtc->max_cursor_height)
8564 return 0;
8565
2a8f6ccb
HW
8566 if (x < 0) {
8567 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8568 x = 0;
8569 }
8570 if (y < 0) {
8571 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8572 y = 0;
8573 }
8574 position->enable = true;
d243b6ff 8575 position->translate_by_source = true;
2a8f6ccb
HW
8576 position->x = x;
8577 position->y = y;
8578 position->x_hotspot = xorigin;
8579 position->y_hotspot = yorigin;
8580
8581 return 0;
8582}
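/*
 * Worked example (illustrative): a 64x64 cursor placed at crtc_x = -10,
 * crtc_y = 0 comes out as position.x = 0 with position.x_hotspot = 10,
 * so DC starts scanout 10 pixels into the cursor surface and the
 * visible part stays flush with the left edge of the screen.
 */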
8583
3ee6b26b
AD
8584static void handle_cursor_update(struct drm_plane *plane,
8585 struct drm_plane_state *old_plane_state)
e7b07cee 8586{
1348969a 8587 struct amdgpu_device *adev = drm_to_adev(plane->dev);
2a8f6ccb
HW
8588 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8589 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8590 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8591 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8592 uint64_t address = afb ? afb->address : 0;
6a30a929 8593 struct dc_cursor_position position = {0};
2a8f6ccb
HW
8594 struct dc_cursor_attributes attributes;
8595 int ret;
8596
e7b07cee
HW
8597 if (!plane->state->fb && !old_plane_state->fb)
8598 return;
8599
cb2318b7 8600 DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
4711c033
LT
8601 __func__,
8602 amdgpu_crtc->crtc_id,
8603 plane->state->crtc_w,
8604 plane->state->crtc_h);
2a8f6ccb
HW
8605
8606 ret = get_cursor_position(plane, crtc, &position);
8607 if (ret)
8608 return;
8609
8610 if (!position.enable) {
8611 /* turn off cursor */
674e78ac
NK
8612 if (crtc_state && crtc_state->stream) {
8613 mutex_lock(&adev->dm.dc_lock);
2a8f6ccb
HW
8614 dc_stream_set_cursor_position(crtc_state->stream,
8615 &position);
674e78ac
NK
8616 mutex_unlock(&adev->dm.dc_lock);
8617 }
2a8f6ccb 8618 return;
e7b07cee 8619 }
e7b07cee 8620
2a8f6ccb
HW
8621 amdgpu_crtc->cursor_width = plane->state->crtc_w;
8622 amdgpu_crtc->cursor_height = plane->state->crtc_h;
8623
c1cefe11 8624 memset(&attributes, 0, sizeof(attributes));
2a8f6ccb
HW
8625 attributes.address.high_part = upper_32_bits(address);
8626 attributes.address.low_part = lower_32_bits(address);
8627 attributes.width = plane->state->crtc_w;
8628 attributes.height = plane->state->crtc_h;
8629 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8630 attributes.rotation_angle = 0;
8631 attributes.attribute_flags.value = 0;
8632
03a66367 8633 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
2a8f6ccb 8634
886daac9 8635 if (crtc_state->stream) {
674e78ac 8636 mutex_lock(&adev->dm.dc_lock);
886daac9
JZ
8637 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8638 &attributes))
8639 DRM_ERROR("DC failed to set cursor attributes\n");
2a8f6ccb 8640
2a8f6ccb
HW
8641 if (!dc_stream_set_cursor_position(crtc_state->stream,
8642 &position))
8643 DRM_ERROR("DC failed to set cursor position\n");
674e78ac 8644 mutex_unlock(&adev->dm.dc_lock);
886daac9 8645 }
2a8f6ccb 8646}
e7b07cee
HW
8647
8648static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8649{
8650
8651 assert_spin_locked(&acrtc->base.dev->event_lock);
8652 WARN_ON(acrtc->event);
8653
8654 acrtc->event = acrtc->base.state->event;
8655
8656 /* Set the flip status */
8657 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8658
8659 /* Mark this event as consumed */
8660 acrtc->base.state->event = NULL;
8661
cb2318b7
VL
8662 DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8663 acrtc->crtc_id);
e7b07cee
HW
8664}
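/*
 * Illustrative note: from this point the pageflip interrupt handler
 * owns acrtc->event and is responsible for delivering it to userspace;
 * clearing base.state->event above keeps the atomic helpers from
 * completing the same event a second time.
 */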
8665
bb47de73
NK
8666static void update_freesync_state_on_stream(
8667 struct amdgpu_display_manager *dm,
8668 struct dm_crtc_state *new_crtc_state,
180db303
NK
8669 struct dc_stream_state *new_stream,
8670 struct dc_plane_state *surface,
8671 u32 flip_timestamp_in_us)
bb47de73 8672{
09aef2c4 8673 struct mod_vrr_params vrr_params;
bb47de73 8674 struct dc_info_packet vrr_infopacket = {0};
09aef2c4 8675 struct amdgpu_device *adev = dm->adev;
585d450c 8676 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
09aef2c4 8677 unsigned long flags;
4cda3243 8678 bool pack_sdp_v1_3 = false;
bb47de73
NK
8679
8680 if (!new_stream)
8681 return;
8682
8683 /*
8684 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8685 * For now it's sufficient to just guard against these conditions.
8686 */
8687
8688 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8689 return;
8690
4a580877 8691 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
585d450c 8692 vrr_params = acrtc->dm_irq_params.vrr_params;
09aef2c4 8693
180db303
NK
8694 if (surface) {
8695 mod_freesync_handle_preflip(
8696 dm->freesync_module,
8697 surface,
8698 new_stream,
8699 flip_timestamp_in_us,
8700 &vrr_params);
09aef2c4
MK
8701
8702 if (adev->family < AMDGPU_FAMILY_AI &&
8703 amdgpu_dm_vrr_active(new_crtc_state)) {
8704 mod_freesync_handle_v_update(dm->freesync_module,
8705 new_stream, &vrr_params);
e63e2491
EB
8706
8707 /* Need to call this before the frame ends. */
8708 dc_stream_adjust_vmin_vmax(dm->dc,
8709 new_crtc_state->stream,
8710 &vrr_params.adjust);
09aef2c4 8711 }
180db303 8712 }
bb47de73
NK
8713
8714 mod_freesync_build_vrr_infopacket(
8715 dm->freesync_module,
8716 new_stream,
180db303 8717 &vrr_params,
ecd0136b
HT
8718 PACKET_TYPE_VRR,
8719 TRANSFER_FUNC_UNKNOWN,
4cda3243
MT
8720 &vrr_infopacket,
8721 pack_sdp_v1_3);
bb47de73 8722
8a48b44c 8723 new_crtc_state->freesync_timing_changed |=
585d450c 8724 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
180db303
NK
8725 &vrr_params.adjust,
8726 sizeof(vrr_params.adjust)) != 0);
bb47de73 8727
8a48b44c 8728 new_crtc_state->freesync_vrr_info_changed |=
bb47de73
NK
8729 (memcmp(&new_crtc_state->vrr_infopacket,
8730 &vrr_infopacket,
8731 sizeof(vrr_infopacket)) != 0);
8732
585d450c 8733 acrtc->dm_irq_params.vrr_params = vrr_params;
bb47de73
NK
8734 new_crtc_state->vrr_infopacket = vrr_infopacket;
8735
585d450c 8736 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
bb47de73
NK
8737 new_stream->vrr_infopacket = vrr_infopacket;
8738
8739 if (new_crtc_state->freesync_vrr_info_changed)
8740 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8741 new_crtc_state->base.crtc->base.id,
8742 (int)new_crtc_state->base.vrr_enabled,
180db303 8743 (int)vrr_params.state);
09aef2c4 8744
4a580877 8745 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
bb47de73
NK
8746}
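/*
 * Illustrative note: change detection above is a raw memcmp() of the
 * old and new timing-adjust and infopacket structs, and the *_changed
 * flags are accumulated with |= rather than assigned, so a later call
 * in the same commit cannot clear a change that was already detected.
 */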
8747
585d450c 8748static void update_stream_irq_parameters(
e854194c
MK
8749 struct amdgpu_display_manager *dm,
8750 struct dm_crtc_state *new_crtc_state)
8751{
8752 struct dc_stream_state *new_stream = new_crtc_state->stream;
09aef2c4 8753 struct mod_vrr_params vrr_params;
e854194c 8754 struct mod_freesync_config config = new_crtc_state->freesync_config;
09aef2c4 8755 struct amdgpu_device *adev = dm->adev;
585d450c 8756 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
09aef2c4 8757 unsigned long flags;
e854194c
MK
8758
8759 if (!new_stream)
8760 return;
8761
8762 /*
8763 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8764 * For now it's sufficient to just guard against these conditions.
8765 */
8766 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8767 return;
8768
4a580877 8769 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
585d450c 8770 vrr_params = acrtc->dm_irq_params.vrr_params;
09aef2c4 8771
e854194c
MK
8772 if (new_crtc_state->vrr_supported &&
8773 config.min_refresh_in_uhz &&
8774 config.max_refresh_in_uhz) {
a85ba005
NC
8775 /*
8776 * if freesync compatible mode was set, config.state will be set
8777 * in atomic check
8778 */
8779 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8780 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8781 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8782 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8783 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8784 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8785 vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8786 } else {
8787 config.state = new_crtc_state->base.vrr_enabled ?
8788 VRR_STATE_ACTIVE_VARIABLE :
8789 VRR_STATE_INACTIVE;
8790 }
e854194c
MK
8791 } else {
8792 config.state = VRR_STATE_UNSUPPORTED;
8793 }
8794
8795 mod_freesync_build_vrr_params(dm->freesync_module,
8796 new_stream,
8797 &config, &vrr_params);
8798
8799 new_crtc_state->freesync_timing_changed |=
585d450c
AP
8800 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8801 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
e854194c 8802
585d450c
AP
8803 new_crtc_state->freesync_config = config;
8804 /* Copy state for access from DM IRQ handler */
8805 acrtc->dm_irq_params.freesync_config = config;
8806 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8807 acrtc->dm_irq_params.vrr_params = vrr_params;
4a580877 8808 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
e854194c
MK
8809}
8810
66b0c973
MK
8811static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8812 struct dm_crtc_state *new_state)
8813{
8814 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8815 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8816
8817 if (!old_vrr_active && new_vrr_active) {
8818 /* Transition VRR inactive -> active:
8819 * While VRR is active, we must not disable vblank irq, as a
 8820		 * re-enable after a disable would compute bogus vblank/pflip
 8821		 * timestamps if it happened inside the display front porch.
d2574c33
MK
8822 *
8823 * We also need vupdate irq for the actual core vblank handling
8824 * at end of vblank.
66b0c973 8825 */
d2574c33 8826 dm_set_vupdate_irq(new_state->base.crtc, true);
66b0c973
MK
8827 drm_crtc_vblank_get(new_state->base.crtc);
8828 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8829 __func__, new_state->base.crtc->base.id);
8830 } else if (old_vrr_active && !new_vrr_active) {
8831 /* Transition VRR active -> inactive:
8832 * Allow vblank irq disable again for fixed refresh rate.
8833 */
d2574c33 8834 dm_set_vupdate_irq(new_state->base.crtc, false);
66b0c973
MK
8835 drm_crtc_vblank_put(new_state->base.crtc);
8836 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8837 __func__, new_state->base.crtc->base.id);
8838 }
8839}
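/*
 * Transition summary (illustrative):
 *
 *	VRR off -> on : enable vupdate irq, take a vblank reference
 *	VRR on  -> off: disable vupdate irq, drop the vblank reference
 *
 * Holding the reference keeps vblank irqs enabled for as long as VRR is
 * active, avoiding the bogus timestamps a disable/re-enable inside the
 * stretched front porch would produce.
 */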
8840
8ad27806
NK
8841static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8842{
8843 struct drm_plane *plane;
5760dcb9 8844 struct drm_plane_state *old_plane_state;
8ad27806
NK
8845 int i;
8846
8847 /*
8848 * TODO: Make this per-stream so we don't issue redundant updates for
8849 * commits with multiple streams.
8850 */
5760dcb9 8851 for_each_old_plane_in_state(state, plane, old_plane_state, i)
8ad27806
NK
8852 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8853 handle_cursor_update(plane, old_plane_state);
8854}
8855
3be5262e 8856static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
eb3dc897 8857 struct dc_state *dc_state,
3ee6b26b
AD
8858 struct drm_device *dev,
8859 struct amdgpu_display_manager *dm,
8860 struct drm_crtc *pcrtc,
420cd472 8861 bool wait_for_vblank)
e7b07cee 8862{
efc8278e 8863 uint32_t i;
8a48b44c 8864 uint64_t timestamp_ns;
e7b07cee 8865 struct drm_plane *plane;
0bc9706d 8866 struct drm_plane_state *old_plane_state, *new_plane_state;
e7b07cee 8867 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
0bc9706d
LSL
8868 struct drm_crtc_state *new_pcrtc_state =
8869 drm_atomic_get_new_crtc_state(state, pcrtc);
8870 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
44d09c6a
HW
8871 struct dm_crtc_state *dm_old_crtc_state =
8872 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
74aa7bd4 8873 int planes_count = 0, vpos, hpos;
570c91d5 8874 long r;
e7b07cee 8875 unsigned long flags;
8a48b44c 8876 struct amdgpu_bo *abo;
fdd1fe57
MK
8877 uint32_t target_vblank, last_flip_vblank;
8878 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
74aa7bd4 8879 bool pflip_present = false;
bc7f670e
DF
8880 struct {
8881 struct dc_surface_update surface_updates[MAX_SURFACES];
8882 struct dc_plane_info plane_infos[MAX_SURFACES];
8883 struct dc_scaling_info scaling_infos[MAX_SURFACES];
74aa7bd4 8884 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
bc7f670e 8885 struct dc_stream_update stream_update;
74aa7bd4 8886 } *bundle;
bc7f670e 8887
74aa7bd4 8888 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8a48b44c 8889
74aa7bd4
DF
8890 if (!bundle) {
8891 dm_error("Failed to allocate update bundle\n");
4b510503
NK
8892 goto cleanup;
8893 }
e7b07cee 8894
8ad27806
NK
8895 /*
8896 * Disable the cursor first if we're disabling all the planes.
8897 * It'll remain on the screen after the planes are re-enabled
8898 * if we don't.
8899 */
8900 if (acrtc_state->active_planes == 0)
8901 amdgpu_dm_commit_cursors(state);
8902
e7b07cee 8903 /* update planes when needed */
efc8278e 8904 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
0bc9706d 8905 struct drm_crtc *crtc = new_plane_state->crtc;
f5ba60fe 8906 struct drm_crtc_state *new_crtc_state;
0bc9706d 8907 struct drm_framebuffer *fb = new_plane_state->fb;
6eed95b0 8908 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
34bafd27 8909 bool plane_needs_flip;
c7af5f77 8910 struct dc_plane_state *dc_plane;
54d76575 8911 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
e7b07cee 8912
80c218d5
NK
8913 /* Cursor plane is handled after stream updates */
8914 if (plane->type == DRM_PLANE_TYPE_CURSOR)
e7b07cee 8915 continue;
e7b07cee 8916
f5ba60fe
DD
8917 if (!fb || !crtc || pcrtc != crtc)
8918 continue;
8919
8920 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8921 if (!new_crtc_state->active)
e7b07cee
HW
8922 continue;
8923
bc7f670e 8924 dc_plane = dm_new_plane_state->dc_state;
e7b07cee 8925
74aa7bd4 8926 bundle->surface_updates[planes_count].surface = dc_plane;
bc7f670e 8927 if (new_pcrtc_state->color_mgmt_changed) {
74aa7bd4
DF
8928 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8929 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
44efb784 8930 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
bc7f670e 8931 }
8a48b44c 8932
695af5f9
NK
8933 fill_dc_scaling_info(new_plane_state,
8934 &bundle->scaling_infos[planes_count]);
8a48b44c 8935
695af5f9
NK
8936 bundle->surface_updates[planes_count].scaling_info =
8937 &bundle->scaling_infos[planes_count];
8a48b44c 8938
f5031000 8939 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8a48b44c 8940
f5031000 8941 pflip_present = pflip_present || plane_needs_flip;
8a48b44c 8942
f5031000
DF
8943 if (!plane_needs_flip) {
8944 planes_count += 1;
8945 continue;
8946 }
8a48b44c 8947
2fac0f53
CK
8948 abo = gem_to_amdgpu_bo(fb->obj[0]);
8949
f8308898
AG
8950 /*
 8951		 * Wait for all fences on this FB. Do a limited wait to avoid
 8952		 * deadlocking during GPU reset, when the fence will never signal
 8953		 * but we still hold the reservation lock for the BO.
8954 */
d3fae3b3
CK
8955 r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
8956 msecs_to_jiffies(5000));
f8308898 8957 if (unlikely(r <= 0))
ed8a5fb2 8958 DRM_ERROR("Waiting for fences timed out!");
2fac0f53 8959
695af5f9 8960 fill_dc_plane_info_and_addr(
8ce5d842 8961 dm->adev, new_plane_state,
6eed95b0 8962 afb->tiling_flags,
695af5f9 8963 &bundle->plane_infos[planes_count],
87b7ebc2 8964 &bundle->flip_addrs[planes_count].address,
6eed95b0 8965 afb->tmz_surface, false);
87b7ebc2 8966
4711c033 8967 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
87b7ebc2
RS
8968 new_plane_state->plane->index,
8969 bundle->plane_infos[planes_count].dcc.enable);
695af5f9
NK
8970
8971 bundle->surface_updates[planes_count].plane_info =
8972 &bundle->plane_infos[planes_count];
8a48b44c 8973
caff0e66
NK
8974 /*
8975 * Only allow immediate flips for fast updates that don't
 8976		 * change FB pitch, DCC state, rotation or mirroring.
8977 */
f5031000 8978 bundle->flip_addrs[planes_count].flip_immediate =
4d85f45c 8979 crtc->state->async_flip &&
caff0e66 8980 acrtc_state->update_type == UPDATE_TYPE_FAST;
8a48b44c 8981
f5031000
DF
8982 timestamp_ns = ktime_get_ns();
8983 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8984 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8985 bundle->surface_updates[planes_count].surface = dc_plane;
8a48b44c 8986
f5031000
DF
8987 if (!bundle->surface_updates[planes_count].surface) {
8988 DRM_ERROR("No surface for CRTC: id=%d\n",
8989 acrtc_attach->crtc_id);
8990 continue;
bc7f670e
DF
8991 }
8992
f5031000
DF
8993 if (plane == pcrtc->primary)
8994 update_freesync_state_on_stream(
8995 dm,
8996 acrtc_state,
8997 acrtc_state->stream,
8998 dc_plane,
8999 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
bc7f670e 9000
4711c033 9001 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
f5031000
DF
9002 __func__,
9003 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9004 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
bc7f670e
DF
9005
9006 planes_count += 1;
9007
8a48b44c
DF
9008 }
9009
74aa7bd4 9010 if (pflip_present) {
634092b1
MK
9011 if (!vrr_active) {
9012 /* Use old throttling in non-vrr fixed refresh rate mode
9013 * to keep flip scheduling based on target vblank counts
9014 * working in a backwards compatible way, e.g., for
9015 * clients using the GLX_OML_sync_control extension or
9016 * DRI3/Present extension with defined target_msc.
9017 */
e3eff4b5 9018 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
634092b1
MK
9019 }
9020 else {
9021 /* For variable refresh rate mode only:
9022 * Get vblank of last completed flip to avoid > 1 vrr
9023 * flips per video frame by use of throttling, but allow
9024 * flip programming anywhere in the possibly large
9025 * variable vrr vblank interval for fine-grained flip
9026 * timing control and more opportunity to avoid stutter
9027 * on late submission of flips.
9028 */
9029 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
5d1c59c4 9030 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
634092b1
MK
9031 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9032 }
9033
fdd1fe57 9034 target_vblank = last_flip_vblank + wait_for_vblank;
8a48b44c
DF
9035
9036 /*
9037 * Wait until we're out of the vertical blank period before the one
9038 * targeted by the flip
9039 */
9040 while ((acrtc_attach->enabled &&
9041 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9042 0, &vpos, &hpos, NULL,
9043 NULL, &pcrtc->hwmode)
9044 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9045 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9046 (int)(target_vblank -
e3eff4b5 9047 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8a48b44c
DF
9048 usleep_range(1000, 1100);
9049 }
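		/*
		 * Illustrative note: with wait_for_vblank == true the loop
		 * above defers flip programming until the hardware counter
		 * passes last_flip_vblank + 1, i.e. at most one flip is
		 * submitted per (possibly VRR-stretched) refresh cycle. With
		 * wait_for_vblank == false (async flips) target_vblank ==
		 * last_flip_vblank, so the count check fails at once and no
		 * throttling takes place.
		 */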
9050
8fe684e9
NK
9051 /**
9052 * Prepare the flip event for the pageflip interrupt to handle.
9053 *
9054 * This only works in the case where we've already turned on the
9055 * appropriate hardware blocks (eg. HUBP) so in the transition case
9056 * from 0 -> n planes we have to skip a hardware generated event
9057 * and rely on sending it from software.
9058 */
9059 if (acrtc_attach->base.state->event &&
035f5496
AP
9060 acrtc_state->active_planes > 0 &&
9061 !acrtc_state->force_dpms_off) {
8a48b44c
DF
9062 drm_crtc_vblank_get(pcrtc);
9063
9064 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9065
9066 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9067 prepare_flip_isr(acrtc_attach);
9068
9069 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9070 }
9071
9072 if (acrtc_state->stream) {
8a48b44c 9073 if (acrtc_state->freesync_vrr_info_changed)
74aa7bd4 9074 bundle->stream_update.vrr_infopacket =
8a48b44c 9075 &acrtc_state->stream->vrr_infopacket;
e7b07cee 9076 }
e7b07cee
HW
9077 }
9078
bc92c065 9079 /* Update the planes if changed or disable if we don't have any. */
ed9656fb
ES
9080 if ((planes_count || acrtc_state->active_planes == 0) &&
9081 acrtc_state->stream) {
96160687 9082#if defined(CONFIG_DRM_AMD_DC_DCN)
58aa1c50
NK
9083 /*
9084 * If PSR or idle optimizations are enabled then flush out
9085 * any pending work before hardware programming.
9086 */
06dd1888
NK
9087 if (dm->vblank_control_workqueue)
9088 flush_workqueue(dm->vblank_control_workqueue);
96160687 9089#endif
58aa1c50 9090
b6e881c9 9091 bundle->stream_update.stream = acrtc_state->stream;
bc7f670e 9092 if (new_pcrtc_state->mode_changed) {
74aa7bd4
DF
9093 bundle->stream_update.src = acrtc_state->stream->src;
9094 bundle->stream_update.dst = acrtc_state->stream->dst;
e7b07cee
HW
9095 }
9096
cf020d49
NK
9097 if (new_pcrtc_state->color_mgmt_changed) {
9098 /*
9099 * TODO: This isn't fully correct since we've actually
9100 * already modified the stream in place.
9101 */
9102 bundle->stream_update.gamut_remap =
9103 &acrtc_state->stream->gamut_remap_matrix;
9104 bundle->stream_update.output_csc_transform =
9105 &acrtc_state->stream->csc_color_matrix;
9106 bundle->stream_update.out_transfer_func =
9107 acrtc_state->stream->out_transfer_func;
9108 }
bc7f670e 9109
8a48b44c 9110 acrtc_state->stream->abm_level = acrtc_state->abm_level;
bc7f670e 9111 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
74aa7bd4 9112 bundle->stream_update.abm_level = &acrtc_state->abm_level;
44d09c6a 9113
e63e2491
EB
9114 /*
9115 * If FreeSync state on the stream has changed then we need to
9116 * re-adjust the min/max bounds now that DC doesn't handle this
9117 * as part of commit.
9118 */
a85ba005 9119 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
e63e2491
EB
9120 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9121 dc_stream_adjust_vmin_vmax(
9122 dm->dc, acrtc_state->stream,
585d450c 9123 &acrtc_attach->dm_irq_params.vrr_params.adjust);
e63e2491
EB
9124 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9125 }
bc7f670e 9126 mutex_lock(&dm->dc_lock);
8c322309 9127 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
d1ebfdd8 9128 acrtc_state->stream->link->psr_settings.psr_allow_active)
8c322309
RL
9129 amdgpu_dm_psr_disable(acrtc_state->stream);
9130
bc7f670e 9131 dc_commit_updates_for_stream(dm->dc,
74aa7bd4 9132 bundle->surface_updates,
bc7f670e
DF
9133 planes_count,
9134 acrtc_state->stream,
efc8278e
AJ
9135 &bundle->stream_update,
9136 dc_state);
8c322309 9137
8fe684e9
NK
9138 /**
9139 * Enable or disable the interrupts on the backend.
9140 *
9141 * Most pipes are put into power gating when unused.
9142 *
9143 * When power gating is enabled on a pipe we lose the
9144 * interrupt enablement state when power gating is disabled.
9145 *
9146 * So we need to update the IRQ control state in hardware
9147 * whenever the pipe turns on (since it could be previously
9148 * power gated) or off (since some pipes can't be power gated
9149 * on some ASICs).
9150 */
9151 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
1348969a
LT
9152 dm_update_pflip_irq_state(drm_to_adev(dev),
9153 acrtc_attach);
8fe684e9 9154
8c322309 9155 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
1cfbbdde 9156 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
d1ebfdd8 9157 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8c322309 9158 amdgpu_dm_link_setup_psr(acrtc_state->stream);
58aa1c50
NK
9159
9160 /* Decrement skip count when PSR is enabled and we're doing fast updates. */
9161 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9162 acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9163 struct amdgpu_dm_connector *aconn =
9164 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
1a365683
RL
9165
9166 if (aconn->psr_skip_count > 0)
9167 aconn->psr_skip_count--;
58aa1c50
NK
9168
9169 /* Allow PSR when skip count is 0. */
9170 acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9171 } else {
9172 acrtc_attach->dm_irq_params.allow_psr_entry = false;
8c322309
RL
9173 }
9174
bc7f670e 9175 mutex_unlock(&dm->dc_lock);
e7b07cee 9176 }
4b510503 9177
8ad27806
NK
9178 /*
9179 * Update cursor state *after* programming all the planes.
9180 * This avoids redundant programming in the case where we're going
9181 * to be disabling a single plane - those pipes are being disabled.
9182 */
9183 if (acrtc_state->active_planes)
9184 amdgpu_dm_commit_cursors(state);
80c218d5 9185
4b510503 9186cleanup:
74aa7bd4 9187 kfree(bundle);
e7b07cee
HW
9188}
9189
6ce8f316
NK
9190static void amdgpu_dm_commit_audio(struct drm_device *dev,
9191 struct drm_atomic_state *state)
9192{
1348969a 9193 struct amdgpu_device *adev = drm_to_adev(dev);
6ce8f316
NK
9194 struct amdgpu_dm_connector *aconnector;
9195 struct drm_connector *connector;
9196 struct drm_connector_state *old_con_state, *new_con_state;
9197 struct drm_crtc_state *new_crtc_state;
9198 struct dm_crtc_state *new_dm_crtc_state;
9199 const struct dc_stream_status *status;
9200 int i, inst;
9201
9202 /* Notify device removals. */
9203 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9204 if (old_con_state->crtc != new_con_state->crtc) {
9205 /* CRTC changes require notification. */
9206 goto notify;
9207 }
9208
9209 if (!new_con_state->crtc)
9210 continue;
9211
9212 new_crtc_state = drm_atomic_get_new_crtc_state(
9213 state, new_con_state->crtc);
9214
9215 if (!new_crtc_state)
9216 continue;
9217
9218 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9219 continue;
9220
9221 notify:
9222 aconnector = to_amdgpu_dm_connector(connector);
9223
9224 mutex_lock(&adev->dm.audio_lock);
9225 inst = aconnector->audio_inst;
9226 aconnector->audio_inst = -1;
9227 mutex_unlock(&adev->dm.audio_lock);
9228
9229 amdgpu_dm_audio_eld_notify(adev, inst);
9230 }
9231
9232 /* Notify audio device additions. */
9233 for_each_new_connector_in_state(state, connector, new_con_state, i) {
9234 if (!new_con_state->crtc)
9235 continue;
9236
9237 new_crtc_state = drm_atomic_get_new_crtc_state(
9238 state, new_con_state->crtc);
9239
9240 if (!new_crtc_state)
9241 continue;
9242
9243 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9244 continue;
9245
9246 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9247 if (!new_dm_crtc_state->stream)
9248 continue;
9249
9250 status = dc_stream_get_status(new_dm_crtc_state->stream);
9251 if (!status)
9252 continue;
9253
9254 aconnector = to_amdgpu_dm_connector(connector);
9255
9256 mutex_lock(&adev->dm.audio_lock);
9257 inst = status->audio_inst;
9258 aconnector->audio_inst = inst;
9259 mutex_unlock(&adev->dm.audio_lock);
9260
9261 amdgpu_dm_audio_eld_notify(adev, inst);
9262 }
9263}
9264
1f6010a9 9265/*
27b3f4fc
LSL
9266 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9267 * @crtc_state: the DRM CRTC state
9268 * @stream_state: the DC stream state.
9269 *
 9270 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
9271 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9272 */
9273static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9274 struct dc_stream_state *stream_state)
9275{
b9952f93 9276 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
27b3f4fc 9277}
e7b07cee 9278
b8592b48
LL
9279/**
9280 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9281 * @state: The atomic state to commit
9282 *
9283 * This will tell DC to commit the constructed DC state from atomic_check,
 9284 * programming the hardware. Any failure here implies a hardware failure, since
9285 * atomic check should have filtered anything non-kosher.
9286 */
7578ecda 9287static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
e7b07cee
HW
9288{
9289 struct drm_device *dev = state->dev;
1348969a 9290 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
9291 struct amdgpu_display_manager *dm = &adev->dm;
9292 struct dm_atomic_state *dm_state;
eb3dc897 9293 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
e7b07cee 9294 uint32_t i, j;
5cc6dcbd 9295 struct drm_crtc *crtc;
0bc9706d 9296 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
e7b07cee
HW
9297 unsigned long flags;
9298 bool wait_for_vblank = true;
9299 struct drm_connector *connector;
c2cea706 9300 struct drm_connector_state *old_con_state, *new_con_state;
54d76575 9301 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
fe2a1965 9302 int crtc_disable_count = 0;
6ee90e88 9303 bool mode_set_reset_required = false;
e7b07cee 9304
e8a98235
RS
9305 trace_amdgpu_dm_atomic_commit_tail_begin(state);
9306
e7b07cee
HW
9307 drm_atomic_helper_update_legacy_modeset_state(dev, state);
9308
eb3dc897
NK
9309 dm_state = dm_atomic_get_new_state(state);
9310 if (dm_state && dm_state->context) {
9311 dc_state = dm_state->context;
9312 } else {
9313 /* No state changes, retain current state. */
813d20dc 9314 dc_state_temp = dc_create_state(dm->dc);
eb3dc897
NK
9315 ASSERT(dc_state_temp);
9316 dc_state = dc_state_temp;
9317 dc_resource_state_copy_construct_current(dm->dc, dc_state);
9318 }
e7b07cee 9319
6d90a208
AP
9320 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9321 new_crtc_state, i) {
9322 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9323
9324 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9325
9326 if (old_crtc_state->active &&
9327 (!new_crtc_state->active ||
9328 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9329 manage_dm_interrupts(adev, acrtc, false);
9330 dc_stream_release(dm_old_crtc_state->stream);
9331 }
9332 }
9333
8976f73b
RS
9334 drm_atomic_helper_calc_timestamping_constants(state);
9335
e7b07cee 9336 /* update changed items */
0bc9706d 9337 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
e7b07cee 9338 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 9339
54d76575
LSL
9340 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9341 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
e7b07cee 9342
4711c033 9343 DRM_DEBUG_ATOMIC(
e7b07cee
HW
9344 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9345 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
9346 "connectors_changed:%d\n",
9347 acrtc->crtc_id,
0bc9706d
LSL
9348 new_crtc_state->enable,
9349 new_crtc_state->active,
9350 new_crtc_state->planes_changed,
9351 new_crtc_state->mode_changed,
9352 new_crtc_state->active_changed,
9353 new_crtc_state->connectors_changed);
e7b07cee 9354
5c68c652
VL
9355 /* Disable cursor if disabling crtc */
9356 if (old_crtc_state->active && !new_crtc_state->active) {
9357 struct dc_cursor_position position;
9358
9359 memset(&position, 0, sizeof(position));
9360 mutex_lock(&dm->dc_lock);
9361 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9362 mutex_unlock(&dm->dc_lock);
9363 }
9364
27b3f4fc
LSL
9365 /* Copy all transient state flags into dc state */
9366 if (dm_new_crtc_state->stream) {
9367 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9368 dm_new_crtc_state->stream);
9369 }
9370
e7b07cee
HW
9371 /* handles headless hotplug case, updating new_state and
9372 * aconnector as needed
9373 */
9374
54d76575 9375 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
e7b07cee 9376
4711c033 9377 DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 9378
54d76575 9379 if (!dm_new_crtc_state->stream) {
e7b07cee 9380 /*
b830ebc9
HW
 9381				 * This could happen because of issues with
 9382				 * userspace notification delivery.
 9383				 * In this case userspace tries to set a mode on a
1f6010a9
DF
 9384				 * display which is in fact disconnected.
 9385				 * dc_sink is NULL on the aconnector in this case.
b830ebc9
HW
 9386				 * We expect a mode reset to come soon.
9387 *
 9388				 * This can also happen when an unplug is done
 9389				 * during the resume sequence.
9390 *
9391 * In this case, we want to pretend we still
9392 * have a sink to keep the pipe running so that
9393 * hw state is consistent with the sw state
9394 */
f1ad2f5e 9395 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
e7b07cee
HW
9396 __func__, acrtc->base.base.id);
9397 continue;
9398 }
9399
54d76575
LSL
9400 if (dm_old_crtc_state->stream)
9401 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
e7b07cee 9402
97028037
LP
9403 pm_runtime_get_noresume(dev->dev);
9404
e7b07cee 9405 acrtc->enabled = true;
0bc9706d
LSL
9406 acrtc->hw_mode = new_crtc_state->mode;
9407 crtc->hwmode = new_crtc_state->mode;
6ee90e88 9408 mode_set_reset_required = true;
0bc9706d 9409 } else if (modereset_required(new_crtc_state)) {
4711c033 9410 DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 9411 /* i.e. reset mode */
6ee90e88 9412 if (dm_old_crtc_state->stream)
54d76575 9413 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
a85ba005 9414
6ee90e88 9415 mode_set_reset_required = true;
e7b07cee
HW
9416 }
9417 } /* for_each_crtc_in_state() */
9418
eb3dc897 9419 if (dc_state) {
6ee90e88 9420		/* If there was a mode set or reset, disable eDP PSR */
58aa1c50 9421 if (mode_set_reset_required) {
96160687 9422#if defined(CONFIG_DRM_AMD_DC_DCN)
06dd1888
NK
9423 if (dm->vblank_control_workqueue)
9424 flush_workqueue(dm->vblank_control_workqueue);
96160687 9425#endif
6ee90e88 9426 amdgpu_dm_psr_disable_all(dm);
58aa1c50 9427 }
6ee90e88 9428
eb3dc897 9429 dm_enable_per_frame_crtc_master_sync(dc_state);
674e78ac 9430 mutex_lock(&dm->dc_lock);
eb3dc897 9431 WARN_ON(!dc_commit_state(dm->dc, dc_state));
5af50b0b
BR
9432#if defined(CONFIG_DRM_AMD_DC_DCN)
9433 /* Allow idle optimization when vblank count is 0 for display off */
9434 if (dm->active_vblank_irq_count == 0)
 9435			dc_allow_idle_optimizations(dm->dc, true);
9436#endif
674e78ac 9437 mutex_unlock(&dm->dc_lock);
fa2123db 9438 }
fe8858bb 9439
0bc9706d 9440 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 9441 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 9442
54d76575 9443 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 9444
54d76575 9445 if (dm_new_crtc_state->stream != NULL) {
e7b07cee 9446 const struct dc_stream_status *status =
54d76575 9447 dc_stream_get_status(dm_new_crtc_state->stream);
e7b07cee 9448
eb3dc897 9449 if (!status)
09f609c3
LL
9450 status = dc_stream_get_status_from_state(dc_state,
9451 dm_new_crtc_state->stream);
e7b07cee 9452 if (!status)
54d76575 9453 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
e7b07cee
HW
9454 else
9455 acrtc->otg_inst = status->primary_otg_inst;
9456 }
9457 }
0c8620d6
BL
9458#ifdef CONFIG_DRM_AMD_DC_HDCP
9459 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9460 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9461 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9462 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9463
9464 new_crtc_state = NULL;
9465
9466 if (acrtc)
9467 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9468
9469 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9470
9471 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9472 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9473 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9474 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
97f6c917 9475 dm_new_con_state->update_hdcp = true;
0c8620d6
BL
9476 continue;
9477 }
9478
9479 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
b1abe558
BL
9480 hdcp_update_display(
9481 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
23eb4191 9482 new_con_state->hdcp_content_type,
0e86d3d4 9483 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
0c8620d6
BL
9484 }
9485#endif
e7b07cee 9486
02d6a6fc 9487 /* Handle connector state changes */
c2cea706 9488 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
54d76575
LSL
9489 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9490 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9491 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
efc8278e 9492 struct dc_surface_update dummy_updates[MAX_SURFACES];
19afd799 9493 struct dc_stream_update stream_update;
b232d4ed 9494 struct dc_info_packet hdr_packet;
e7b07cee 9495 struct dc_stream_status *status = NULL;
b232d4ed 9496 bool abm_changed, hdr_changed, scaling_changed;
e7b07cee 9497
efc8278e 9498 memset(&dummy_updates, 0, sizeof(dummy_updates));
19afd799
NC
9499 memset(&stream_update, 0, sizeof(stream_update));
9500
44d09c6a 9501 if (acrtc) {
0bc9706d 9502 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
44d09c6a
HW
9503 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9504 }
0bc9706d 9505
e7b07cee 9506 /* Skip any modesets/resets */
0bc9706d 9507 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
e7b07cee
HW
9508 continue;
9509
54d76575 9510 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
c1ee92f9
DF
9511 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9512
b232d4ed
NK
9513 scaling_changed = is_scaling_state_different(dm_new_con_state,
9514 dm_old_con_state);
9515
9516 abm_changed = dm_new_crtc_state->abm_level !=
9517 dm_old_crtc_state->abm_level;
9518
9519 hdr_changed =
72921cdf 9520 !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
b232d4ed
NK
9521
9522 if (!scaling_changed && !abm_changed && !hdr_changed)
c1ee92f9 9523 continue;
e7b07cee 9524
b6e881c9 9525 stream_update.stream = dm_new_crtc_state->stream;
b232d4ed 9526 if (scaling_changed) {
02d6a6fc 9527 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
b6e881c9 9528 dm_new_con_state, dm_new_crtc_state->stream);
e7b07cee 9529
02d6a6fc
DF
9530 stream_update.src = dm_new_crtc_state->stream->src;
9531 stream_update.dst = dm_new_crtc_state->stream->dst;
9532 }
9533
b232d4ed 9534 if (abm_changed) {
02d6a6fc
DF
9535 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9536
9537 stream_update.abm_level = &dm_new_crtc_state->abm_level;
9538 }
70e8ffc5 9539
b232d4ed
NK
9540 if (hdr_changed) {
9541 fill_hdr_info_packet(new_con_state, &hdr_packet);
9542 stream_update.hdr_static_metadata = &hdr_packet;
9543 }
9544
54d76575 9545 status = dc_stream_get_status(dm_new_crtc_state->stream);
57738ae4
ND
9546
9547 if (WARN_ON(!status))
9548 continue;
9549
3be5262e 9550 WARN_ON(!status->plane_count);
e7b07cee 9551
02d6a6fc
DF
9552 /*
9553 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9554 * Here we create an empty update on each plane.
9555 * To fix this, DC should permit updating only stream properties.
9556 */
9557 for (j = 0; j < status->plane_count; j++)
efc8278e 9558 dummy_updates[j].surface = status->plane_states[0];
02d6a6fc
DF
9559
9560
9561 mutex_lock(&dm->dc_lock);
9562 dc_commit_updates_for_stream(dm->dc,
efc8278e 9563 dummy_updates,
02d6a6fc
DF
9564 status->plane_count,
9565 dm_new_crtc_state->stream,
efc8278e
AJ
9566 &stream_update,
9567 dc_state);
02d6a6fc 9568 mutex_unlock(&dm->dc_lock);
e7b07cee
HW
9569 }
9570
b5e83f6f 9571 /* Count number of newly disabled CRTCs for dropping PM refs later. */
e1fc2dca 9572 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
057be086 9573 new_crtc_state, i) {
fe2a1965
LP
9574 if (old_crtc_state->active && !new_crtc_state->active)
9575 crtc_disable_count++;
9576
54d76575 9577 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e1fc2dca 9578 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
66b0c973 9579
585d450c
AP
9580 /* For freesync config update on crtc state and params for irq */
9581 update_stream_irq_parameters(dm, dm_new_crtc_state);
057be086 9582
66b0c973
MK
9583 /* Handle vrr on->off / off->on transitions */
9584 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9585 dm_new_crtc_state);
e7b07cee
HW
9586 }
9587
8fe684e9
NK
9588 /**
9589 * Enable interrupts for CRTCs that are newly enabled or went through
 9590	 * a modeset. This is intentionally deferred until after the front-end
 9591	 * state has been modified so that the OTG is on by the time the IRQ
 9592	 * handlers run and they never access stale or invalid state.
9593 */
9594 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9595 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8e7b6fee 9596#ifdef CONFIG_DEBUG_FS
86bc2219 9597 bool configure_crc = false;
8e7b6fee 9598 enum amdgpu_dm_pipe_crc_source cur_crc_src;
d98af272
WL
9599#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9600 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9601#endif
9602 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9603 cur_crc_src = acrtc->dm_irq_params.crc_src;
9604 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8e7b6fee 9605#endif
585d450c
AP
9606 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9607
8fe684e9
NK
9608 if (new_crtc_state->active &&
9609 (!old_crtc_state->active ||
9610 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
585d450c
AP
9611 dc_stream_retain(dm_new_crtc_state->stream);
9612 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8fe684e9 9613 manage_dm_interrupts(adev, acrtc, true);
e2881d6d 9614
24eb9374 9615#ifdef CONFIG_DEBUG_FS
8fe684e9
NK
9616 /**
9617 * Frontend may have changed so reapply the CRC capture
9618 * settings for the stream.
9619 */
9620 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
c920888c 9621
8e7b6fee 9622 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
86bc2219
WL
9623 configure_crc = true;
9624#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
d98af272
WL
9625 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9626 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9627 acrtc->dm_irq_params.crc_window.update_win = true;
9628 acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9629 spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9630 crc_rd_wrk->crtc = crtc;
9631 spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9632 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9633 }
86bc2219 9634#endif
e2881d6d 9635 }
c920888c 9636
86bc2219 9637 if (configure_crc)
bbc49fc0
WL
9638 if (amdgpu_dm_crtc_configure_crc_source(
9639 crtc, dm_new_crtc_state, cur_crc_src))
9640 DRM_DEBUG_DRIVER("Failed to configure crc source");
24eb9374 9641#endif
8fe684e9
NK
9642 }
9643 }

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
		if (new_crtc_state->async_flip)
			wait_for_vblank = false;

	/* update planes when needed per crtc */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream)
			amdgpu_dm_commit_planes(state, dc_state, dev,
						dm, crtc, wait_for_vblank);
	}

	/* Update audio instances for each connector. */
	amdgpu_dm_commit_audio(dev, state);

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
	/* restore the backlight level */
	for (i = 0; i < dm->num_of_edps; i++) {
		if (dm->backlight_dev[i] &&
		    (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
			amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
	}
#endif
	/*
	 * send vblank event on all events not handled in flip and
	 * mark consumed event for drm_atomic_helper_commit_hw_done
	 */
	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {

		if (new_crtc_state->event)
			drm_send_event_locked(dev, &new_crtc_state->event->base);

		new_crtc_state->event = NULL;
	}
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	/* Signal HW programming completion */
	drm_atomic_helper_commit_hw_done(state);

	if (wait_for_vblank)
		drm_atomic_helper_wait_for_flip_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	/* return the stolen vga memory back to VRAM */
	if (!adev->mman.keep_stolen_vga_memory)
		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);

	/*
	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
	 * so we can put the GPU into runtime suspend if we're not driving any
	 * displays anymore
	 */
	for (i = 0; i < crtc_disable_count; i++)
		pm_runtime_put_autosuspend(dev->dev);
	pm_runtime_mark_last_busy(dev->dev);

	if (dc_state_temp)
		dc_release_state(dc_state_temp);
}

static int dm_force_atomic_commit(struct drm_connector *connector)
{
	int ret = 0;
	struct drm_device *ddev = connector->dev;
	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
	struct drm_plane *plane = disconnected_acrtc->base.primary;
	struct drm_connector_state *conn_state;
	struct drm_crtc_state *crtc_state;
	struct drm_plane_state *plane_state;

	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ddev->mode_config.acquire_ctx;

	/* Construct an atomic state to restore previous display setting */

	/*
	 * Attach connectors to drm_atomic_state
	 */
	conn_state = drm_atomic_get_connector_state(state, connector);

	ret = PTR_ERR_OR_ZERO(conn_state);
	if (ret)
		goto out;

	/* Attach crtc to drm_atomic_state */
	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);

	ret = PTR_ERR_OR_ZERO(crtc_state);
	if (ret)
		goto out;

	/* force a restore */
	crtc_state->mode_changed = true;

	/* Attach plane to drm_atomic_state */
	plane_state = drm_atomic_get_plane_state(state, plane);

	ret = PTR_ERR_OR_ZERO(plane_state);
	if (ret)
		goto out;

	/* Call commit internally with the state we just constructed */
	ret = drm_atomic_commit(state);

out:
	drm_atomic_state_put(state);
	if (ret)
		DRM_ERROR("Restoring old state failed with %i\n", ret);

	return ret;
}

/*
 * This function handles all cases when set mode does not come upon hotplug.
 * This includes when a display is unplugged then plugged back into the
 * same port and when running without usermode desktop manager support.
 */
void dm_restore_drm_connector_state(struct drm_device *dev,
				    struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct amdgpu_crtc *disconnected_acrtc;
	struct dm_crtc_state *acrtc_state;

	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
		return;

	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
	if (!disconnected_acrtc)
		return;

	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
	if (!acrtc_state->stream)
		return;

	/*
	 * If the previous sink is not released and different from the current,
	 * we deduce we are in a state where we cannot rely on a usermode call
	 * to turn on the display, so we do it here.
	 */
	if (acrtc_state->stream->sink != aconnector->dc_sink)
		dm_force_atomic_commit(&aconnector->base);
}

/*
 * Grabs all modesetting locks to serialize against any blocking commits,
 * and waits for completion of all non-blocking commits.
 */
static int do_aquire_global_lock(struct drm_device *dev,
				 struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_commit *commit;
	long ret;

	/*
	 * Adding all modeset locks to the acquire_ctx ensures that when the
	 * framework releases it, the extra locks we took here are released
	 * along with it.
	 */
	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
	if (ret)
		return ret;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
						  struct drm_crtc_commit, commit_entry);
		if (commit)
			drm_crtc_commit_get(commit);
		spin_unlock(&crtc->commit_lock);

		if (!commit)
			continue;

		/*
		 * Make sure all pending HW programming completed and
		 * page flips done
		 */
		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);

		if (ret > 0)
			ret = wait_for_completion_interruptible_timeout(
					&commit->flip_done, 10*HZ);

		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
				  crtc->base.id, crtc->name);

		drm_crtc_commit_put(commit);
	}

	return ret < 0 ? ret : 0;
}
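
/*
 * Illustrative note (not part of the driver): the 10*HZ waits above rely on
 * the return convention of wait_for_completion_interruptible_timeout(), which
 * is why only ret == 0 is reported as a timeout and ret < 0 is propagated as
 * an error. A minimal sketch of the three outcomes:
 *
 *	long ret = wait_for_completion_interruptible_timeout(&done, 10 * HZ);
 *	if (ret > 0)
 *		;	// completed; 'ret' is the number of jiffies left
 *	else if (ret == 0)
 *		;	// timed out after 10 seconds
 *	else
 *		;	// -ERESTARTSYS: interrupted by a signal
 */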

static void get_freesync_config_for_crtc(
	struct dm_crtc_state *new_crtc_state,
	struct dm_connector_state *new_con_state)
{
	struct mod_freesync_config config = {0};
	struct amdgpu_dm_connector *aconnector =
			to_amdgpu_dm_connector(new_con_state->base.connector);
	struct drm_display_mode *mode = &new_crtc_state->base.mode;
	int vrefresh = drm_mode_vrefresh(mode);
	bool fs_vid_mode = false;

	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
					vrefresh >= aconnector->min_vfreq &&
					vrefresh <= aconnector->max_vfreq;

	if (new_crtc_state->vrr_supported) {
		new_crtc_state->stream->ignore_msa_timing_param = true;
		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;

		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
		config.vsif_supported = true;
		config.btr = true;

		if (fs_vid_mode) {
			config.state = VRR_STATE_ACTIVE_FIXED;
			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
			goto out;
		} else if (new_crtc_state->base.vrr_enabled) {
			config.state = VRR_STATE_ACTIVE_VARIABLE;
		} else {
			config.state = VRR_STATE_INACTIVE;
		}
	}
out:
	new_crtc_state->freesync_config = config;
}
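
/*
 * Illustrative note (not part of the driver): DC's freesync module takes
 * refresh rates in micro-Hertz, hence the multiplications by 1000000 above.
 * For example, a panel reporting a 48-144 Hz range yields:
 *
 *	config.min_refresh_in_uhz =  48 * 1000000;	//  48000000 uHz
 *	config.max_refresh_in_uhz = 144 * 1000000;	// 144000000 uHz
 */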

static void reset_freesync_config_for_crtc(
	struct dm_crtc_state *new_crtc_state)
{
	new_crtc_state->vrr_supported = false;

	memset(&new_crtc_state->vrr_infopacket, 0,
	       sizeof(new_crtc_state->vrr_infopacket));
}

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state)
{
	struct drm_display_mode old_mode, new_mode;

	if (!old_crtc_state || !new_crtc_state)
		return false;

	old_mode = old_crtc_state->mode;
	new_mode = new_crtc_state->mode;

	if (old_mode.clock == new_mode.clock &&
	    old_mode.hdisplay == new_mode.hdisplay &&
	    old_mode.vdisplay == new_mode.vdisplay &&
	    old_mode.htotal == new_mode.htotal &&
	    old_mode.vtotal != new_mode.vtotal &&
	    old_mode.hsync_start == new_mode.hsync_start &&
	    old_mode.vsync_start != new_mode.vsync_start &&
	    old_mode.hsync_end == new_mode.hsync_end &&
	    old_mode.vsync_end != new_mode.vsync_end &&
	    old_mode.hskew == new_mode.hskew &&
	    old_mode.vscan == new_mode.vscan &&
	    (old_mode.vsync_end - old_mode.vsync_start) ==
	    (new_mode.vsync_end - new_mode.vsync_start))
		return true;

	return false;
}
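
/*
 * Illustrative example (not part of the driver): the check above matches mode
 * pairs that are identical except for the vertical front porch, i.e.
 * vsync_start/vsync_end/vtotal all shift by the same amount while the vsync
 * pulse width stays constant. Two hypothetical 1920x1080 modes that satisfy
 * it:
 *
 *	old: vdisplay=1080 vsync_start=1084 vsync_end=1089 vtotal=1125
 *	new: vdisplay=1080 vsync_start=1184 vsync_end=1189 vtotal=1225
 *
 * All horizontal timings are equal, the pulse width (vsync_end - vsync_start
 * == 5) is equal, and only the front porch grew by 100 lines - the pattern
 * freesync video modes use to retarget the refresh rate without a full
 * modeset.
 */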

static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
	uint64_t num, den, res;
	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;

	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;

	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
	den = (unsigned long long)new_crtc_state->mode.htotal *
	      (unsigned long long)new_crtc_state->mode.vtotal;

	res = div_u64(num, den);
	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
}
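
/*
 * Illustrative arithmetic (not part of the driver): mode.clock is in kHz, so
 * the fixed refresh rate in micro-Hertz computed above is
 *
 *	fixed_refresh_in_uhz = clock * 1000 * 1000000 / (htotal * vtotal)
 *
 * e.g. for a CEA 1920x1080@60 mode with clock = 148500, htotal = 2200 and
 * vtotal = 1125, this gives 148500000 * 1000000 / 2475000 = 60000000 uHz,
 * i.e. exactly 60 Hz.
 */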

static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
				struct drm_atomic_state *state,
				struct drm_crtc *crtc,
				struct drm_crtc_state *old_crtc_state,
				struct drm_crtc_state *new_crtc_state,
				bool enable,
				bool *lock_and_validation_needed)
{
	struct dm_atomic_state *dm_state = NULL;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	struct dc_stream_state *new_stream;
	int ret = 0;

	/*
	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
	 * update changed items
	 */
	struct amdgpu_crtc *acrtc = NULL;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;

	new_stream = NULL;

	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
	acrtc = to_amdgpu_crtc(crtc);
	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);

	/* TODO This hack should go away */
	if (aconnector && enable) {
		/* Make sure fake sink is created in plug-in scenario */
		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
							    &aconnector->base);
		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
							    &aconnector->base);

		if (IS_ERR(drm_new_conn_state)) {
			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
			goto fail;
		}

		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			goto skip_modeset;

		new_stream = create_validate_stream_for_sink(aconnector,
							     &new_crtc_state->mode,
							     dm_new_conn_state,
							     dm_old_crtc_state->stream);

		/*
		 * we can have no stream on ACTION_SET if a display
		 * was disconnected during S3, in this case it is not an
		 * error, the OS will be updated after detection, and
		 * will do the right thing on next atomic commit
		 */

		if (!new_stream) {
			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
					 __func__, acrtc->base.base.id);
			ret = -ENOMEM;
			goto fail;
		}

		/*
		 * TODO: Check VSDB bits to decide whether this should
		 * be enabled or not.
		 */
		new_stream->triggered_crtc_reset.enabled =
			dm->force_timing_sync;

		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

		ret = fill_hdr_info_packet(drm_new_conn_state,
					   &new_stream->hdr_static_metadata);
		if (ret)
			goto fail;

		/*
		 * If we already removed the old stream from the context
		 * (and set the new stream to NULL) then we can't reuse
		 * the old stream even if the stream and scaling are unchanged.
		 * We'll hit the BUG_ON and black screen.
		 *
		 * TODO: Refactor this function to allow this check to work
		 * in all conditions.
		 */
		if (amdgpu_freesync_vid_mode &&
		    dm_new_crtc_state->stream &&
		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
			goto skip_modeset;

		if (dm_new_crtc_state->stream &&
		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
			new_crtc_state->mode_changed = false;
			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
					 new_crtc_state->mode_changed);
		}
	}

	/* mode_changed flag may get updated above, need to check again */
	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
		goto skip_modeset;

	DRM_DEBUG_ATOMIC(
		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
		"connectors_changed:%d\n",
		acrtc->crtc_id,
		new_crtc_state->enable,
		new_crtc_state->active,
		new_crtc_state->planes_changed,
		new_crtc_state->mode_changed,
		new_crtc_state->active_changed,
		new_crtc_state->connectors_changed);

	/* Remove stream for any changed/disabled CRTC */
	if (!enable) {

		if (!dm_old_crtc_state->stream)
			goto skip_modeset;

		if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
		    is_timing_unchanged_for_freesync(new_crtc_state,
						     old_crtc_state)) {
			new_crtc_state->mode_changed = false;
			DRM_DEBUG_DRIVER(
				"Mode change not required for front porch change, "
				"setting mode_changed to %d",
				new_crtc_state->mode_changed);

			set_freesync_fixed_config(dm_new_crtc_state);

			goto skip_modeset;
		} else if (amdgpu_freesync_vid_mode && aconnector &&
			   is_freesync_video_mode(&new_crtc_state->mode,
						  aconnector)) {
			struct drm_display_mode *high_mode;

			high_mode = get_highest_refresh_rate_mode(aconnector, false);
			if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
				set_freesync_fixed_config(dm_new_crtc_state);
			}
		}

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
				 crtc->base.id);

		/* i.e. reset mode */
		if (dc_remove_stream_from_ctx(
				dm->dc,
				dm_state->context,
				dm_old_crtc_state->stream) != DC_OK) {
			ret = -EINVAL;
			goto fail;
		}

		dc_stream_release(dm_old_crtc_state->stream);
		dm_new_crtc_state->stream = NULL;

		reset_freesync_config_for_crtc(dm_new_crtc_state);

		*lock_and_validation_needed = true;

	} else { /* Add stream for any updated/enabled CRTC */
		/*
		 * Quick fix to prevent NULL pointer on new_stream when
		 * added MST connectors not found in existing crtc_state in the chained mode
		 * TODO: need to dig out the root cause of that
		 */
		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
			goto skip_modeset;

		if (modereset_required(new_crtc_state))
			goto skip_modeset;

		if (modeset_required(new_crtc_state, new_stream,
				     dm_old_crtc_state->stream)) {

			WARN_ON(dm_new_crtc_state->stream);

			ret = dm_atomic_get_state(state, &dm_state);
			if (ret)
				goto fail;

			dm_new_crtc_state->stream = new_stream;

			dc_stream_retain(new_stream);

			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
					 crtc->base.id);

			if (dc_add_stream_to_ctx(
					dm->dc,
					dm_state->context,
					dm_new_crtc_state->stream) != DC_OK) {
				ret = -EINVAL;
				goto fail;
			}

			*lock_and_validation_needed = true;
		}
	}

skip_modeset:
	/* Release extra reference */
	if (new_stream)
		dc_stream_release(new_stream);

	/*
	 * We want to do dc stream updates that do not require a
	 * full modeset below.
	 */
	if (!(enable && aconnector && new_crtc_state->active))
		return 0;
	/*
	 * Given above conditions, the dc state cannot be NULL because:
	 * 1. We're in the process of enabling CRTCs (just been added
	 *    to the dc context, or already is on the context)
	 * 2. Has a valid connector attached, and
	 * 3. Is currently active and enabled.
	 * => The dc stream state currently exists.
	 */
	BUG_ON(dm_new_crtc_state->stream == NULL);

	/* Scaling or underscan settings */
	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
	    drm_atomic_crtc_needs_modeset(new_crtc_state))
		update_stream_scaling_settings(
			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);

	/* ABM settings */
	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

	/*
	 * Color management settings. We also update color properties
	 * when a modeset is needed, to ensure it gets reprogrammed.
	 */
	if (dm_new_crtc_state->base.color_mgmt_changed ||
	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
		if (ret)
			goto fail;
	}

	/* Update Freesync settings. */
	get_freesync_config_for_crtc(dm_new_crtc_state,
				     dm_new_conn_state);

	return ret;

fail:
	if (new_stream)
		dc_stream_release(new_stream);
	return ret;
}

static bool should_reset_plane(struct drm_atomic_state *state,
			       struct drm_plane *plane,
			       struct drm_plane_state *old_plane_state,
			       struct drm_plane_state *new_plane_state)
{
	struct drm_plane *other;
	struct drm_plane_state *old_other_state, *new_other_state;
	struct drm_crtc_state *new_crtc_state;
	int i;

	/*
	 * TODO: Remove this hack once the checks below are sufficient
	 * to determine when we need to reset all the planes on
	 * the stream.
	 */
	if (state->allow_modeset)
		return true;

	/* Exit early if we know that we're adding or removing the plane. */
	if (old_plane_state->crtc != new_plane_state->crtc)
		return true;

	/* old crtc == new_crtc == NULL, plane not in context. */
	if (!new_plane_state->crtc)
		return false;

	new_crtc_state =
		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);

	if (!new_crtc_state)
		return true;

	/* CRTC Degamma changes currently require us to recreate planes. */
	if (new_crtc_state->color_mgmt_changed)
		return true;

	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
		return true;

	/*
	 * If there are any new primary or overlay planes being added or
	 * removed then the z-order can potentially change. To ensure
	 * correct z-order and pipe acquisition the current DC architecture
	 * requires us to remove and recreate all existing planes.
	 *
	 * TODO: Come up with a more elegant solution for this.
	 */
	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
		struct amdgpu_framebuffer *old_afb, *new_afb;

		if (other->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (old_other_state->crtc != new_plane_state->crtc &&
		    new_other_state->crtc != new_plane_state->crtc)
			continue;

		if (old_other_state->crtc != new_other_state->crtc)
			return true;

		/* Src/dst size and scaling updates. */
		if (old_other_state->src_w != new_other_state->src_w ||
		    old_other_state->src_h != new_other_state->src_h ||
		    old_other_state->crtc_w != new_other_state->crtc_w ||
		    old_other_state->crtc_h != new_other_state->crtc_h)
			return true;

		/* Rotation / mirroring updates. */
		if (old_other_state->rotation != new_other_state->rotation)
			return true;

		/* Blending updates. */
		if (old_other_state->pixel_blend_mode !=
		    new_other_state->pixel_blend_mode)
			return true;

		/* Alpha updates. */
		if (old_other_state->alpha != new_other_state->alpha)
			return true;

		/* Colorspace changes. */
		if (old_other_state->color_range != new_other_state->color_range ||
		    old_other_state->color_encoding != new_other_state->color_encoding)
			return true;

		/* Framebuffer checks fall at the end. */
		if (!old_other_state->fb || !new_other_state->fb)
			continue;

		/* Pixel format changes can require bandwidth updates. */
		if (old_other_state->fb->format != new_other_state->fb->format)
			return true;

		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;

		/* Tiling and DCC changes also require bandwidth updates. */
		if (old_afb->tiling_flags != new_afb->tiling_flags ||
		    old_afb->base.modifier != new_afb->base.modifier)
			return true;
	}

	return false;
}

static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
			      struct drm_plane_state *new_plane_state,
			      struct drm_framebuffer *fb)
{
	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
	unsigned int pitch;
	bool linear;

	if (fb->width > new_acrtc->max_cursor_width ||
	    fb->height > new_acrtc->max_cursor_height) {
		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
				 new_plane_state->fb->width,
				 new_plane_state->fb->height);
		return -EINVAL;
	}
	if (new_plane_state->src_w != fb->width << 16 ||
	    new_plane_state->src_h != fb->height << 16) {
		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
		return -EINVAL;
	}

	/* Pitch in pixels */
	pitch = fb->pitches[0] / fb->format->cpp[0];

	if (fb->width != pitch) {
		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
				 fb->width, pitch);
		return -EINVAL;
	}

	switch (pitch) {
	case 64:
	case 128:
	case 256:
		/* FB pitch is supported by cursor plane */
		break;
	default:
		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
		return -EINVAL;
	}

	/*
	 * Core DRM takes care of checking FB modifiers, so we only need to
	 * check tiling flags when the FB doesn't have a modifier.
	 */
	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
		if (adev->family < AMDGPU_FAMILY_AI) {
			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
				 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
		} else {
			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
		}
		if (!linear) {
			DRM_DEBUG_ATOMIC("Cursor FB not linear");
			return -EINVAL;
		}
	}

	return 0;
}
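
/*
 * Illustrative example (not part of the driver): for an ARGB8888 cursor
 * (cpp[0] == 4 bytes per pixel), a 64x64 framebuffer allocated with a
 * 256-byte stride passes the checks above:
 *
 *	pitch = pitches[0] / cpp[0] = 256 / 4 = 64 pixels == fb->width
 *
 * and 64 is one of the pitches the cursor plane accepts. A hypothetical
 * 100x100 cursor would be rejected because 100 is not 64, 128 or 256.
 */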

static int dm_update_plane_state(struct dc *dc,
				 struct drm_atomic_state *state,
				 struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state,
				 struct drm_plane_state *new_plane_state,
				 bool enable,
				 bool *lock_and_validation_needed)
{
	struct dm_atomic_state *dm_state = NULL;
	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
	struct amdgpu_crtc *new_acrtc;
	bool needs_reset;
	int ret = 0;

	new_plane_crtc = new_plane_state->crtc;
	old_plane_crtc = old_plane_state->crtc;
	dm_new_plane_state = to_dm_plane_state(new_plane_state);
	dm_old_plane_state = to_dm_plane_state(old_plane_state);

	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
		if (!enable || !new_plane_crtc ||
		    drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		new_acrtc = to_amdgpu_crtc(new_plane_crtc);

		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
			return -EINVAL;
		}

		if (new_plane_state->fb) {
			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
						 new_plane_state->fb);
			if (ret)
				return ret;
		}

		return 0;
	}

	needs_reset = should_reset_plane(state, plane, old_plane_state,
					 new_plane_state);

	/* Remove any changed/removed planes */
	if (!enable) {
		if (!needs_reset)
			return 0;

		if (!old_plane_crtc)
			return 0;

		old_crtc_state = drm_atomic_get_old_crtc_state(
				state, old_plane_crtc);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (!dm_old_crtc_state->stream)
			return 0;

		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, old_plane_crtc->base.id);

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			return ret;

		if (!dc_remove_plane_from_context(
				dc,
				dm_old_crtc_state->stream,
				dm_old_plane_state->dc_state,
				dm_state->context)) {

			return -EINVAL;
		}

		dc_plane_state_release(dm_old_plane_state->dc_state);
		dm_new_plane_state->dc_state = NULL;

		*lock_and_validation_needed = true;

	} else { /* Add new planes */
		struct dc_plane_state *dc_new_plane_state;

		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		if (!new_plane_crtc)
			return 0;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (!dm_new_crtc_state->stream)
			return 0;

		if (!needs_reset)
			return 0;

		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
		if (ret)
			return ret;

		WARN_ON(dm_new_plane_state->dc_state);

		dc_new_plane_state = dc_create_plane_state(dc);
		if (!dc_new_plane_state)
			return -ENOMEM;

		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, new_plane_crtc->base.id);

		ret = fill_dc_plane_attributes(
			drm_to_adev(new_plane_crtc->dev),
			dc_new_plane_state,
			new_plane_state,
			new_crtc_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		/*
		 * Any atomic check errors that occur after this will
		 * not need a release. The plane state will be attached
		 * to the stream, and therefore part of the atomic
		 * state. It'll be released when the atomic state is
		 * cleaned.
		 */
		if (!dc_add_plane_to_context(
				dc,
				dm_new_crtc_state->stream,
				dc_new_plane_state,
				dm_state->context)) {

			dc_plane_state_release(dc_new_plane_state);
			return -EINVAL;
		}

		dm_new_plane_state->dc_state = dc_new_plane_state;

		/*
		 * Tell DC to do a full surface update every time there
		 * is a plane change. Inefficient, but works for now.
		 */
		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;

		*lock_and_validation_needed = true;
	}

	return ret;
}

static int dm_check_crtc_cursor(struct drm_atomic_state *state,
				struct drm_crtc *crtc,
				struct drm_crtc_state *new_crtc_state)
{
	struct drm_plane_state *new_cursor_state, *new_primary_state;
	int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;

	/*
	 * On DCE and DCN there is no dedicated hardware cursor plane. We get a
	 * cursor per pipe but it's going to inherit the scaling and
	 * positioning from the underlying pipe. Check the cursor plane's
	 * blending properties match the primary plane's.
	 */
	new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
	new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
	if (!new_cursor_state || !new_primary_state ||
	    !new_cursor_state->fb || !new_primary_state->fb) {
		return 0;
	}

	cursor_scale_w = new_cursor_state->crtc_w * 1000 /
			 (new_cursor_state->src_w >> 16);
	cursor_scale_h = new_cursor_state->crtc_h * 1000 /
			 (new_cursor_state->src_h >> 16);

	primary_scale_w = new_primary_state->crtc_w * 1000 /
			  (new_primary_state->src_w >> 16);
	primary_scale_h = new_primary_state->crtc_h * 1000 /
			  (new_primary_state->src_h >> 16);

	if (cursor_scale_w != primary_scale_w ||
	    cursor_scale_h != primary_scale_h) {
		drm_dbg_atomic(crtc->dev, "Cursor plane scaling doesn't match primary plane\n");
		return -EINVAL;
	}

	return 0;
}
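
/*
 * Illustrative arithmetic (not part of the driver): src_w/src_h are 16.16
 * fixed point, so the scale factors above are in thousandths. A primary plane
 * scanned out 1:1 (src_w = 1920 << 16, crtc_w = 1920) gives
 * primary_scale_w = 1920 * 1000 / 1920 = 1000, while one downscaled 2x
 * (src_w = 3840 << 16, crtc_w = 1920) gives 1920 * 1000 / 3840 = 500. With a
 * cursor scanned out 1:1 on the latter configuration, 1000 != 500 and the
 * check above fails with -EINVAL.
 */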

#if defined(CONFIG_DRM_AMD_DC_DCN)
static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct amdgpu_dm_connector *aconnector = NULL;
	int i;

	for_each_new_connector_in_state(state, connector, conn_state, i) {
		if (conn_state->crtc != crtc)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);
		if (!aconnector->port || !aconnector->mst_port)
			aconnector = NULL;
		else
			break;
	}

	if (!aconnector)
		return 0;

	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
}
#endif

static int validate_overlay(struct drm_atomic_state *state)
{
	int i;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct drm_plane_state *primary_state, *overlay_state = NULL;

	/* Check if primary plane is contained inside overlay */
	for_each_new_plane_in_state_reverse(state, plane, new_plane_state, i) {
		if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
			if (drm_atomic_plane_disabling(plane->state, new_plane_state))
				return 0;

			overlay_state = new_plane_state;
			continue;
		}
	}

	/* check if we're making changes to the overlay plane */
	if (!overlay_state)
		return 0;

	/* check if overlay plane is enabled */
	if (!overlay_state->crtc)
		return 0;

	/* find the primary plane for the CRTC that the overlay is enabled on */
	primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
	if (IS_ERR(primary_state))
		return PTR_ERR(primary_state);

	/* check if primary plane is enabled */
	if (!primary_state->crtc)
		return 0;

	/* Perform the bounds check to ensure the overlay plane covers the primary */
	if (primary_state->crtc_x < overlay_state->crtc_x ||
	    primary_state->crtc_y < overlay_state->crtc_y ||
	    primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
	    primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
		DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
 * @dev: The DRM device
 * @state: The atomic state to commit
 *
 * Validate that the given atomic state is programmable by DC into hardware.
 * This involves constructing a &struct dc_state reflecting the new hardware
 * state we wish to commit, then querying DC to see if it is programmable. It's
 * important not to modify the existing DC state. Otherwise, atomic_check
 * may unexpectedly commit hardware changes.
 *
 * When validating the DC state, it's important that the right locks are
 * acquired. For full updates case which removes/adds/updates streams on one
 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
 * that any such full update commit will wait for completion of any outstanding
 * flip using DRMs synchronization events.
 *
 * Note that DM adds the affected connectors for all CRTCs in state, when that
 * might not seem necessary. This is because DC stream creation requires the
 * DC sink, which is tied to the DRM connector state. Cleaning this up should
 * be possible but non-trivial - a possible TODO item.
 *
 * Return: -Error code if validation failed.
 */
static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_atomic_state *dm_state = NULL;
	struct dc *dc = adev->dm.dc;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	enum dc_status status;
	int ret, i;
	bool lock_and_validation_needed = false;
	struct dm_crtc_state *dm_old_crtc_state;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	struct dsc_mst_fairness_vars vars[MAX_PIPES];
#endif

	trace_amdgpu_dm_atomic_check_begin(state);

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		goto fail;

	/* Check connector changes */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);

		/* Skip connectors that are disabled or part of modeset already. */
		if (!old_con_state->crtc && !new_con_state->crtc)
			continue;

		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
		if (IS_ERR(new_crtc_state)) {
			ret = PTR_ERR(new_crtc_state);
			goto fail;
		}

		if (dm_old_con_state->abm_level !=
		    dm_new_con_state->abm_level)
			new_crtc_state->connectors_changed = true;
	}

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (dc_resource_is_dsc_encoding_supported(dc)) {
		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
				ret = add_affected_mst_dsc_crtcs(state, crtc);
				if (ret)
					goto fail;
			}
		}
	}
#endif
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->color_mgmt_changed &&
		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
		    dm_old_crtc_state->dsc_force_changed == false)
			continue;

		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
		if (ret)
			goto fail;

		if (!new_crtc_state->enable)
			continue;

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			return ret;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret)
			goto fail;

		if (dm_old_crtc_state->dsc_force_changed)
			new_crtc_state->mode_changed = true;
	}

	/*
	 * Add all primary and overlay planes on the CRTC to the state
	 * whenever a plane is enabled to maintain correct z-ordering
	 * and to enable fast surface updates.
	 */
	drm_for_each_crtc(crtc, dev) {
		bool modified = false;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			if (new_plane_state->crtc == crtc ||
			    old_plane_state->crtc == crtc) {
				modified = true;
				break;
			}
		}

		if (!modified)
			continue;

		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			new_plane_state =
				drm_atomic_get_plane_state(state, plane);

			if (IS_ERR(new_plane_state)) {
				ret = PTR_ERR(new_plane_state);
				goto fail;
			}
		}
	}

	/* Remove existing planes if they are modified */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    false,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Disable all crtcs which require disable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   false,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Enable all crtcs which require enable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   true,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	ret = validate_overlay(state);
	if (ret)
		goto fail;

	/* Add new/modified planes */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    true,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Run this here since we want to validate the streams we created */
	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		goto fail;

	/* Check cursor planes scaling */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
		if (ret)
			goto fail;
	}

	if (state->legacy_cursor_update) {
		/*
		 * This is a fast cursor update coming from the plane update
		 * helper, check if it can be done asynchronously for better
		 * performance.
		 */
		state->async_update =
			!drm_atomic_helper_async_check(dev, state);

		/*
		 * Skip the remaining global validation if this is an async
		 * update. Cursor updates can be done without affecting
		 * state or bandwidth calcs and this avoids the performance
		 * penalty of locking the private state object and
		 * allocating a new dc_state.
		 */
		if (state->async_update)
			return 0;
	}

	/* Check scaling and underscan changes */
	/*
	 * TODO Removed scaling changes validation due to inability to commit
	 * new stream into context w\o causing full reset. Need to
	 * decide how to handle.
	 */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(
				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
			continue;

		/* Skip anything that is not a scaling or underscan change */
		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
			continue;

		lock_and_validation_needed = true;
	}

	/**
	 * Streams and planes are reset when there are changes that affect
	 * bandwidth. Anything that affects bandwidth needs to go through
	 * DC global validation to ensure that the configuration can be applied
	 * to hardware.
	 *
	 * We have to currently stall out here in atomic_check for outstanding
	 * commits to finish in this case because our IRQ handlers reference
	 * DRM state directly - we can end up disabling interrupts too early
	 * if we don't.
	 *
	 * TODO: Remove this stall and drop DM state private objects.
	 */
	if (lock_and_validation_needed) {
		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		ret = do_aquire_global_lock(dev, state);
		if (ret)
			goto fail;

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars))
			goto fail;

		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
		if (ret)
			goto fail;
#endif

		/*
		 * Perform validation of MST topology in the state:
		 * We need to perform MST atomic check before calling
		 * dc_validate_global_state(), or there is a chance
		 * to get stuck in an infinite loop and hang eventually.
		 */
		ret = drm_dp_mst_atomic_check(state);
		if (ret)
			goto fail;

		status = dc_validate_global_state(dc, dm_state->context, false);
		if (status != DC_OK) {
			drm_dbg_atomic(dev,
				       "DC global validation failure: %s (%d)",
				       dc_status_to_str(status), status);
			ret = -EINVAL;
			goto fail;
		}
	} else {
		/*
		 * The commit is a fast update. Fast updates shouldn't change
		 * the DC context, affect global validation, and can have their
		 * commit work done in parallel with other commits not touching
		 * the same resource. If we have a new DC context as part of
		 * the DM atomic state from validation we need to free it and
		 * retain the existing one instead.
		 *
		 * Furthermore, since the DM atomic state only contains the DC
		 * context and can safely be annulled, we can free the state
		 * and clear the associated private object now to free
		 * some memory and avoid a possible use-after-free later.
		 */

		for (i = 0; i < state->num_private_objs; i++) {
			struct drm_private_obj *obj = state->private_objs[i].ptr;

			if (obj->funcs == adev->dm.atomic_obj.funcs) {
				int j = state->num_private_objs-1;

				dm_atomic_destroy_state(obj,
						state->private_objs[i].state);

				/*
				 * If i is not at the end of the array then the
				 * last element needs to be moved to where i was
				 * before the array can safely be truncated.
				 */
				if (i != j)
					state->private_objs[i] =
						state->private_objs[j];

				state->private_objs[j].ptr = NULL;
				state->private_objs[j].state = NULL;
				state->private_objs[j].old_state = NULL;
				state->private_objs[j].new_state = NULL;

				state->num_private_objs = j;
				break;
			}
		}
	}

	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct dm_crtc_state *dm_new_crtc_state =
			to_dm_crtc_state(new_crtc_state);

		dm_new_crtc_state->update_type = lock_and_validation_needed ?
						 UPDATE_TYPE_FULL :
						 UPDATE_TYPE_FAST;
	}

	/* Must be success */
	WARN_ON(ret);

	trace_amdgpu_dm_atomic_check_finish(state, ret);

	return ret;

fail:
	if (ret == -EDEADLK)
		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);

	trace_amdgpu_dm_atomic_check_finish(state, ret);

	return ret;
}

static bool is_dp_capable_without_timing_msa(struct dc *dc,
					     struct amdgpu_dm_connector *amdgpu_dm_connector)
{
	uint8_t dpcd_data;
	bool capable = false;

	if (amdgpu_dm_connector->dc_link &&
	    dm_helpers_dp_read_dpcd(
			NULL,
			amdgpu_dm_connector->dc_link,
			DP_DOWN_STREAM_PORT_COUNT,
			&dpcd_data,
			sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
	}

	return capable;
}
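
/*
 * Illustrative note (not part of the driver): DP_DOWN_STREAM_PORT_COUNT is
 * DPCD register 0x007, and bit 6 (DP_MSA_TIMING_PAR_IGNORED) advertises that
 * the sink can ignore the MSA timing parameters, a prerequisite for adaptive
 * sync. The read above effectively evaluates:
 *
 *	capable = (dpcd[0x007] & (1 << 6)) != 0;
 */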

static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
		unsigned int offset,
		unsigned int total_length,
		uint8_t *data,
		unsigned int length,
		struct amdgpu_hdmi_vsdb_info *vsdb)
{
	bool res;
	union dmub_rb_cmd cmd;
	struct dmub_cmd_send_edid_cea *input;
	struct dmub_cmd_edid_cea_output *output;

	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
		return false;

	memset(&cmd, 0, sizeof(cmd));

	input = &cmd.edid_cea.data.input;

	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
	cmd.edid_cea.header.sub_type = 0;
	cmd.edid_cea.header.payload_bytes =
		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
	input->offset = offset;
	input->length = length;
	input->total_length = total_length;
	memcpy(input->payload, data, length);

	res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
	if (!res) {
		DRM_ERROR("EDID CEA parser failed\n");
		return false;
	}

	output = &cmd.edid_cea.data.output;

	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
		if (!output->ack.success) {
			DRM_ERROR("EDID CEA ack failed at offset %d\n",
				  output->ack.offset);
		}
	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
		if (!output->amd_vsdb.vsdb_found)
			return false;

		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
	} else {
		DRM_WARN("Unknown EDID CEA parser results\n");
		return false;
	}

	return true;
}

static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
		uint8_t *edid_ext, int len,
		struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	int i;

	/* send extension block to DMCU for parsing */
	for (i = 0; i < len; i += 8) {
		bool res;
		int offset;

		/* send 8 bytes a time */
		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
			return false;

		if (i+8 == len) {
			/* EDID block sent completed, expect result */
			int version, min_rate, max_rate;

			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
			if (res) {
				/* amd vsdb found */
				vsdb_info->freesync_supported = 1;
				vsdb_info->amd_vsdb_version = version;
				vsdb_info->min_refresh_rate_hz = min_rate;
				vsdb_info->max_refresh_rate_hz = max_rate;
				return true;
			}
			/* not amd vsdb */
			return false;
		}

		/* check for ack */
		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
		if (!res)
			return false;
	}

	return false;
}

static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
		uint8_t *edid_ext, int len,
		struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	int i;

	/* send extension block to DMUB for parsing */
	for (i = 0; i < len; i += 8) {
		/* send 8 bytes a time */
		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
			return false;
	}

	return vsdb_info->freesync_supported;
}
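
/*
 * Illustrative note (not part of the driver): a CEA extension block is
 * EDID_LENGTH (128) bytes, so the loops above stream it to the firmware
 * parser in 128 / 8 = 16 eight-byte chunks. The AMD VSDB result only becomes
 * available once the final chunk has been sent; earlier chunks are just
 * acknowledged.
 */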

static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
		uint8_t *edid_ext, int len,
		struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);

	if (adev->dm.dmub_srv)
		return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
	else
		return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
}

static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	uint8_t *edid_ext = NULL;
	int i;
	bool valid_vsdb_found = false;

	/*----- drm_find_cea_extension() -----*/
	/* No EDID or EDID extensions */
	if (edid == NULL || edid->extensions == 0)
		return -ENODEV;

	/* Find CEA extension */
	for (i = 0; i < edid->extensions; i++) {
		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
		if (edid_ext[0] == CEA_EXT)
			break;
	}

	if (i == edid->extensions)
		return -ENODEV;

	/*----- cea_db_offsets() -----*/
	if (edid_ext[0] != CEA_EXT)
		return -ENODEV;

	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);

	return valid_vsdb_found ? i : -ENODEV;
}
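
/*
 * Illustrative note (not part of the driver): an EDID is a 128-byte base
 * block followed by edid->extensions further 128-byte blocks, so extension i
 * lives at byte offset EDID_LENGTH * (i + 1). A CEA extension is identified
 * by its first byte being CEA_EXT (0x02):
 *
 *	uint8_t *ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
 *	bool is_cea = (ext[0] == CEA_EXT);
 */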

void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				    struct edid *edid)
{
	int i = 0;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;
	struct dc_sink *sink;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	bool freesync_capable = false;
	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};

	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state", __func__);
		goto update;
	}

	sink = amdgpu_dm_connector->dc_sink ?
		amdgpu_dm_connector->dc_sink :
		amdgpu_dm_connector->dc_em_sink;

	if (!edid || !sink) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;
		connector->display_info.monitor_range.min_vfreq = 0;
		connector->display_info.monitor_range.max_vfreq = 0;
		freesync_capable = false;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	if (!adev->dm.freesync_module)
		goto update;

	if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
			|| sink->sink_signal == SIGNAL_TYPE_EDP) {
		bool edid_check_required = false;

		if (edid) {
			edid_check_required = is_dp_capable_without_timing_msa(
						adev->dm.dc,
						amdgpu_dm_connector);
		}

		if (edid_check_required && (edid->version > 1 ||
		    (edid->version == 1 && edid->revision > 1))) {
			for (i = 0; i < 4; i++) {

				timing = &edid->detailed_timings[i];
				data = &timing->data.other_data;
				range = &data->data.range;
				/*
				 * Check if monitor has continuous frequency mode
				 */
				if (data->type != EDID_DETAIL_MONITOR_RANGE)
					continue;
				/*
				 * Check for flag range limits only. If flag == 1 then
				 * no additional timing information is provided.
				 * Default GTF, GTF Secondary curve and CVT are not
				 * supported.
				 */
				if (range->flags != 1)
					continue;

				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
				amdgpu_dm_connector->pixel_clock_mhz =
					range->pixel_clock_mhz * 10;

				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;

				break;
			}

			if (amdgpu_dm_connector->max_vfreq -
			    amdgpu_dm_connector->min_vfreq > 10) {

				freesync_capable = true;
			}
		}
	} else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
		if (i >= 0 && vsdb_info.freesync_supported) {
			timing = &edid->detailed_timings[i];
			data = &timing->data.other_data;

			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;

			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
		}
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}
11308
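/*
 * Illustrative sketch (not part of the driver): the capability rule shared
 * by both branches above, written out as a hypothetical predicate. A sink
 * is only reported as VRR-capable when its refresh window spans more than
 * 10 Hz, whether the range came from an EDID monitor-range descriptor (DP)
 * or from the AMD VSDB (HDMI).
 */
static inline bool vrr_window_usable(int min_vfreq_hz, int max_vfreq_hz)
{
	return max_vfreq_hz - min_vfreq_hz > 10; /* 48-144 Hz passes, 58-60 Hz does not */
}
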
void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	int i;

	mutex_lock(&adev->dm.dc_lock);
	if (dc->current_state) {
		for (i = 0; i < dc->current_state->stream_count; ++i)
			dc->current_state->streams[i]
				->triggered_crtc_reset.enabled =
				adev->dm.force_timing_sync;

		dm_enable_per_frame_crtc_master_sync(dc->current_state);
		dc_trigger_sync(dc, dc->current_state);
	}
	mutex_unlock(&adev->dm.dc_lock);
}

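/*
 * Illustrative sketch (not part of the driver): how a debugfs-style knob
 * might drive the helper above. set_force_timing_sync() is hypothetical;
 * the real entry point lives in the DM debugfs code.
 */
static void set_force_timing_sync(struct amdgpu_device *adev, bool enable)
{
	adev->dm.force_timing_sync = enable;		  /* latch the request */
	amdgpu_dm_trigger_timing_sync(adev_to_drm(adev)); /* resync every active stream */
}
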
void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
		       uint32_t value, const char *func_name)
{
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register write; address = 0\n");
		return;
	}
#endif
	cgs_write_register(ctx->cgs_device, address, value);
	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
}

uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
			  const char *func_name)
{
	uint32_t value;
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register read; address = 0\n");
		return 0;
	}
#endif

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
		ASSERT(false);
		return 0;
	}

	value = cgs_read_register(ctx->cgs_device, address);

	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);

	return value;
}

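/*
 * Illustrative sketch (not part of the driver): the two helpers above back
 * DC's register-access macros, so a read-modify-write built on them looks
 * roughly like this. rmw_reg_sketch() and its mask/value parameters are
 * hypothetical.
 */
static void rmw_reg_sketch(const struct dc_context *ctx, uint32_t address,
			   uint32_t mask, uint32_t value)
{
	uint32_t reg = dm_read_reg_func(ctx, address, __func__);

	reg = (reg & ~mask) | (value & mask);	/* touch only the masked field */
	dm_write_reg_func(ctx, address, reg, __func__);
}
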
int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux, struct dc_context *ctx,
					 uint8_t status_type, uint32_t *operation_result)
{
	struct amdgpu_device *adev = ctx->driver_context;
	int return_status = -1;
	struct dmub_notification *p_notify = adev->dm.dmub_notify;

	if (is_cmd_aux) {
		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
			return_status = p_notify->aux_reply.length;
			*operation_result = p_notify->result;
		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
			*operation_result = AUX_RET_ERROR_TIMEOUT;
		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
			*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
		} else {
			*operation_result = AUX_RET_ERROR_UNKNOWN;
		}
	} else {
		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
			return_status = 0;
			*operation_result = p_notify->sc_status;
		} else {
			*operation_result = SET_CONFIG_UNKNOWN_ERROR;
		}
	}

	return return_status;
}

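/*
 * Contract summary for the helper above (descriptive, not new behavior):
 * - AUX path, success: returns the AUX reply length; *operation_result
 *   carries the DMUB notification result.
 * - AUX path, timeout / engine failure: returns -1 with *operation_result
 *   set to AUX_RET_ERROR_TIMEOUT or AUX_RET_ERROR_ENGINE_ACQUIRE.
 * - SET_CONFIG path: returns 0 on success with the status in
 *   *operation_result, otherwise -1 with SET_CONFIG_UNKNOWN_ERROR.
 */
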
int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
					     unsigned int link_index, void *cmd_payload, void *operation_result)
{
	struct amdgpu_device *adev = ctx->driver_context;
	int ret = 0;

	if (is_cmd_aux) {
		dc_process_dmub_aux_transfer_async(ctx->dc,
			link_index, (struct aux_payload *)cmd_payload);
	} else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
			(struct set_config_cmd_payload *)cmd_payload,
			adev->dm.dmub_notify)) {
		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
					ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
					(uint32_t *)operation_result);
	}

	ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
	if (ret == 0) {
		DRM_ERROR("wait_for_completion_timeout timed out!");
		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
				ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
				(uint32_t *)operation_result);
	}

	if (is_cmd_aux) {
		if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
			struct aux_payload *payload = (struct aux_payload *)cmd_payload;

			payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
			if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
			    payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
				memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
				       adev->dm.dmub_notify->aux_reply.length);
			}
		}
	}

	return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
			ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
			(uint32_t *)operation_result);
}
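
/*
 * Illustrative sketch (not part of the driver): a synchronous native-AUX
 * DPCD read through the helper above. read_dpcd_sync() is hypothetical and
 * the aux_payload fields follow the usage visible in this file
 * (write/data/reply); address and length are assumed from DC's aux API.
 */
static int read_dpcd_sync(struct dc_context *ctx, unsigned int link_index,
			  uint32_t dpcd_addr, uint8_t *buf, uint32_t size)
{
	struct aux_payload payload = {0};
	uint32_t operation_result = 0;
	uint8_t reply = 0;

	payload.address = dpcd_addr;
	payload.length = size;
	payload.data = buf;
	payload.reply = &reply;
	payload.write = false;	/* read transaction */

	/* true selects the AUX path; returns the reply length or -1 */
	return amdgpu_dm_process_dmub_aux_transfer_sync(true, ctx, link_index,
							&payload, &operation_result);
}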