drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100
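/*
 * Rough layout of the signed DMUB ucode image, as implied by the way
 * dm_dmub_hw_init() below slices it (an illustrative sketch inferred from
 * this file, not an authoritative format description):
 *
 *   ucode_array_offset_bytes --.
 *                              v
 *   ... | PSP header (0x100) | inst/const payload | PSP footer (0x100) | bss/data |
 *       |<-------------------- inst_const_bytes ------------------->|
 *
 * Only the payload between header and footer is copied into the DMUB
 * instruction/constant window; the bss/data bytes follow as a separate
 * region.
 */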
/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */
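/*
 * An illustrative round trip (a sketch built from the forward declarations
 * below, not a guaranteed call graph): a userspace atomic commit enters
 * through amdgpu_dm_atomic_check() and amdgpu_dm_atomic_commit(),
 * amdgpu_dm_atomic_commit_tail() translates the DRM state into DC stream
 * and plane updates, and DC's results are reported back to DRM via vblank
 * events and fences.
 */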

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
	}
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	if (!e)
		WARN_ON(1);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one is incremented at the start of this
	 * vblank of pageflip completion, so last_flip_vblank is the forbidden
	 * count for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
					adev->dm.freesync_module,
					acrtc->dm_irq_params.stream,
					&acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
					adev->dm.dc,
					acrtc->dm_irq_params.stream,
					&acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
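/*
 * Illustrative scanout timeline for the IRQ handlers in this file (a sketch
 * assembled from the comments here, not from register-level documentation):
 *
 *   |---- active scanout ----|-- front porch --|-- vsync / back porch --|
 *                            ^                 ^
 *                            vblank IRQ        VUPDATE IRQ
 *                            (dm_crtc_high_irq) (dm_vupdate_high_irq)
 *
 * In VRR mode the front porch stretches for an unpredictable time, so
 * timestamps taken inside it are not yet stable; that is why core vblank
 * handling moves from the vblank IRQ to the VUPDATE IRQ when VRR is active.
 */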
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/**
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/**
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
	    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void* handle);

/* Allocate memory for FBC compressed data  */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;


	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

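	/*
	 * Illustrative sizing (assumption: the compressor needs one
	 * worst-case frame at 4 bytes per pixel): for a 1080p CEA timing
	 * with 2200x1125 total pixels this allocates 2200 * 1125 * 4
	 * bytes, roughly 9.4 MiB of GTT.
	 */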
	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}

	}

}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increases the system aperture high address
		 * (by adding 1) to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;


	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;

}
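/*
 * Worked example of the shift granularities above (illustrative, derived
 * from the shift amounts in this function rather than from register
 * documentation): the system aperture bounds are expressed in 256 KiB
 * units (>> 18), the AGP window in 16 MiB units (>> 24), and the GART page
 * table addresses in 4 KiB pages (>> 12) with bits 47:44 carried in the
 * separate high_part. E.g. a gart_start of 0x0000008000000000 yields
 * low_part 0x08000000 and high_part 0x0.
 */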
#endif

#ifdef CONFIG_DEBUG_FS
static int create_crtc_crc_properties(struct amdgpu_display_manager *dm)
{
	dm->crc_win_x_start_property =
		drm_property_create_range(adev_to_drm(dm->adev),
					  DRM_MODE_PROP_ATOMIC,
					  "AMD_CRC_WIN_X_START", 0, U16_MAX);
	if (!dm->crc_win_x_start_property)
		return -ENOMEM;

	dm->crc_win_y_start_property =
		drm_property_create_range(adev_to_drm(dm->adev),
					  DRM_MODE_PROP_ATOMIC,
					  "AMD_CRC_WIN_Y_START", 0, U16_MAX);
	if (!dm->crc_win_y_start_property)
		return -ENOMEM;

	dm->crc_win_x_end_property =
		drm_property_create_range(adev_to_drm(dm->adev),
					  DRM_MODE_PROP_ATOMIC,
					  "AMD_CRC_WIN_X_END", 0, U16_MAX);
	if (!dm->crc_win_x_end_property)
		return -ENOMEM;

	dm->crc_win_y_end_property =
		drm_property_create_range(adev_to_drm(dm->adev),
					  DRM_MODE_PROP_ATOMIC,
					  "AMD_CRC_WIN_Y_END", 0, U16_MAX);
	if (!dm->crc_win_y_end_property)
		return -ENOMEM;

	return 0;
}
#endif

static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if(amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			init_data.flags.disable_dmcu = true;
		break;
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	init_data.soc_bounding_box = adev->dm.soc_bounding_box;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->asic_type == CHIP_RENOIR) {
		struct dc_phy_addr_space_config pa_config;

		mmhub_read_system_context(adev, &pa_config);

		// Call the DC init_memory func
		dc_setup_system_context(adev->dm.dc, &pa_config);
	}
#endif

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
#ifdef CONFIG_DEBUG_FS
	if (create_crtc_crc_properties(&adev->dm))
		DRM_ERROR("amdgpu: failed to create crc property.\n");
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}


	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif
	if (adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);

	return;
}

static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch(adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_VANGOGH:
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}

static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
		break;
	case CHIP_SIENNA_CICHLID:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
		break;
	case CHIP_NAVY_FLOUNDER:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
		break;
	case CHIP_VANGOGH:
		dmub_asic = DMUB_ASIC_DCN301;
		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
		break;
	case CHIP_DIMGREY_CAVEFISH:
		dmub_asic = DMUB_ASIC_DCN302;
		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
		break;

	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

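	/*
	 * Sketch of the buffer setup that follows (an assumption drawn from
	 * the calls below, not from DMUB documentation): first
	 * dmub_srv_calc_region_info() sizes every window, then a single VRAM
	 * BO of region_info.fb_size is allocated, and finally
	 * dmub_srv_calc_fb_info() rebases each window's cpu/gpu address into
	 * that one allocation.
	 */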
	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data = region_params.bss_data_size ?
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes) : NULL;
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}

static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	release_firmware(adev->dm.dmub_fw);
	adev->dm.dmub_fw = NULL;

	release_firmware(adev->dm.fw_dmcu);
	adev->dm.fw_dmcu = NULL;

	return 0;
}

static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}

static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;
	bool ret = true;

	dmcu = adev->dm.dc->res_pool->dmcu;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction; don't allow below 1%
	 * 0xFFFF x 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;

	/* In the case where abm is implemented on dmcub,
	 * dmcu object will be null.
	 * ABM 2.4 and up are implemented on dmcub.
	 */
	if (dmcu)
		ret = dmcu_load_iram(dmcu, params);
	else if (adev->dm.dc->ctx->dmub_srv)
		ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);

	if (!ret)
		return -EINVAL;

	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
}

static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}

9340dfd3
HW
1619static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1620{
1621 struct smu_context *smu = &adev->smu;
1622 int ret = 0;
1623
1624 if (!is_support_sw_smu(adev))
1625 return 0;
1626
1627 /* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
1628 * on window driver dc implementation.
1629 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
1630 * should be passed to smu during boot up and resume from s3.
1631 * boot up: dc calculate dcn watermark clock settings within dc_create,
1632 * dcn20_resource_construct
1633 * then call pplib functions below to pass the settings to smu:
1634 * smu_set_watermarks_for_clock_ranges
1635 * smu_set_watermarks_table
1636 * navi10_set_watermarks_table
1637 * smu_write_watermarks_table
1638 *
1639 * For Renoir, clock settings of dcn watermark are also fixed values.
1640 * dc has implemented different flow for window driver:
1641 * dc_hardware_init / dc_set_power_state
1642 * dcn10_init_hw
1643 * notify_wm_ranges
1644 * set_wm_ranges
1645 * -- Linux
1646 * smu_set_watermarks_for_clock_ranges
1647 * renoir_set_watermarks_table
1648 * smu_write_watermarks_table
1649 *
1650 * For Linux,
1651 * dc_hardware_init -> amdgpu_dm_init
1652 * dc_set_power_state --> dm_resume
1653 *
1654 * therefore, this function apply to navi10/12/14 but not Renoir
1655 * *
1656 */
1657 switch(adev->asic_type) {
1658 case CHIP_NAVI10:
1659 case CHIP_NAVI14:
1660 case CHIP_NAVI12:
1661 break;
1662 default:
1663 return 0;
1664 }
1665
e7a95eea
EQ
1666 ret = smu_write_watermarks_table(smu);
1667 if (ret) {
1668 DRM_ERROR("Failed to update WMTABLE!\n");
1669 return ret;
9340dfd3
HW
1670 }
1671
9340dfd3
HW
1672 return 0;
1673}

/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}

/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}

static int dm_enable_vblank(struct drm_crtc *crtc);
static void dm_disable_vblank(struct drm_crtc *crtc);

static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
					  struct dc_state *state, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc;
	int rc = -EBUSY;
	int i = 0;

	for (i = 0; i < state->stream_count; i++) {
		acrtc = get_crtc_by_otg_inst(
				adev, state->stream_status[i].primary_otg_inst);

		if (acrtc && state->stream_status[i].plane_count != 0) {
			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG("crtc %d - pflip irq %sabling: r=%d\n",
				  acrtc->crtc_id, enable ? "en" : "dis", rc);
			if (rc)
				DRM_WARN("Failed to %s pflip interrupts\n",
					 enable ? "enable" : "disable");

			if (enable) {
				rc = dm_enable_vblank(&acrtc->base);
				if (rc)
					DRM_WARN("Failed to enable vblank interrupts\n");
			} else {
				dm_disable_vblank(&acrtc->base);
			}
		}
	}
}
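
/*
 * Note: the helper above is used around GPU reset: dm_suspend() disables
 * the per-stream pageflip and vblank interrupts before caching DC state,
 * and dm_resume() re-enables them once the cached state is committed.
 */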

static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
{
	struct dc_state *context = NULL;
	enum dc_status res = DC_ERROR_UNEXPECTED;
	int i;
	struct dc_stream_state *del_streams[MAX_PIPES];
	int del_streams_count = 0;

	memset(del_streams, 0, sizeof(del_streams));

	context = dc_create_state(dc);
	if (context == NULL)
		goto context_alloc_fail;

	dc_resource_state_copy_construct_current(dc, context);

	/* First remove from context all streams */
	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		del_streams[del_streams_count++] = stream;
	}

	/* Remove all planes for removed streams and then remove the streams */
	for (i = 0; i < del_streams_count; i++) {
		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
			res = DC_FAIL_DETACH_SURFACES;
			goto fail;
		}

		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
		if (res != DC_OK)
			goto fail;
	}

	res = dc_validate_global_state(dc, context, false);

	if (res != DC_OK) {
		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
		goto fail;
	}

	res = dc_commit_state(dc, context);

fail:
	dc_release_state(context);

context_alloc_fail:
	return res;
}
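
/*
 * Suspend has two paths: during GPU reset (amdgpu_in_reset()) the current DC
 * state is cached and all streams are zeroed while dm->dc_lock stays held;
 * the lock is released again in dm_resume() after the cached state has been
 * restored. The regular S3 path instead suspends the DRM atomic state.
 */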
static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	if (amdgpu_in_reset(adev)) {
		mutex_lock(&dm->dc_lock);
		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);

		amdgpu_dm_commit_zero_streams(dm->dc);

		amdgpu_dm_irq_suspend(adev);

		return ret;
	}

	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));

	s3_handle_mst(adev_to_drm(adev), true);

	amdgpu_dm_irq_suspend(adev);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return 0;
}

static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc)
{
	uint32_t i;
	struct drm_connector_state *new_con_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		crtc_from_state = new_con_state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_dm_connector(connector);
	}

	return NULL;
}

static void emulated_link_detect(struct dc_link *link)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct display_sink_capability sink_caps = { 0 };
	enum dc_edid_status edid_status;
	struct dc_context *dc_ctx = link->ctx;
	struct dc_sink *sink = NULL;
	struct dc_sink *prev_sink = NULL;

	link->type = dc_connection_none;
	prev_sink = link->local_sink;

	if (prev_sink != NULL)
		dc_sink_retain(prev_sink);

	switch (link->connector_signal) {
	case SIGNAL_TYPE_HDMI_TYPE_A: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
		break;
	}

	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
		break;
	}

	case SIGNAL_TYPE_DVI_DUAL_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
		break;
	}

	case SIGNAL_TYPE_LVDS: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_LVDS;
		break;
	}

	case SIGNAL_TYPE_EDP: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_EDP;
		break;
	}

	case SIGNAL_TYPE_DISPLAY_PORT: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
		break;
	}

	default:
		DC_ERROR("Invalid connector type! signal:%d\n",
			 link->connector_signal);
		return;
	}

	sink_init_data.link = link;
	sink_init_data.sink_signal = sink_caps.signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DC_ERROR("Failed to create sink!\n");
		return;
	}

	/* dc_sink_create returns a new reference */
	link->local_sink = sink;

	edid_status = dm_helpers_read_local_edid(
			link->ctx,
			link,
			sink);

	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID\n");
}

static void dm_gpureset_commit_state(struct dc_state *dc_state,
				     struct amdgpu_display_manager *dm)
{
	struct {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} *bundle;
	int k, m;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		dm_error("Failed to allocate update bundle\n");
		goto cleanup;
	}

	for (k = 0; k < dc_state->stream_count; k++) {
		bundle->stream_update.stream = dc_state->streams[k];

		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
			bundle->surface_updates[m].surface =
				dc_state->stream_status->plane_states[m];
			bundle->surface_updates[m].surface->force_full_update =
				true;
		}
		dc_commit_updates_for_stream(
			dm->dc, bundle->surface_updates,
			dc_state->stream_status->plane_count,
			dc_state->streams[k], &bundle->stream_update, dc_state);
	}

cleanup:
	kfree(bundle);

	return;
}

static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct drm_device *ddev = adev_to_drm(adev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct dm_plane_state *dm_new_plane_state;
	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
	enum dc_connection_type new_connection_type = dc_connection_none;
	struct dc_state *dc_state;
	int i, r, j;

	if (amdgpu_in_reset(adev)) {
		dc_state = dm->cached_dc_state;

		r = dm_dmub_hw_init(adev);
		if (r)
			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
		dc_resume(dm->dc);

		amdgpu_dm_irq_resume_early(adev);

		for (i = 0; i < dc_state->stream_count; i++) {
			dc_state->streams[i]->mode_changed = true;
			for (j = 0; j < dc_state->stream_status->plane_count; j++) {
				dc_state->stream_status->plane_states[j]->update_flags.raw
					= 0xffffffff;
			}
		}

		WARN_ON(!dc_commit_state(dm->dc, dc_state));

		dm_gpureset_commit_state(dm->cached_dc_state, dm);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);

		dc_release_state(dm->cached_dc_state);
		dm->cached_dc_state = NULL;

		amdgpu_dm_irq_resume_late(adev);

		mutex_unlock(&dm->dc_lock);

		return 0;
	}
	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
	dc_release_state(dm_state->context);
	dm_state->context = dc_create_state(dm->dc);
	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
	dc_resource_state_construct(dm->dc, dm_state->context);

	/* Before powering on DC we need to re-initialize DMUB. */
	r = dm_dmub_hw_init(adev);
	if (r)
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

	/* power on hardware */
	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

	/* program HPD filter */
	dc_resume(dm->dc);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);

	/* Do detection */
	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none)
			emulated_link_detect(aconnector->dc_link);
		else
			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
			aconnector->fake_enable = false;

		if (aconnector->dc_sink)
			dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}
	drm_connector_list_iter_end(&iter);

	/* Force mode set in atomic commit */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
		new_crtc_state->active_changed = true;

	/*
	 * atomic_check is expected to create the dc states. We need to release
	 * them here, since they were duplicated as part of the suspend
	 * procedure.
	 */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->stream) {
			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;
		}
	}

	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		if (dm_new_plane_state->dc_state) {
			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
			dc_plane_state_release(dm_new_plane_state->dc_state);
			dm_new_plane_state->dc_state = NULL;
		}
	}

	drm_atomic_helper_resume(ddev, dm->cached_state);

	dm->cached_state = NULL;

	amdgpu_dm_irq_resume_late(adev);

	amdgpu_dm_smu_write_watermarks_table(adev);

	return 0;
}
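
/*
 * Summary of the normal (non-reset) resume sequence above: re-init DMUB,
 * power DC on (D0), resume DC and the early HPD Rx IRQs, restore MST, force
 * re-detection of every non-MST connector, release the dc states duplicated
 * at suspend, then replay the cached DRM atomic state and write the SMU
 * watermarks table.
 */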

/**
 * DOC: DM Lifecycle
 *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
 * the base driver's device list to be initialized and torn down accordingly.
 *
 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
 */

static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};

const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};

/**
 * DOC: atomic
 *
 * *WIP*
 */

static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.get_format_info = amd_get_format_info,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = amdgpu_dm_atomic_commit,
};

static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};

static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
{
	u32 max_cll, min_cll, max, min, q, r;
	struct amdgpu_dm_backlight_caps *caps;
	struct amdgpu_display_manager *dm;
	struct drm_connector *conn_base;
	struct amdgpu_device *adev;
	struct dc_link *link = NULL;
	static const u8 pre_computed_values[] = {
		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};

	if (!aconnector || !aconnector->dc_link)
		return;

	link = aconnector->dc_link;
	if (link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	conn_base = &aconnector->base;
	adev = drm_to_adev(conn_base->dev);
	dm = &adev->dm;
	caps = &dm->backlight_caps;
	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
	caps->aux_support = false;
	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;

	if (caps->ext_caps->bits.oled == 1 ||
	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
		caps->aux_support = true;

	/* From the specification (CTA-861-G), for calculating the maximum
	 * luminance we need to use:
	 *	Luminance = 50*2**(CV/32)
	 * where CV is a one-byte value.
	 * Calculating this expression would require floating point precision;
	 * to avoid that complexity, we take advantage of the fact that CV is
	 * divided by a constant. From Euclid's division algorithm, we know
	 * that CV can be written as: CV = 32*q + r. Next, we replace CV in the
	 * luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
	 * need to pre-compute the value of r/32. For pre-computing the values
	 * we just used the following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * The results of the above expression can be verified in
	 * pre_computed_values.
	 */
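	/*
	 * Worked example: max_cll = 65 gives q = 65 >> 5 = 2 and
	 * r = 65 % 32 = 1, so max = (1 << 2) * pre_computed_values[1] =
	 * 4 * 51 = 204 nits, matching round(50 * 2**(65/32)).
	 */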
	q = max_cll >> 5;
	r = max_cll % 32;
	max = (1 << q) * pre_computed_values[r];

	// min luminance: maxLum * (CV/255)^2 / 100
	q = DIV_ROUND_CLOSEST(min_cll, 255);
	min = max * DIV_ROUND_CLOSEST((q * q), 100);

	caps->aux_max_input_signal = max;
	caps->aux_min_input_signal = min;
}

void amdgpu_dm_update_connector_after_detect(
		struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;

	sink = aconnector->dc_link->local_sink;
	if (sink)
		dc_sink_retain(sink);

	/*
	 * Edid mgmt connector gets first update only in mode_valid hook and then
	 * the connector sink is set to either fake or physical sink depending on
	 * link status. Skip if already done during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/*
		 * For S3 resume with headless use em_sink to fake stream
		 * because on resume connector->sink is set to NULL
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * retain and release below are used to
				 * bump up refcount for sink because the link doesn't point
				 * to it anymore after disconnect, so on next crtc to connector
				 * reshuffle by UMD we will get into unwanted dc_sink release
				 */
				dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			dc_sink_retain(aconnector->dc_sink);
			amdgpu_dm_update_freesync_caps(connector,
					aconnector->edid);
		} else {
			amdgpu_dm_update_freesync_caps(connector, NULL);
			if (!aconnector->dc_sink) {
				aconnector->dc_sink = aconnector->dc_em_sink;
				dc_sink_retain(aconnector->dc_sink);
			}
		}

		mutex_unlock(&dev->mode_config.mutex);

		if (sink)
			dc_sink_release(sink);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		dc_sink_release(sink);
		return;
	}

	if (aconnector->dc_sink == sink) {
		/*
		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!!
		 */
		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				 aconnector->connector_id);
		if (sink)
			dc_sink_release(sink);
		return;
	}

	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
			 aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/*
	 * 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do
	 */
	if (sink) {
		/*
		 * TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here.
		 */
		if (aconnector->dc_sink)
			amdgpu_dm_update_freesync_caps(connector, NULL);

		aconnector->dc_sink = sink;
		dc_sink_retain(aconnector->dc_sink);
		if (sink->dc_edid.length == 0) {
			aconnector->edid = NULL;
			if (aconnector->dc_link->aux_mode) {
				drm_dp_cec_unset_edid(
					&aconnector->dm_dp_aux.aux);
			}
		} else {
			aconnector->edid =
				(struct edid *)sink->dc_edid.raw_edid;

			drm_connector_update_edid_property(connector,
							   aconnector->edid);
			drm_add_edid_modes(connector, aconnector->edid);

			if (aconnector->dc_link->aux_mode)
				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
						    aconnector->edid);
		}

		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
		update_connector_ext_caps(aconnector);
	} else {
		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		amdgpu_dm_update_freesync_caps(connector, NULL);
		drm_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		aconnector->edid = NULL;
#ifdef CONFIG_DRM_AMD_DC_HDCP
		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
#endif
	}

	mutex_unlock(&dev->mode_config.mutex);

	update_subconnector_property(aconnector);

	if (sink)
		dc_sink_release(sink);
}
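
/*
 * The HPD handlers below run in low-IRQ (work queue) context; they are
 * registered per connector with INTERRUPT_LOW_IRQ_CONTEXT in
 * register_hpd_handlers() further down.
 */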
static void handle_hpd_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	enum dc_connection_type new_connection_type = dc_connection_none;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct amdgpu_device *adev = drm_to_adev(dev);
#endif

	/*
	 * In case of failure or MST no need to update connector status or notify the OS
	 * since (for MST case) MST does this in its own context.
	 */
	mutex_lock(&aconnector->hpd_lock);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue)
		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
#endif
	if (aconnector->fake_enable)
		aconnector->fake_enable = false;

	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");

	if (aconnector->base.force && new_connection_type == dc_connection_none) {
		emulated_link_detect(aconnector->dc_link);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);

	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
		amdgpu_dm_update_connector_after_detect(aconnector);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);
}

static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;

	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}
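
	/*
	 * With the DPCD register offsets above this reads 2 bytes starting
	 * at DP_SINK_COUNT (0x200) for pre-1.2 sinks and 4 bytes starting
	 * at DP_SINK_COUNT_ESI (0x2002) for DP 1.2+ sinks.
	 */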
	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

	while (dret == dpcd_bytes_to_read &&
	       process_count < max_process_count) {
		uint8_t retry;
		dret = 0;

		process_count++;

		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify downstream */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else {
			break;
		}
	}

	if (process_count == max_process_count)
		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}

static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
	enum dc_connection_type new_connection_type = dc_connection_none;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	union hpd_irq_data hpd_irq_data;
	struct amdgpu_device *adev = drm_to_adev(dev);

	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
#endif

	/*
	 * TODO: Temporarily add a mutex so that the HPD interrupt does not
	 * have a GPIO conflict; once an i2c helper is implemented, this
	 * mutex should be retired.
	 */
	if (dc_link->type != dc_connection_mst_branch)
		mutex_lock(&aconnector->hpd_lock);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
#else
	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
#endif
	    !is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (!dc_link_detect_sink(dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(dc_link);

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		}
	}
#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
		if (adev->dm.hdcp_workqueue)
			hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
	}
#endif
	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
	    (dc_link->type == dc_connection_mst_branch))
		dm_handle_hpd_rx_irq(aconnector);

	if (dc_link->type != dc_connection_mst_branch) {
		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
		mutex_unlock(&aconnector->hpd_lock);
	}
}

static void register_hpd_handlers(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct amdgpu_dm_connector *aconnector;
	const struct dc_link *dc_link;
	struct dc_interrupt_params int_params = {0};

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {

		aconnector = to_amdgpu_dm_connector(connector);
		dc_link = aconnector->dc_link;

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_irq,
					(void *) aconnector);
		}

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {

			/* Also register for DP short pulse (hpd_rx). */
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_rx_irq,
					(void *) aconnector);
		}
	}
}
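
/*
 * The per-generation IRQ registration helpers below all follow the same
 * pattern: add each IRQ id with the base driver via amdgpu_irq_add_id(),
 * translate it to a DC interrupt source, and register a high-IRQ-context
 * DM handler for it.
 */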
#if defined(CONFIG_DRM_AMD_DC_SI)
/* Register IRQ sources and initialize IRQ callbacks */
static int dce60_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VBLANK interrupt */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i + 1, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
	     i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
#endif

/* Register IRQ sources and initialize IRQ callbacks */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

	if (adev->asic_type >= CHIP_VEGA10)
		client_id = SOC15_IH_CLIENTID_DCE;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VBLANK interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use VUPDATE interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_vupdate_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
	     i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
/* Register IRQ sources and initialize IRQ callbacks */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VSTARTUP interrupt */
	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
	     i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
	     i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);

		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(
			adev, &int_params, dm_crtc_high_irq, c_irq_params);
	}

	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
	 * to trigger at end of each vblank, regardless of state of the lock,
	 * matching DCE behaviour.
	 */
	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
	     i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);

		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_vupdate_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
	     i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
	     i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
			&adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
#endif

/*
 * Acquires the lock for the atomic state object and returns
 * the new atomic state.
 *
 * This should only be called during atomic check.
 */
static int dm_atomic_get_state(struct drm_atomic_state *state,
			       struct dm_atomic_state **dm_state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_state *priv_state;

	if (*dm_state)
		return 0;

	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	*dm_state = to_dm_atomic_state(priv_state);

	return 0;
}

static struct dm_atomic_state *
dm_atomic_get_new_state(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_obj *obj;
	struct drm_private_state *new_obj_state;
	int i;

	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
		if (obj->funcs == dm->atomic_obj.funcs)
			return to_dm_atomic_state(new_obj_state);
	}

	return NULL;
}

static struct drm_private_state *
dm_atomic_duplicate_state(struct drm_private_obj *obj)
{
	struct dm_atomic_state *old_state, *new_state;

	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
	if (!new_state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);

	old_state = to_dm_atomic_state(obj->state);

	if (old_state && old_state->context)
		new_state->context = dc_copy_state(old_state->context);

	if (!new_state->context) {
		kfree(new_state);
		return NULL;
	}

	return &new_state->base;
}

static void dm_atomic_destroy_state(struct drm_private_obj *obj,
				    struct drm_private_state *state)
{
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);

	if (dm_state && dm_state->context)
		dc_release_state(dm_state->context);

	kfree(dm_state);
}

static struct drm_private_state_funcs dm_atomic_state_funcs = {
	.atomic_duplicate_state = dm_atomic_duplicate_state,
	.atomic_destroy_state = dm_atomic_destroy_state,
};
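
/*
 * Sketch of how a caller uses dm_atomic_get_state() during atomic check:
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *	// dm_state->context can now be modified under the acquired lock.
 */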

static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
{
	struct dm_atomic_state *state;
	int r;

	adev->mode_info.mode_config_initialized = true;

	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;

	adev_to_drm(adev)->mode_config.max_width = 16384;
	adev_to_drm(adev)->mode_config.max_height = 16384;

	adev_to_drm(adev)->mode_config.preferred_depth = 24;
	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
	/* indicates support for immediate flip */
	adev_to_drm(adev)->mode_config.async_page_flip = true;

	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	state->context = dc_create_state(adev->dm.dc);
	if (!state->context) {
		kfree(state);
		return -ENOMEM;
	}

	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);

	drm_atomic_private_obj_init(adev_to_drm(adev),
				    &adev->dm.atomic_obj,
				    &state->base,
				    &dm_atomic_state_funcs);

	r = amdgpu_display_modeset_create_props(adev);
	if (r) {
		dc_release_state(state->context);
		kfree(state);
		return r;
	}

	r = amdgpu_dm_audio_init(adev);
	if (r) {
		dc_release_state(state->context);
		kfree(state);
		return r;
	}

	return 0;
}

#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
{
#if defined(CONFIG_ACPI)
	struct amdgpu_dm_backlight_caps caps;

	memset(&caps, 0, sizeof(caps));

	if (dm->backlight_caps.caps_valid)
		return;

	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
	if (caps.caps_valid) {
		dm->backlight_caps.caps_valid = true;
		if (caps.aux_support)
			return;
		dm->backlight_caps.min_input_signal = caps.min_input_signal;
		dm->backlight_caps.max_input_signal = caps.max_input_signal;
	} else {
		dm->backlight_caps.min_input_signal =
				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
		dm->backlight_caps.max_input_signal =
				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
	}
#else
	if (dm->backlight_caps.aux_support)
		return;

	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
#endif
}

static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
{
	bool rc;

	if (!link)
		return 1;

	rc = dc_link_set_backlight_level_nits(link, true, brightness,
					      AUX_BL_DEFAULT_TRANSITION_TIME_MS);

	return rc ? 0 : 1;
}

static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
				unsigned *min, unsigned *max)
{
	if (!caps)
		return 0;

	if (caps->aux_support) {
		// Firmware limits are in nits, DC API wants millinits.
		*max = 1000 * caps->aux_max_input_signal;
		*min = 1000 * caps->aux_min_input_signal;
	} else {
		// Firmware limits are 8-bit, PWM control is 16-bit.
		*max = 0x101 * caps->max_input_signal;
		*min = 0x101 * caps->min_input_signal;
	}
	return 1;
}

static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
					uint32_t brightness)
{
	unsigned min, max;

	if (!get_brightness_range(caps, &min, &max))
		return brightness;

	// Rescale 0..255 to min..max
	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
				       AMDGPU_MAX_BL_LEVEL);
}
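
/*
 * Worked example for the PWM path: with firmware limits of 12..255 the
 * range becomes min = 0x101 * 12 = 3084 and max = 0x101 * 255 = 65535,
 * so a user brightness of 128 maps to 3084 + round(62451 * 128 / 255)
 * = 34432.
 */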

static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
				      uint32_t brightness)
{
	unsigned min, max;

	if (!get_brightness_range(caps, &min, &max))
		return brightness;

	if (brightness < min)
		return 0;
	// Rescale min..max to 0..255
	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
				 max - min);
}

static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);
	struct amdgpu_dm_backlight_caps caps;
	struct dc_link *link = NULL;
	u32 brightness;
	bool rc;

	amdgpu_dm_update_backlight_caps(dm);
	caps = dm->backlight_caps;

	link = (struct dc_link *)dm->backlight_link;

	brightness = convert_brightness_from_user(&caps, bd->props.brightness);
	// Change brightness based on AUX property
	if (caps.aux_support)
		return set_backlight_via_aux(link, brightness);

	rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);

	return rc ? 0 : 1;
}

static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);
	int ret = dc_link_get_backlight_level(dm->backlight_link);

	if (ret == DC_ERROR_UNEXPECTED)
		return bd->props.brightness;
	return convert_brightness_to_user(&dm->backlight_caps, ret);
}

static const struct backlight_ops amdgpu_dm_backlight_ops = {
	.options = BL_CORE_SUSPENDRESUME,
	.get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status = amdgpu_dm_backlight_update_status,
};

static void
amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
{
	char bl_name[16];
	struct backlight_properties props = { 0 };

	amdgpu_dm_update_backlight_caps(dm);

	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
	props.brightness = AMDGPU_MAX_BL_LEVEL;
	props.type = BACKLIGHT_RAW;

	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
		 adev_to_drm(dm->adev)->primary->index);

	dm->backlight_dev = backlight_device_register(bl_name,
						      adev_to_drm(dm->adev)->dev,
						      dm,
						      &amdgpu_dm_backlight_ops,
						      &props);

	if (IS_ERR(dm->backlight_dev))
		DRM_ERROR("DM: Backlight registration failed!\n");
	else
		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
}

#endif
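
/*
 * Planes are bound to CRTCs via a possible_crtcs bitmask in
 * initialize_plane() below: a primary plane with plane_id 0 gets
 * possible_crtcs = 1 << 0 = 0x1 (CRTC 0 only), while plane ids at or above
 * dc->caps.max_streams (e.g. overlays) are exposed to all CRTCs via 0xff.
 */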
static int initialize_plane(struct amdgpu_display_manager *dm,
			    struct amdgpu_mode_info *mode_info, int plane_id,
			    enum drm_plane_type plane_type,
			    const struct dc_plane_cap *plane_cap)
{
	struct drm_plane *plane;
	unsigned long possible_crtcs;
	int ret = 0;

	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
	if (!plane) {
		DRM_ERROR("KMS: Failed to allocate plane\n");
		return -ENOMEM;
	}
	plane->type = plane_type;

	/*
	 * HACK: IGT tests expect that the primary plane for a CRTC
	 * can only have one possible CRTC. Only expose support for
	 * any CRTC if they're not going to be used as a primary plane
	 * for a CRTC - like overlay or underlay planes.
	 */
	possible_crtcs = 1 << plane_id;
	if (plane_id >= dm->dc->caps.max_streams)
		possible_crtcs = 0xff;

	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);

	if (ret) {
		DRM_ERROR("KMS: Failed to initialize plane\n");
		kfree(plane);
		return ret;
	}

	if (mode_info)
		mode_info->planes[plane_id] = plane;

	return ret;
}

static void register_backlight_device(struct amdgpu_display_manager *dm,
				      struct dc_link *link)
{
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none) {
		/*
		 * Even if registration fails, we should continue with
		 * DM initialization because not having a backlight control
		 * is better than a black screen.
		 */
		amdgpu_dm_register_backlight_device(dm);

		if (dm->backlight_dev)
			dm->backlight_link = link;
	}
#endif
}

/*
 * In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with DAL component
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	int32_t i;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct amdgpu_encoder *aencoder = NULL;
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	uint32_t link_cnt;
	int32_t primary_planes;
	enum dc_connection_type new_connection_type = dc_connection_none;
	const struct dc_plane_cap *plane;

	link_cnt = dm->dc->caps.max_links;
	if (amdgpu_dm_mode_config_init(dm->adev)) {
		DRM_ERROR("DM: Failed to initialize mode config\n");
		return -EINVAL;
	}

	/* There is one primary plane per CRTC */
	primary_planes = dm->dc->caps.max_streams;
	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);

	/*
	 * Initialize primary planes, implicit planes for legacy IOCTLS.
	 * Order is reversed to match iteration order in atomic check.
	 */
	for (i = (primary_planes - 1); i >= 0; i--) {
		plane = &dm->dc->caps.planes[i];

		if (initialize_plane(dm, mode_info, i,
				     DRM_PLANE_TYPE_PRIMARY, plane)) {
			DRM_ERROR("KMS: Failed to initialize primary plane\n");
			goto fail;
		}
	}

	/*
	 * Initialize overlay planes, index starting after primary planes.
	 * These planes have a higher DRM index than the primary planes since
	 * they should be considered as having a higher z-order.
	 * Order is reversed to match iteration order in atomic check.
	 *
	 * Only support DCN for now, and only expose one so we don't encourage
	 * userspace to use up all the pipes.
	 */
	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];

		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
			continue;

		if (!plane->blends_with_above || !plane->blends_with_below)
			continue;

		if (!plane->pixel_format_support.argb8888)
			continue;

		if (initialize_plane(dm, NULL, primary_planes + i,
				     DRM_PLANE_TYPE_OVERLAY, plane)) {
			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
			goto fail;
		}

		/* Only create one overlay plane. */
		break;
	}

	for (i = 0; i < dm->dc->caps.max_streams; i++)
		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
			DRM_ERROR("KMS: Failed to initialize crtc\n");
			goto fail;
		}

	dm->display_indexes_num = dm->dc->caps.max_streams;

	/* loops over all connectors on the board */
	for (i = 0; i < link_cnt; i++) {
		struct dc_link *link = NULL;

		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
			DRM_ERROR(
				"KMS: Cannot support more than %d display indexes\n",
				AMDGPU_DM_MAX_DISPLAY_INDEX);
			continue;
		}

		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
		if (!aconnector)
			goto fail;

		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
		if (!aencoder)
			goto fail;

		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
			DRM_ERROR("KMS: Failed to initialize encoder\n");
			goto fail;
		}

		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
			DRM_ERROR("KMS: Failed to initialize connector\n");
			goto fail;
		}

		link = dc_get_link_at_index(dm->dc, i);

		if (!dc_link_detect_sink(link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3432 emulated_link_detect(link);
3433 amdgpu_dm_update_connector_after_detect(aconnector);
3434
3435 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4562236b 3436 amdgpu_dm_update_connector_after_detect(aconnector);
89fc8d4e 3437 register_backlight_device(dm, link);
397a9bc5
RL
3438 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3439 amdgpu_dm_set_psr_caps(link);
89fc8d4e
HW
3440 }
3441
3442
4562236b
HW
3443 }
3444
3445 /* Software is initialized. Now we can register interrupt handlers. */
3446 switch (adev->asic_type) {
55e56389
MR
3447#if defined(CONFIG_DRM_AMD_DC_SI)
3448 case CHIP_TAHITI:
3449 case CHIP_PITCAIRN:
3450 case CHIP_VERDE:
3451 case CHIP_OLAND:
3452 if (dce60_register_irq_handlers(dm->adev)) {
3453 DRM_ERROR("DM: Failed to initialize IRQ\n");
3454 goto fail;
3455 }
3456 break;
3457#endif
4562236b
HW
3458 case CHIP_BONAIRE:
3459 case CHIP_HAWAII:
cd4b356f
AD
3460 case CHIP_KAVERI:
3461 case CHIP_KABINI:
3462 case CHIP_MULLINS:
4562236b
HW
3463 case CHIP_TONGA:
3464 case CHIP_FIJI:
3465 case CHIP_CARRIZO:
3466 case CHIP_STONEY:
3467 case CHIP_POLARIS11:
3468 case CHIP_POLARIS10:
b264d345 3469 case CHIP_POLARIS12:
7737de91 3470 case CHIP_VEGAM:
2c8ad2d5 3471 case CHIP_VEGA10:
2325ff30 3472 case CHIP_VEGA12:
1fe6bf2f 3473 case CHIP_VEGA20:
4562236b
HW
3474 if (dce110_register_irq_handlers(dm->adev)) {
3475 DRM_ERROR("DM: Failed to initialize IRQ\n");
cd8a2ae8 3476 goto fail;
4562236b
HW
3477 }
3478 break;
b86a1aa3 3479#if defined(CONFIG_DRM_AMD_DC_DCN)
ff5ef992 3480 case CHIP_RAVEN:
fbd2afe5 3481 case CHIP_NAVI12:
476e955d 3482 case CHIP_NAVI10:
fce651e3 3483 case CHIP_NAVI14:
30221ad8 3484 case CHIP_RENOIR:
79037324 3485 case CHIP_SIENNA_CICHLID:
a6c5308f 3486 case CHIP_NAVY_FLOUNDER:
2a411205 3487 case CHIP_DIMGREY_CAVEFISH:
469989ca 3488 case CHIP_VANGOGH:
ff5ef992
AD
3489 if (dcn10_register_irq_handlers(dm->adev)) {
3490 DRM_ERROR("DM: Failed to initialize IRQ\n");
cd8a2ae8 3491 goto fail;
ff5ef992
AD
3492 }
3493 break;
3494#endif
4562236b 3495 default:
e63f8673 3496 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
cd8a2ae8 3497 goto fail;
4562236b
HW
3498 }
3499
4562236b 3500 return 0;
cd8a2ae8 3501fail:
4562236b 3502 kfree(aencoder);
4562236b 3503 kfree(aconnector);
54087768 3504
59d0f396 3505 return -EINVAL;
4562236b
HW
3506}
3507
7578ecda 3508static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4562236b
HW
3509{
3510 drm_mode_config_cleanup(dm->ddev);
eb3dc897 3511 drm_atomic_private_obj_fini(&dm->atomic_obj);
4562236b
HW
3512 return;
3513}
3514
3515/******************************************************************************
3516 * amdgpu_display_funcs functions
3517 *****************************************************************************/
3518
1f6010a9 3519/*
4562236b
HW
3520 * dm_bandwidth_update - program display watermarks
3521 *
3522 * @adev: amdgpu_device pointer
3523 *
3524 * Calculate and program the display watermarks and line buffer allocation.
3525 */
3526static void dm_bandwidth_update(struct amdgpu_device *adev)
3527{
49c07a99 3528 /* TODO: implement later */
4562236b
HW
3529}
3530
39cc5be2 3531static const struct amdgpu_display_funcs dm_display_funcs = {
4562236b
HW
3532 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3533 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
7b42573b
HW
3534 .backlight_set_level = NULL, /* never called for DC */
3535 .backlight_get_level = NULL, /* never called for DC */
4562236b
HW
3536 .hpd_sense = NULL,/* called unconditionally */
3537 .hpd_set_polarity = NULL, /* called unconditionally */
3538 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4562236b
HW
3539 .page_flip_get_scanoutpos =
3540 dm_crtc_get_scanoutpos,/* called unconditionally */
3541 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3542 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4562236b
HW
3543};
3544
3545#if defined(CONFIG_DEBUG_KERNEL_DC)
3546
3ee6b26b
AD
3547static ssize_t s3_debug_store(struct device *device,
3548 struct device_attribute *attr,
3549 const char *buf,
3550 size_t count)
4562236b
HW
3551{
3552 int ret;
3553 int s3_state;
ef1de361 3554 struct drm_device *drm_dev = dev_get_drvdata(device);
1348969a 3555 struct amdgpu_device *adev = drm_to_adev(drm_dev);
4562236b
HW
3556
3557 ret = kstrtoint(buf, 0, &s3_state);
3558
3559 if (ret == 0) {
3560 if (s3_state) {
3561 dm_resume(adev);
4a580877 3562 drm_kms_helper_hotplug_event(adev_to_drm(adev));
4562236b
HW
3563 } else
3564 dm_suspend(adev);
3565 }
3566
3567 return ret == 0 ? count : 0;
3568}
3569
3570DEVICE_ATTR_WO(s3_debug);
3571
3572#endif
3573
3574static int dm_early_init(void *handle)
3575{
3576 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3577
4562236b 3578 switch (adev->asic_type) {
55e56389
MR
3579#if defined(CONFIG_DRM_AMD_DC_SI)
3580 case CHIP_TAHITI:
3581 case CHIP_PITCAIRN:
3582 case CHIP_VERDE:
3583 adev->mode_info.num_crtc = 6;
3584 adev->mode_info.num_hpd = 6;
3585 adev->mode_info.num_dig = 6;
3586 break;
3587 case CHIP_OLAND:
3588 adev->mode_info.num_crtc = 2;
3589 adev->mode_info.num_hpd = 2;
3590 adev->mode_info.num_dig = 2;
3591 break;
3592#endif
4562236b
HW
3593 case CHIP_BONAIRE:
3594 case CHIP_HAWAII:
3595 adev->mode_info.num_crtc = 6;
3596 adev->mode_info.num_hpd = 6;
3597 adev->mode_info.num_dig = 6;
4562236b 3598 break;
cd4b356f
AD
3599 case CHIP_KAVERI:
3600 adev->mode_info.num_crtc = 4;
3601 adev->mode_info.num_hpd = 6;
3602 adev->mode_info.num_dig = 7;
cd4b356f
AD
3603 break;
3604 case CHIP_KABINI:
3605 case CHIP_MULLINS:
3606 adev->mode_info.num_crtc = 2;
3607 adev->mode_info.num_hpd = 6;
3608 adev->mode_info.num_dig = 6;
cd4b356f 3609 break;
4562236b
HW
3610 case CHIP_FIJI:
3611 case CHIP_TONGA:
3612 adev->mode_info.num_crtc = 6;
3613 adev->mode_info.num_hpd = 6;
3614 adev->mode_info.num_dig = 7;
4562236b
HW
3615 break;
3616 case CHIP_CARRIZO:
3617 adev->mode_info.num_crtc = 3;
3618 adev->mode_info.num_hpd = 6;
3619 adev->mode_info.num_dig = 9;
4562236b
HW
3620 break;
3621 case CHIP_STONEY:
3622 adev->mode_info.num_crtc = 2;
3623 adev->mode_info.num_hpd = 6;
3624 adev->mode_info.num_dig = 9;
4562236b
HW
3625 break;
3626 case CHIP_POLARIS11:
b264d345 3627 case CHIP_POLARIS12:
4562236b
HW
3628 adev->mode_info.num_crtc = 5;
3629 adev->mode_info.num_hpd = 5;
3630 adev->mode_info.num_dig = 5;
4562236b
HW
3631 break;
3632 case CHIP_POLARIS10:
7737de91 3633 case CHIP_VEGAM:
4562236b
HW
3634 adev->mode_info.num_crtc = 6;
3635 adev->mode_info.num_hpd = 6;
3636 adev->mode_info.num_dig = 6;
4562236b 3637 break;
2c8ad2d5 3638 case CHIP_VEGA10:
2325ff30 3639 case CHIP_VEGA12:
1fe6bf2f 3640 case CHIP_VEGA20:
2c8ad2d5
AD
3641 adev->mode_info.num_crtc = 6;
3642 adev->mode_info.num_hpd = 6;
3643 adev->mode_info.num_dig = 6;
3644 break;
b86a1aa3 3645#if defined(CONFIG_DRM_AMD_DC_DCN)
ff5ef992 3646 case CHIP_RAVEN:
20f2ffe5
AD
3647 case CHIP_RENOIR:
3648 case CHIP_VANGOGH:
ff5ef992
AD
3649 adev->mode_info.num_crtc = 4;
3650 adev->mode_info.num_hpd = 4;
3651 adev->mode_info.num_dig = 4;
ff5ef992 3652 break;
476e955d 3653 case CHIP_NAVI10:
fbd2afe5 3654 case CHIP_NAVI12:
79037324 3655 case CHIP_SIENNA_CICHLID:
a6c5308f 3656 case CHIP_NAVY_FLOUNDER:
476e955d
HW
3657 adev->mode_info.num_crtc = 6;
3658 adev->mode_info.num_hpd = 6;
3659 adev->mode_info.num_dig = 6;
3660 break;
fce651e3 3661 case CHIP_NAVI14:
2a411205 3662 case CHIP_DIMGREY_CAVEFISH:
fce651e3
BL
3663 adev->mode_info.num_crtc = 5;
3664 adev->mode_info.num_hpd = 5;
3665 adev->mode_info.num_dig = 5;
3666 break;
20f2ffe5 3667#endif
4562236b 3668 default:
e63f8673 3669 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4562236b
HW
3670 return -EINVAL;
3671 }
3672
c8dd5715
MD
3673 amdgpu_dm_set_irq_funcs(adev);
3674
39cc5be2
AD
3675 if (adev->mode_info.funcs == NULL)
3676 adev->mode_info.funcs = &dm_display_funcs;
3677
1f6010a9
DF
3678 /*
3679 * Note: Do NOT change adev->audio_endpt_rreg and
4562236b 3680 * adev->audio_endpt_wreg because they are initialised in
1f6010a9
DF
3681 * amdgpu_device_init()
3682 */
4562236b
HW
3683#if defined(CONFIG_DEBUG_KERNEL_DC)
3684 device_create_file(
4a580877 3685 adev_to_drm(adev)->dev,
4562236b
HW
3686 &dev_attr_s3_debug);
3687#endif
3688
3689 return 0;
3690}
3691
9b690ef3 3692static bool modeset_required(struct drm_crtc_state *crtc_state,
0971c40e
HW
3693 struct dc_stream_state *new_stream,
3694 struct dc_stream_state *old_stream)
9b690ef3 3695{
2afda735 3696 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
e7b07cee
HW
3697}
3698
3699static bool modereset_required(struct drm_crtc_state *crtc_state)
3700{
2afda735 3701 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
e7b07cee
HW
3702}
3703
7578ecda 3704static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
e7b07cee
HW
3705{
3706 drm_encoder_cleanup(encoder);
3707 kfree(encoder);
3708}
3709
3710static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3711 .destroy = amdgpu_dm_encoder_destroy,
3712};
3713
e7b07cee 3714
695af5f9
NK
3715static int fill_dc_scaling_info(const struct drm_plane_state *state,
3716 struct dc_scaling_info *scaling_info)
e7b07cee 3717{
6491f0c0 3718 int scale_w, scale_h;
e7b07cee 3719
695af5f9 3720 memset(scaling_info, 0, sizeof(*scaling_info));
e7b07cee 3721
695af5f9
NK
3722 /* Source is fixed 16.16 but we ignore mantissa for now... */
3723 scaling_info->src_rect.x = state->src_x >> 16;
3724 scaling_info->src_rect.y = state->src_y >> 16;
e7b07cee 3725
695af5f9
NK
3726 scaling_info->src_rect.width = state->src_w >> 16;
3727 if (scaling_info->src_rect.width == 0)
3728 return -EINVAL;
3729
3730 scaling_info->src_rect.height = state->src_h >> 16;
3731 if (scaling_info->src_rect.height == 0)
3732 return -EINVAL;
3733
3734 scaling_info->dst_rect.x = state->crtc_x;
3735 scaling_info->dst_rect.y = state->crtc_y;
e7b07cee
HW
3736
3737 if (state->crtc_w == 0)
695af5f9 3738 return -EINVAL;
e7b07cee 3739
695af5f9 3740 scaling_info->dst_rect.width = state->crtc_w;
e7b07cee
HW
3741
3742 if (state->crtc_h == 0)
695af5f9 3743 return -EINVAL;
e7b07cee 3744
695af5f9 3745 scaling_info->dst_rect.height = state->crtc_h;
e7b07cee 3746
695af5f9
NK
3747 /* DRM doesn't specify clipping on destination output. */
3748 scaling_info->clip_rect = scaling_info->dst_rect;
e7b07cee 3749
6491f0c0
NK
3750 /* TODO: Validate scaling per-format with DC plane caps */
3751 scale_w = scaling_info->dst_rect.width * 1000 /
3752 scaling_info->src_rect.width;
e7b07cee 3753
6491f0c0
NK
3754 if (scale_w < 250 || scale_w > 16000)
3755 return -EINVAL;
3756
3757 scale_h = scaling_info->dst_rect.height * 1000 /
3758 scaling_info->src_rect.height;
3759
3760 if (scale_h < 250 || scale_h > 16000)
3761 return -EINVAL;
3762
695af5f9
NK
3763 /*
3764 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3765 * assume reasonable defaults based on the format.
3766 */
e7b07cee 3767
695af5f9 3768 return 0;
4562236b 3769}
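/*
 * Worked example (illustrative, not part of the original source): a
 * plane state with src_w = 1920 << 16 (DRM source coordinates are 16.16
 * fixed point) and crtc_w = 960 yields src_rect.width = 1920 and
 * scale_w = 960 * 1000 / 1920 = 500, i.e. a 0.5x downscale. The
 * 250..16000 bounds above therefore allow scaling factors from 0.25x to
 * 16x in each axis.
 */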
695af5f9 3770
a3241991
BN
3771static void
3772fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
3773 uint64_t tiling_flags)
e7b07cee 3774{
a3241991
BN
3775 /* Fill GFX8 params */
3776 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3777 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
707477b0 3778
a3241991
BN
3779 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3780 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3781 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3782 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3783 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
b830ebc9 3784
a3241991
BN
3785 /* XXX fix me for VI */
3786 tiling_info->gfx8.num_banks = num_banks;
3787 tiling_info->gfx8.array_mode =
3788 DC_ARRAY_2D_TILED_THIN1;
3789 tiling_info->gfx8.tile_split = tile_split;
3790 tiling_info->gfx8.bank_width = bankw;
3791 tiling_info->gfx8.bank_height = bankh;
3792 tiling_info->gfx8.tile_aspect = mtaspect;
3793 tiling_info->gfx8.tile_mode =
3794 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3795 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3796 == DC_ARRAY_1D_TILED_THIN1) {
3797 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
e7b07cee
HW
3798 }
3799
a3241991
BN
3800 tiling_info->gfx8.pipe_config =
3801 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
e7b07cee
HW
3802}
3803
a3241991
BN
3804static void
3805fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
3806 union dc_tiling_info *tiling_info)
3807{
3808 tiling_info->gfx9.num_pipes =
3809 adev->gfx.config.gb_addr_config_fields.num_pipes;
3810 tiling_info->gfx9.num_banks =
3811 adev->gfx.config.gb_addr_config_fields.num_banks;
3812 tiling_info->gfx9.pipe_interleave =
3813 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3814 tiling_info->gfx9.num_shader_engines =
3815 adev->gfx.config.gb_addr_config_fields.num_se;
3816 tiling_info->gfx9.max_compressed_frags =
3817 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3818 tiling_info->gfx9.num_rb_per_se =
3819 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3820 tiling_info->gfx9.shaderEnable = 1;
a3241991
BN
3821 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3822 adev->asic_type == CHIP_NAVY_FLOUNDER ||
3823 adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
3824 adev->asic_type == CHIP_VANGOGH)
3825 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
7df7e505
NK
3826}
3827
695af5f9 3828static int
a3241991
BN
3829validate_dcc(struct amdgpu_device *adev,
3830 const enum surface_pixel_format format,
3831 const enum dc_rotation_angle rotation,
3832 const union dc_tiling_info *tiling_info,
3833 const struct dc_plane_dcc_param *dcc,
3834 const struct dc_plane_address *address,
3835 const struct plane_size *plane_size)
7df7e505
NK
3836{
3837 struct dc *dc = adev->dm.dc;
8daa1218
NC
3838 struct dc_dcc_surface_param input;
3839 struct dc_surface_dcc_cap output;
7df7e505 3840
8daa1218
NC
3841 memset(&input, 0, sizeof(input));
3842 memset(&output, 0, sizeof(output));
3843
a3241991 3844 if (!dcc->enable)
87b7ebc2
RS
3845 return 0;
3846
a3241991
BN
3847 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
3848 !dc->cap_funcs.get_dcc_compression_cap)
09e5665a 3849 return -EINVAL;
7df7e505 3850
695af5f9 3851 input.format = format;
12e2b2d4
DL
3852 input.surface_size.width = plane_size->surface_size.width;
3853 input.surface_size.height = plane_size->surface_size.height;
695af5f9 3854 input.swizzle_mode = tiling_info->gfx9.swizzle;
7df7e505 3855
695af5f9 3856 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
7df7e505 3857 input.scan = SCAN_DIRECTION_HORIZONTAL;
695af5f9 3858 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
7df7e505
NK
3859 input.scan = SCAN_DIRECTION_VERTICAL;
3860
3861 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
09e5665a 3862 return -EINVAL;
7df7e505
NK
3863
3864 if (!output.capable)
09e5665a 3865 return -EINVAL;
7df7e505 3866
a3241991
BN
3867 if (dcc->independent_64b_blks == 0 &&
3868 output.grph.rgb.independent_64b_blks != 0)
09e5665a 3869 return -EINVAL;
7df7e505 3870
a3241991
BN
3871 return 0;
3872}
3873
37384b3f
BN
3874static bool
3875modifier_has_dcc(uint64_t modifier)
3876{
3877 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
3878}
3879
3880static unsigned
3881modifier_gfx9_swizzle_mode(uint64_t modifier)
3882{
3883 if (modifier == DRM_FORMAT_MOD_LINEAR)
3884 return 0;
3885
3886 return AMD_FMT_MOD_GET(TILE, modifier);
3887}
3888
dfbbfe3c
BN
3889static const struct drm_format_info *
3890amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
3891{
816853f9 3892 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
dfbbfe3c
BN
3893}
3894
37384b3f
BN
3895static void
3896fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
3897 union dc_tiling_info *tiling_info,
3898 uint64_t modifier)
3899{
3900 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
3901 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
3902 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
3903 unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
3904
3905 fill_gfx9_tiling_info_from_device(adev, tiling_info);
3906
3907 if (!IS_AMD_FMT_MOD(modifier))
3908 return;
3909
3910 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
3911 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
3912
3913 if (adev->family >= AMDGPU_FAMILY_NV) {
3914 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
3915 } else {
3916 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
3917
3918 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
3919 }
3920}
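/*
 * Worked example (illustrative, not part of the original source): an
 * AMD modifier encoding PIPE_XOR_BITS = 5 and PACKERS = 4 gives
 * pipes_log2 = min(4, 5) = 4, so num_pipes = 16 and
 * num_shader_engines = 1 << (5 - 4) = 2; on an AMDGPU_FAMILY_NV part
 * num_pkrs additionally becomes 1 << 4 = 16.
 */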
3921
faa37f54
BN
3922enum dm_micro_swizzle {
3923 MICRO_SWIZZLE_Z = 0,
3924 MICRO_SWIZZLE_S = 1,
3925 MICRO_SWIZZLE_D = 2,
3926 MICRO_SWIZZLE_R = 3
3927};
3928
3929static bool dm_plane_format_mod_supported(struct drm_plane *plane,
3930 uint32_t format,
3931 uint64_t modifier)
3932{
3933 struct amdgpu_device *adev = drm_to_adev(plane->dev);
3934 const struct drm_format_info *info = drm_format_info(format);
3935
3936 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
3937
3938 if (!info)
3939 return false;
3940
3941 /*
3942 * We always have to allow this modifier, because core DRM still
3943 * checks LINEAR support if userspace does not provide modifiers.
3944 */
3945 if (modifier == DRM_FORMAT_MOD_LINEAR)
3946 return true;
3947
3948 /*
3949 * The arbitrary tiling support for multiplane formats has not been hooked
3950 * up.
3951 */
3952 if (info->num_planes > 1)
3953 return false;
3954
3955 /*
3956 * For D swizzle the canonical modifier depends on the bpp, so check
3957 * it here.
3958 */
3959 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
3960 adev->family >= AMDGPU_FAMILY_NV) {
3961 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
3962 return false;
3963 }
3964
3965 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
3966 info->cpp[0] < 8)
3967 return false;
3968
3969 if (modifier_has_dcc(modifier)) {
3970 /* Per radeonsi comments 16/64 bpp are more complicated. */
3971 if (info->cpp[0] != 4)
3972 return false;
3973 }
3974
3975 return true;
3976}
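/*
 * Example (illustrative, not part of the original source): a DCC
 * modifier combined with DRM_FORMAT_RGB565 (cpp[0] == 2) fails the
 * cpp[0] != 4 check above and is rejected, while the same modifier with
 * DRM_FORMAT_XRGB8888 (cpp[0] == 4) is reported as supported.
 */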
3977
3978static void
3979add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
3980{
3981 if (!*mods)
3982 return;
3983
3984 if (*cap - *size < 1) {
3985 uint64_t new_cap = *cap * 2;
3986 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
3987
3988 if (!new_mods) {
3989 kfree(*mods);
3990 *mods = NULL;
3991 return;
3992 }
3993
3994 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
3995 kfree(*mods);
3996 *mods = new_mods;
3997 *cap = new_cap;
3998 }
3999
4000 (*mods)[*size] = mod;
4001 *size += 1;
4002}
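/*
 * Note on the growth strategy (illustrative, not part of the original
 * source): doubling the capacity gives amortized O(1) appends, and on
 * allocation failure *mods is set to NULL so that subsequent
 * add_modifier() calls become no-ops; callers such as
 * get_plane_modifiers() below only need a single NULL check at the end.
 */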
4003
4004static void
4005add_gfx9_modifiers(const struct amdgpu_device *adev,
4006 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4007{
4008 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4009 int pipe_xor_bits = min(8, pipes +
4010 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4011 int bank_xor_bits = min(8 - pipe_xor_bits,
4012 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4013 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4014 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4015
4016
4017 if (adev->family == AMDGPU_FAMILY_RV) {
4018 /* Raven2 and later */
4019 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4020
4021 /*
4022 * No _D DCC swizzles yet because we only allow 32bpp, which
4023 * doesn't support _D on DCN
4024 */
4025
4026 if (has_constant_encode) {
4027 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4028 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4029 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4030 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4031 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4032 AMD_FMT_MOD_SET(DCC, 1) |
4033 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4034 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4035 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4036 }
4037
4038 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4039 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4040 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4041 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4042 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4043 AMD_FMT_MOD_SET(DCC, 1) |
4044 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4045 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4046 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4047
4048 if (has_constant_encode) {
4049 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4050 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4051 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4052 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4053 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4054 AMD_FMT_MOD_SET(DCC, 1) |
4055 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4056 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4057 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4058
4059 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4060 AMD_FMT_MOD_SET(RB, rb) |
4061 AMD_FMT_MOD_SET(PIPE, pipes));
4062 }
4063
4064 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4065 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4066 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4067 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4068 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4069 AMD_FMT_MOD_SET(DCC, 1) |
4070 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4071 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4072 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4073 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4074 AMD_FMT_MOD_SET(RB, rb) |
4075 AMD_FMT_MOD_SET(PIPE, pipes));
4076 }
4077
4078 /*
4079 * Only supported for 64bpp on Raven, will be filtered on format in
4080 * dm_plane_format_mod_supported.
4081 */
4082 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4083 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4084 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4085 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4086 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4087
4088 if (adev->family == AMDGPU_FAMILY_RV) {
4089 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4090 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4091 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4092 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4093 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4094 }
4095
4096 /*
4097 * Only supported for 64bpp on Raven, will be filtered on format in
4098 * dm_plane_format_mod_supported.
4099 */
4100 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4101 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4102 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4103
4104 if (adev->family == AMDGPU_FAMILY_RV) {
4105 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4106 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4107 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4108 }
4109}
4110
4111static void
4112add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4113 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4114{
4115 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4116
4117 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4118 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4119 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4120 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4121 AMD_FMT_MOD_SET(DCC, 1) |
4122 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4123 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4124 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4125
4126 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4127 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4128 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4129 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4130 AMD_FMT_MOD_SET(DCC, 1) |
4131 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4132 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4133 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4134 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4135
4136 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4137 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4138 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4139 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4140
4141 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4142 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4143 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4144 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4145
4146
4147 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4148 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4149 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4150 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4151
4152 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4153 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4154 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4155}
4156
4157static void
4158add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4159 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4160{
4161 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4162 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4163
4164 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4165 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4166 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4167 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4168 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4169 AMD_FMT_MOD_SET(DCC, 1) |
4170 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4171 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4172 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4173 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4174
4175 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4176 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4177 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4178 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4179 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4180 AMD_FMT_MOD_SET(DCC, 1) |
4181 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4182 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4183 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4184 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4185 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4186
4187 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4188 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4189 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4190 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4191 AMD_FMT_MOD_SET(PACKERS, pkrs));
4192
4193 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4194 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4195 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4196 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4197 AMD_FMT_MOD_SET(PACKERS, pkrs));
4198
4199 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4200 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4201 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4202 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4203
4204 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4205 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4206 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4207}
4208
4209static int
4210get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4211{
4212 uint64_t size = 0, capacity = 128;
4213 *mods = NULL;
4214
4215 /* We have not hooked up any pre-GFX9 modifiers. */
4216 if (adev->family < AMDGPU_FAMILY_AI)
4217 return 0;
4218
4219 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4220
4221 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4222 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4223 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4224 return *mods ? 0 : -ENOMEM;
4225 }
4226
4227 switch (adev->family) {
4228 case AMDGPU_FAMILY_AI:
4229 case AMDGPU_FAMILY_RV:
4230 add_gfx9_modifiers(adev, mods, &size, &capacity);
4231 break;
4232 case AMDGPU_FAMILY_NV:
4233 case AMDGPU_FAMILY_VGH:
4234 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4235 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4236 else
4237 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4238 break;
4239 }
4240
4241 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4242
4243 /* INVALID marks the end of the list. */
4244 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4245
4246 if (!*mods)
4247 return -ENOMEM;
4248
4249 return 0;
4250}
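/*
 * Hedged usage sketch (assumed caller, not part of the original
 * source): the DRM_FORMAT_MOD_INVALID-terminated list built above is
 * the shape drm_universal_plane_init() expects for its format_modifiers
 * argument. A plane-init path might look roughly like this, where
 * dm_plane_funcs, formats and num_formats stand in for the driver's
 * actual tables:
 */
#if 0
	uint64_t *modifiers = NULL;
	int ret = get_plane_modifiers(adev, plane->type, &modifiers);

	if (ret)
		return ret;

	ret = drm_universal_plane_init(adev_to_drm(adev), plane,
				       possible_crtcs, &dm_plane_funcs,
				       formats, num_formats, modifiers,
				       plane->type, NULL);
	kfree(modifiers);
#endif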
4251
37384b3f
BN
4252static int
4253fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4254 const struct amdgpu_framebuffer *afb,
4255 const enum surface_pixel_format format,
4256 const enum dc_rotation_angle rotation,
4257 const struct plane_size *plane_size,
4258 union dc_tiling_info *tiling_info,
4259 struct dc_plane_dcc_param *dcc,
4260 struct dc_plane_address *address,
4261 const bool force_disable_dcc)
4262{
4263 const uint64_t modifier = afb->base.modifier;
4264 int ret;
4265
4266 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4267 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4268
4269 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4270 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4271
4272 dcc->enable = 1;
4273 dcc->meta_pitch = afb->base.pitches[1];
4274 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4275
4276 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4277 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4278 }
4279
4280 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4281 if (ret)
4282 return ret;
7df7e505 4283
09e5665a
NK
4284 return 0;
4285}
4286
4287static int
320932bf 4288fill_plane_buffer_attributes(struct amdgpu_device *adev,
09e5665a 4289 const struct amdgpu_framebuffer *afb,
695af5f9
NK
4290 const enum surface_pixel_format format,
4291 const enum dc_rotation_angle rotation,
4292 const uint64_t tiling_flags,
09e5665a 4293 union dc_tiling_info *tiling_info,
12e2b2d4 4294 struct plane_size *plane_size,
09e5665a 4295 struct dc_plane_dcc_param *dcc,
87b7ebc2 4296 struct dc_plane_address *address,
5888f07a 4297 bool tmz_surface,
87b7ebc2 4298 bool force_disable_dcc)
09e5665a 4299{
320932bf 4300 const struct drm_framebuffer *fb = &afb->base;
09e5665a
NK
4301 int ret;
4302
4303 memset(tiling_info, 0, sizeof(*tiling_info));
320932bf 4304 memset(plane_size, 0, sizeof(*plane_size));
09e5665a 4305 memset(dcc, 0, sizeof(*dcc));
e0634e8d
NK
4306 memset(address, 0, sizeof(*address));
4307
5888f07a
HW
4308 address->tmz_surface = tmz_surface;
4309
695af5f9 4310 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
be7b9b32
BN
4311 uint64_t addr = afb->address + fb->offsets[0];
4312
12e2b2d4
DL
4313 plane_size->surface_size.x = 0;
4314 plane_size->surface_size.y = 0;
4315 plane_size->surface_size.width = fb->width;
4316 plane_size->surface_size.height = fb->height;
4317 plane_size->surface_pitch =
320932bf
NK
4318 fb->pitches[0] / fb->format->cpp[0];
4319
e0634e8d 4320 address->type = PLN_ADDR_TYPE_GRAPHICS;
be7b9b32
BN
4321 address->grph.addr.low_part = lower_32_bits(addr);
4322 address->grph.addr.high_part = upper_32_bits(addr);
1894478a 4323 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
be7b9b32 4324 uint64_t luma_addr = afb->address + fb->offsets[0];
1791e54f 4325 uint64_t chroma_addr = afb->address + fb->offsets[1];
e0634e8d 4326
12e2b2d4
DL
4327 plane_size->surface_size.x = 0;
4328 plane_size->surface_size.y = 0;
4329 plane_size->surface_size.width = fb->width;
4330 plane_size->surface_size.height = fb->height;
4331 plane_size->surface_pitch =
320932bf
NK
4332 fb->pitches[0] / fb->format->cpp[0];
4333
12e2b2d4
DL
4334 plane_size->chroma_size.x = 0;
4335 plane_size->chroma_size.y = 0;
320932bf 4336 /* TODO: set these based on surface format */
12e2b2d4
DL
4337 plane_size->chroma_size.width = fb->width / 2;
4338 plane_size->chroma_size.height = fb->height / 2;
320932bf 4339
12e2b2d4 4340 plane_size->chroma_pitch =
320932bf
NK
4341 fb->pitches[1] / fb->format->cpp[1];
4342
e0634e8d
NK
4343 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4344 address->video_progressive.luma_addr.low_part =
be7b9b32 4345 lower_32_bits(luma_addr);
e0634e8d 4346 address->video_progressive.luma_addr.high_part =
be7b9b32 4347 upper_32_bits(luma_addr);
e0634e8d
NK
4348 address->video_progressive.chroma_addr.low_part =
4349 lower_32_bits(chroma_addr);
4350 address->video_progressive.chroma_addr.high_part =
4351 upper_32_bits(chroma_addr);
4352 }
09e5665a 4353
a3241991 4354 if (adev->family >= AMDGPU_FAMILY_AI) {
9a33e881
BN
4355 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4356 rotation, plane_size,
4357 tiling_info, dcc,
4358 address,
4359 force_disable_dcc);
09e5665a
NK
4360 if (ret)
4361 return ret;
a3241991
BN
4362 } else {
4363 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
09e5665a
NK
4364 }
4365
4366 return 0;
7df7e505
NK
4367}
4368
d74004b6 4369static void
695af5f9 4370fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
d74004b6
NK
4371 bool *per_pixel_alpha, bool *global_alpha,
4372 int *global_alpha_value)
4373{
4374 *per_pixel_alpha = false;
4375 *global_alpha = false;
4376 *global_alpha_value = 0xff;
4377
4378 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4379 return;
4380
4381 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4382 static const uint32_t alpha_formats[] = {
4383 DRM_FORMAT_ARGB8888,
4384 DRM_FORMAT_RGBA8888,
4385 DRM_FORMAT_ABGR8888,
4386 };
4387 uint32_t format = plane_state->fb->format->format;
4388 unsigned int i;
4389
4390 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4391 if (format == alpha_formats[i]) {
4392 *per_pixel_alpha = true;
4393 break;
4394 }
4395 }
4396 }
4397
4398 if (plane_state->alpha < 0xffff) {
4399 *global_alpha = true;
4400 *global_alpha_value = plane_state->alpha >> 8;
4401 }
4402}
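/*
 * Worked example (illustrative, not part of the original source): an
 * overlay plane using DRM_FORMAT_ARGB8888 with plane_state->alpha ==
 * 0x8000 produces per_pixel_alpha = true, global_alpha = true and
 * global_alpha_value = 0x8000 >> 8 = 0x80, i.e. roughly 50% plane-wide
 * opacity applied on top of the per-pixel alpha channel.
 */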
4403
004fefa3
NK
4404static int
4405fill_plane_color_attributes(const struct drm_plane_state *plane_state,
695af5f9 4406 const enum surface_pixel_format format,
004fefa3
NK
4407 enum dc_color_space *color_space)
4408{
4409 bool full_range;
4410
4411 *color_space = COLOR_SPACE_SRGB;
4412
4413 /* DRM color properties only affect non-RGB formats. */
695af5f9 4414 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
004fefa3
NK
4415 return 0;
4416
4417 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4418
4419 switch (plane_state->color_encoding) {
4420 case DRM_COLOR_YCBCR_BT601:
4421 if (full_range)
4422 *color_space = COLOR_SPACE_YCBCR601;
4423 else
4424 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4425 break;
4426
4427 case DRM_COLOR_YCBCR_BT709:
4428 if (full_range)
4429 *color_space = COLOR_SPACE_YCBCR709;
4430 else
4431 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4432 break;
4433
4434 case DRM_COLOR_YCBCR_BT2020:
4435 if (full_range)
4436 *color_space = COLOR_SPACE_2020_YCBCR;
4437 else
4438 return -EINVAL;
4439 break;
4440
4441 default:
4442 return -EINVAL;
4443 }
4444
4445 return 0;
4446}
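/*
 * Example (illustrative, not part of the original source): an NV12
 * plane with DRM_COLOR_YCBCR_BT709 encoding and limited range selects
 * COLOR_SPACE_YCBCR709_LIMITED, whereas any RGB format returns early
 * with COLOR_SPACE_SRGB regardless of the DRM color properties.
 */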
4447
695af5f9
NK
4448static int
4449fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4450 const struct drm_plane_state *plane_state,
4451 const uint64_t tiling_flags,
4452 struct dc_plane_info *plane_info,
87b7ebc2 4453 struct dc_plane_address *address,
5888f07a 4454 bool tmz_surface,
87b7ebc2 4455 bool force_disable_dcc)
695af5f9
NK
4456{
4457 const struct drm_framebuffer *fb = plane_state->fb;
4458 const struct amdgpu_framebuffer *afb =
4459 to_amdgpu_framebuffer(plane_state->fb);
4460 struct drm_format_name_buf format_name;
4461 int ret;
4462
4463 memset(plane_info, 0, sizeof(*plane_info));
4464
4465 switch (fb->format->format) {
4466 case DRM_FORMAT_C8:
4467 plane_info->format =
4468 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4469 break;
4470 case DRM_FORMAT_RGB565:
4471 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4472 break;
4473 case DRM_FORMAT_XRGB8888:
4474 case DRM_FORMAT_ARGB8888:
4475 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4476 break;
4477 case DRM_FORMAT_XRGB2101010:
4478 case DRM_FORMAT_ARGB2101010:
4479 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4480 break;
4481 case DRM_FORMAT_XBGR2101010:
4482 case DRM_FORMAT_ABGR2101010:
4483 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4484 break;
4485 case DRM_FORMAT_XBGR8888:
4486 case DRM_FORMAT_ABGR8888:
4487 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4488 break;
4489 case DRM_FORMAT_NV21:
4490 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4491 break;
4492 case DRM_FORMAT_NV12:
4493 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4494 break;
cbec6477
SW
4495 case DRM_FORMAT_P010:
4496 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4497 break;
492548dc
SW
4498 case DRM_FORMAT_XRGB16161616F:
4499 case DRM_FORMAT_ARGB16161616F:
4500 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4501 break;
2a5195dc
MK
4502 case DRM_FORMAT_XBGR16161616F:
4503 case DRM_FORMAT_ABGR16161616F:
4504 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4505 break;
695af5f9
NK
4506 default:
4507 DRM_ERROR(
4508 "Unsupported screen format %s\n",
4509 drm_get_format_name(fb->format->format, &format_name));
4510 return -EINVAL;
4511 }
4512
4513 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4514 case DRM_MODE_ROTATE_0:
4515 plane_info->rotation = ROTATION_ANGLE_0;
4516 break;
4517 case DRM_MODE_ROTATE_90:
4518 plane_info->rotation = ROTATION_ANGLE_90;
4519 break;
4520 case DRM_MODE_ROTATE_180:
4521 plane_info->rotation = ROTATION_ANGLE_180;
4522 break;
4523 case DRM_MODE_ROTATE_270:
4524 plane_info->rotation = ROTATION_ANGLE_270;
4525 break;
4526 default:
4527 plane_info->rotation = ROTATION_ANGLE_0;
4528 break;
4529 }
4530
4531 plane_info->visible = true;
4532 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4533
6d83a32d
MS
4534 plane_info->layer_index = 0;
4535
695af5f9
NK
4536 ret = fill_plane_color_attributes(plane_state, plane_info->format,
4537 &plane_info->color_space);
4538 if (ret)
4539 return ret;
4540
4541 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4542 plane_info->rotation, tiling_flags,
4543 &plane_info->tiling_info,
4544 &plane_info->plane_size,
5888f07a 4545 &plane_info->dcc, address, tmz_surface,
87b7ebc2 4546 force_disable_dcc);
695af5f9
NK
4547 if (ret)
4548 return ret;
4549
4550 fill_blending_from_plane_state(
4551 plane_state, &plane_info->per_pixel_alpha,
4552 &plane_info->global_alpha, &plane_info->global_alpha_value);
4553
4554 return 0;
4555}
4556
4557static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4558 struct dc_plane_state *dc_plane_state,
4559 struct drm_plane_state *plane_state,
4560 struct drm_crtc_state *crtc_state)
e7b07cee 4561{
cf020d49 4562 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6eed95b0 4563 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
695af5f9
NK
4564 struct dc_scaling_info scaling_info;
4565 struct dc_plane_info plane_info;
695af5f9 4566 int ret;
87b7ebc2 4567 bool force_disable_dcc = false;
e7b07cee 4568
695af5f9
NK
4569 ret = fill_dc_scaling_info(plane_state, &scaling_info);
4570 if (ret)
4571 return ret;
e7b07cee 4572
695af5f9
NK
4573 dc_plane_state->src_rect = scaling_info.src_rect;
4574 dc_plane_state->dst_rect = scaling_info.dst_rect;
4575 dc_plane_state->clip_rect = scaling_info.clip_rect;
4576 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
e7b07cee 4577
87b7ebc2 4578 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
707477b0 4579 ret = fill_dc_plane_info_and_addr(adev, plane_state,
6eed95b0 4580 afb->tiling_flags,
695af5f9 4581 &plane_info,
87b7ebc2 4582 &dc_plane_state->address,
6eed95b0 4583 afb->tmz_surface,
87b7ebc2 4584 force_disable_dcc);
004fefa3
NK
4585 if (ret)
4586 return ret;
4587
695af5f9
NK
4588 dc_plane_state->format = plane_info.format;
4589 dc_plane_state->color_space = plane_info.color_space;
4591 dc_plane_state->plane_size = plane_info.plane_size;
4592 dc_plane_state->rotation = plane_info.rotation;
4593 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4594 dc_plane_state->stereo_format = plane_info.stereo_format;
4595 dc_plane_state->tiling_info = plane_info.tiling_info;
4596 dc_plane_state->visible = plane_info.visible;
4597 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4598 dc_plane_state->global_alpha = plane_info.global_alpha;
4599 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4600 dc_plane_state->dcc = plane_info.dcc;
6d83a32d 4601 dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
695af5f9 4602
e277adc5
LSL
4603 /*
4604 * Always set input transfer function, since plane state is refreshed
4605 * every time.
4606 */
cf020d49
NK
4607 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4608 if (ret)
4609 return ret;
e7b07cee 4610
cf020d49 4611 return 0;
e7b07cee
HW
4612}
4613
3ee6b26b
AD
4614static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4615 const struct dm_connector_state *dm_state,
4616 struct dc_stream_state *stream)
e7b07cee
HW
4617{
4618 enum amdgpu_rmx_type rmx_type;
4619
4620 struct rect src = { 0 }; /* viewport in composition space */
4621 struct rect dst = { 0 }; /* stream addressable area */
4622
4623 /* no mode. nothing to be done */
4624 if (!mode)
4625 return;
4626
4627 /* Full screen scaling by default */
4628 src.width = mode->hdisplay;
4629 src.height = mode->vdisplay;
4630 dst.width = stream->timing.h_addressable;
4631 dst.height = stream->timing.v_addressable;
4632
f4791779
HW
4633 if (dm_state) {
4634 rmx_type = dm_state->scaling;
4635 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4636 if (src.width * dst.height <
4637 src.height * dst.width) {
4638 /* height needs less upscaling/more downscaling */
4639 dst.width = src.width *
4640 dst.height / src.height;
4641 } else {
4642 /* width needs less upscaling/more downscaling */
4643 dst.height = src.height *
4644 dst.width / src.width;
4645 }
4646 } else if (rmx_type == RMX_CENTER) {
4647 dst = src;
e7b07cee 4648 }
e7b07cee 4649
f4791779
HW
4650 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4651 dst.y = (stream->timing.v_addressable - dst.height) / 2;
e7b07cee 4652
f4791779
HW
4653 if (dm_state->underscan_enable) {
4654 dst.x += dm_state->underscan_hborder / 2;
4655 dst.y += dm_state->underscan_vborder / 2;
4656 dst.width -= dm_state->underscan_hborder;
4657 dst.height -= dm_state->underscan_vborder;
4658 }
e7b07cee
HW
4659 }
4660
4661 stream->src = src;
4662 stream->dst = dst;
4663
f1ad2f5e 4664 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
e7b07cee
HW
4665 dst.x, dst.y, dst.width, dst.height);
4666
4667}
4668
3ee6b26b 4669static enum dc_color_depth
42ba01fc 4670convert_color_depth_from_display_info(const struct drm_connector *connector,
cbd14ae7 4671 bool is_y420, int requested_bpc)
e7b07cee 4672{
1bc22f20 4673 uint8_t bpc;
01c22997 4674
1bc22f20
SW
4675 if (is_y420) {
4676 bpc = 8;
4677
4678 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4679 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4680 bpc = 16;
4681 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4682 bpc = 12;
4683 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4684 bpc = 10;
4685 } else {
4686 bpc = (uint8_t)connector->display_info.bpc;
4687 /* Assume 8 bpc by default if no bpc is specified. */
4688 bpc = bpc ? bpc : 8;
4689 }
e7b07cee 4690
cbd14ae7 4691 if (requested_bpc > 0) {
01c22997
NK
4692 /*
4693 * Cap display bpc based on the user requested value.
4694 *
4695 * The value for state->max_bpc may not be correctly updated
4696 * depending on when the connector gets added to the state
4697 * or if this was called outside of atomic check, so it
4698 * can't be used directly.
4699 */
cbd14ae7 4700 bpc = min_t(u8, bpc, requested_bpc);
01c22997 4701
1825fd34
NK
4702 /* Round down to the nearest even number. */
4703 bpc = bpc - (bpc & 1);
4704 }
07e3a1cf 4705
e7b07cee
HW
4706 switch (bpc) {
4707 case 0:
1f6010a9
DF
4708 /*
4709 * Temporary Work around, DRM doesn't parse color depth for
e7b07cee
HW
4710 * EDID revision before 1.4
4711 * TODO: Fix edid parsing
4712 */
4713 return COLOR_DEPTH_888;
4714 case 6:
4715 return COLOR_DEPTH_666;
4716 case 8:
4717 return COLOR_DEPTH_888;
4718 case 10:
4719 return COLOR_DEPTH_101010;
4720 case 12:
4721 return COLOR_DEPTH_121212;
4722 case 14:
4723 return COLOR_DEPTH_141414;
4724 case 16:
4725 return COLOR_DEPTH_161616;
4726 default:
4727 return COLOR_DEPTH_UNDEFINED;
4728 }
4729}
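/*
 * Worked example (illustrative, not part of the original source): a
 * sink reporting 12 bpc with requested_bpc == 10 is capped to 10 and
 * maps to COLOR_DEPTH_101010; an odd request such as 11 is first
 * rounded down to the even value 10 by the (bpc & 1) masking above.
 */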
4730
3ee6b26b
AD
4731static enum dc_aspect_ratio
4732get_aspect_ratio(const struct drm_display_mode *mode_in)
e7b07cee 4733{
e11d4147
LSL
4734 /* 1-1 mapping, since both enums follow the HDMI spec. */
4735 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
e7b07cee
HW
4736}
4737
3ee6b26b
AD
4738static enum dc_color_space
4739get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
e7b07cee
HW
4740{
4741 enum dc_color_space color_space = COLOR_SPACE_SRGB;
4742
4743 switch (dc_crtc_timing->pixel_encoding) {
4744 case PIXEL_ENCODING_YCBCR422:
4745 case PIXEL_ENCODING_YCBCR444:
4746 case PIXEL_ENCODING_YCBCR420:
4747 {
4748 /*
4749 * 27030 kHz is the separation point between HDTV and SDTV
4750 * according to the HDMI spec; we use YCbCr709 and YCbCr601
4751 * respectively.
4752 */
380604e2 4753 if (dc_crtc_timing->pix_clk_100hz > 270300) {
e7b07cee
HW
4754 if (dc_crtc_timing->flags.Y_ONLY)
4755 color_space =
4756 COLOR_SPACE_YCBCR709_LIMITED;
4757 else
4758 color_space = COLOR_SPACE_YCBCR709;
4759 } else {
4760 if (dc_crtc_timing->flags.Y_ONLY)
4761 color_space =
4762 COLOR_SPACE_YCBCR601_LIMITED;
4763 else
4764 color_space = COLOR_SPACE_YCBCR601;
4765 }
4766
4767 }
4768 break;
4769 case PIXEL_ENCODING_RGB:
4770 color_space = COLOR_SPACE_SRGB;
4771 break;
4772
4773 default:
4774 WARN_ON(1);
4775 break;
4776 }
4777
4778 return color_space;
4779}
4780
ea117312
TA
4781static bool adjust_colour_depth_from_display_info(
4782 struct dc_crtc_timing *timing_out,
4783 const struct drm_display_info *info)
400443e8 4784{
ea117312 4785 enum dc_color_depth depth = timing_out->display_color_depth;
400443e8 4786 int normalized_clk;
400443e8 4787 do {
380604e2 4788 normalized_clk = timing_out->pix_clk_100hz / 10;
400443e8
ML
4789 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4790 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4791 normalized_clk /= 2;
4792 /* Adjust the pixel clock per the HDMI spec, based on the colour depth. */
ea117312
TA
4793 switch (depth) {
4794 case COLOR_DEPTH_888:
4795 break;
400443e8
ML
4796 case COLOR_DEPTH_101010:
4797 normalized_clk = (normalized_clk * 30) / 24;
4798 break;
4799 case COLOR_DEPTH_121212:
4800 normalized_clk = (normalized_clk * 36) / 24;
4801 break;
4802 case COLOR_DEPTH_161616:
4803 normalized_clk = (normalized_clk * 48) / 24;
4804 break;
4805 default:
ea117312
TA
4806 /* The above depths are the only ones valid for HDMI. */
4807 return false;
400443e8 4808 }
ea117312
TA
4809 if (normalized_clk <= info->max_tmds_clock) {
4810 timing_out->display_color_depth = depth;
4811 return true;
4812 }
4813 } while (--depth > COLOR_DEPTH_666);
4814 return false;
400443e8 4815}
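/*
 * Worked example (illustrative, not part of the original source): a
 * 594 MHz HDMI mode at COLOR_DEPTH_121212 normalizes to
 * 594000 * 36 / 24 = 891000 kHz. Against a sink max_tmds_clock of
 * 600000 kHz, the loop steps down through COLOR_DEPTH_101010
 * (594000 * 30 / 24 = 742500 kHz, still too high) to COLOR_DEPTH_888
 * (594000 kHz), which fits, so the function returns true.
 */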
e7b07cee 4816
42ba01fc
NK
4817static void fill_stream_properties_from_drm_display_mode(
4818 struct dc_stream_state *stream,
4819 const struct drm_display_mode *mode_in,
4820 const struct drm_connector *connector,
4821 const struct drm_connector_state *connector_state,
cbd14ae7
SW
4822 const struct dc_stream_state *old_stream,
4823 int requested_bpc)
e7b07cee
HW
4824{
4825 struct dc_crtc_timing *timing_out = &stream->timing;
fe61a2f1 4826 const struct drm_display_info *info = &connector->display_info;
d4252eee 4827 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
1cb1d477
WL
4828 struct hdmi_vendor_infoframe hv_frame;
4829 struct hdmi_avi_infoframe avi_frame;
e7b07cee 4830
acf83f86
WL
4831 memset(&hv_frame, 0, sizeof(hv_frame));
4832 memset(&avi_frame, 0, sizeof(avi_frame));
4833
e7b07cee
HW
4834 timing_out->h_border_left = 0;
4835 timing_out->h_border_right = 0;
4836 timing_out->v_border_top = 0;
4837 timing_out->v_border_bottom = 0;
4838 /* TODO: un-hardcode */
fe61a2f1 4839 if (drm_mode_is_420_only(info, mode_in)
ceb3dbb4 4840 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
fe61a2f1 4841 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
d4252eee
SW
4842 else if (drm_mode_is_420_also(info, mode_in)
4843 && aconnector->force_yuv420_output)
4844 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
fe61a2f1 4845 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
ceb3dbb4 4846 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
e7b07cee
HW
4847 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4848 else
4849 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4850
4851 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4852 timing_out->display_color_depth = convert_color_depth_from_display_info(
cbd14ae7
SW
4853 connector,
4854 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4855 requested_bpc);
e7b07cee
HW
4856 timing_out->scan_type = SCANNING_TYPE_NODATA;
4857 timing_out->hdmi_vic = 0;
b333730d
BL
4858
4859 if (old_stream) {
4860 timing_out->vic = old_stream->timing.vic;
4861 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4862 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4863 } else {
4864 timing_out->vic = drm_match_cea_mode(mode_in);
4865 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4866 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4867 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4868 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4869 }
e7b07cee 4870
1cb1d477
WL
4871 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4872 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4873 timing_out->vic = avi_frame.video_code;
4874 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4875 timing_out->hdmi_vic = hv_frame.vic;
4876 }
4877
e7b07cee
HW
4878 timing_out->h_addressable = mode_in->crtc_hdisplay;
4879 timing_out->h_total = mode_in->crtc_htotal;
4880 timing_out->h_sync_width =
4881 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4882 timing_out->h_front_porch =
4883 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4884 timing_out->v_total = mode_in->crtc_vtotal;
4885 timing_out->v_addressable = mode_in->crtc_vdisplay;
4886 timing_out->v_front_porch =
4887 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4888 timing_out->v_sync_width =
4889 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
380604e2 4890 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
e7b07cee 4891 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
e7b07cee
HW
4892
4893 stream->output_color_space = get_output_color_space(timing_out);
4894
e43a432c
AK
4895 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4896 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
ea117312
TA
4897 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4898 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4899 drm_mode_is_420_also(info, mode_in) &&
4900 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4901 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4902 adjust_colour_depth_from_display_info(timing_out, info);
4903 }
4904 }
e7b07cee
HW
4905}
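/*
 * Worked example (illustrative, not part of the original source): for
 * the CEA 1920x1080@60 mode (crtc_hdisplay 1920, crtc_hsync_start 2008,
 * crtc_hsync_end 2052, crtc_htotal 2200, crtc_clock 148500), the code
 * above derives h_front_porch = 2008 - 1920 = 88, h_sync_width =
 * 2052 - 2008 = 44 and pix_clk_100hz = 148500 * 10 = 1485000.
 */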
4906
3ee6b26b
AD
4907static void fill_audio_info(struct audio_info *audio_info,
4908 const struct drm_connector *drm_connector,
4909 const struct dc_sink *dc_sink)
e7b07cee
HW
4910{
4911 int i = 0;
4912 int cea_revision = 0;
4913 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4914
4915 audio_info->manufacture_id = edid_caps->manufacturer_id;
4916 audio_info->product_id = edid_caps->product_id;
4917
4918 cea_revision = drm_connector->display_info.cea_rev;
4919
090afc1e 4920 strscpy(audio_info->display_name,
d2b2562c 4921 edid_caps->display_name,
090afc1e 4922 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
e7b07cee 4923
b830ebc9 4924 if (cea_revision >= 3) {
e7b07cee
HW
4925 audio_info->mode_count = edid_caps->audio_mode_count;
4926
4927 for (i = 0; i < audio_info->mode_count; ++i) {
4928 audio_info->modes[i].format_code =
4929 (enum audio_format_code)
4930 (edid_caps->audio_modes[i].format_code);
4931 audio_info->modes[i].channel_count =
4932 edid_caps->audio_modes[i].channel_count;
4933 audio_info->modes[i].sample_rates.all =
4934 edid_caps->audio_modes[i].sample_rate;
4935 audio_info->modes[i].sample_size =
4936 edid_caps->audio_modes[i].sample_size;
4937 }
4938 }
4939
4940 audio_info->flags.all = edid_caps->speaker_flags;
4941
4942	/* TODO: We only check for progressive mode; check for interlace mode too */
b830ebc9 4943 if (drm_connector->latency_present[0]) {
e7b07cee
HW
4944 audio_info->video_latency = drm_connector->video_latency[0];
4945 audio_info->audio_latency = drm_connector->audio_latency[0];
4946 }
4947
4948 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4949
4950}
4951
3ee6b26b
AD
4952static void
4953copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4954 struct drm_display_mode *dst_mode)
e7b07cee
HW
4955{
4956 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4957 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4958 dst_mode->crtc_clock = src_mode->crtc_clock;
4959 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4960 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
b830ebc9 4961 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
e7b07cee
HW
4962 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4963 dst_mode->crtc_htotal = src_mode->crtc_htotal;
4964 dst_mode->crtc_hskew = src_mode->crtc_hskew;
4965 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4966 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4967 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4968 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4969 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4970}
4971
3ee6b26b
AD
4972static void
4973decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4974 const struct drm_display_mode *native_mode,
4975 bool scale_enabled)
e7b07cee
HW
4976{
4977 if (scale_enabled) {
4978 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4979 } else if (native_mode->clock == drm_mode->clock &&
4980 native_mode->htotal == drm_mode->htotal &&
4981 native_mode->vtotal == drm_mode->vtotal) {
4982 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4983 } else {
4984		/* no scaling and no amdgpu-inserted mode; nothing to patch */
4985 }
4986}
4987
aed15309
ML
4988static struct dc_sink *
4989create_fake_sink(struct amdgpu_dm_connector *aconnector)
2e0ac3d6 4990{
2e0ac3d6 4991 struct dc_sink_init_data sink_init_data = { 0 };
aed15309 4992 struct dc_sink *sink = NULL;
2e0ac3d6
HW
4993 sink_init_data.link = aconnector->dc_link;
4994 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4995
4996 sink = dc_sink_create(&sink_init_data);
423788c7 4997 if (!sink) {
2e0ac3d6 4998 DRM_ERROR("Failed to create sink!\n");
aed15309 4999 return NULL;
423788c7 5000 }
2e0ac3d6 5001 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
423788c7 5002
aed15309 5003 return sink;
2e0ac3d6
HW
5004}
5005
fa2123db
ML
5006static void set_multisync_trigger_params(
5007 struct dc_stream_state *stream)
5008{
5009 if (stream->triggered_crtc_reset.enabled) {
5010 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
5011 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
5012 }
5013}
5014
5015static void set_master_stream(struct dc_stream_state *stream_set[],
5016 int stream_count)
5017{
5018 int j, highest_rfr = 0, master_stream = 0;
5019
5020 for (j = 0; j < stream_count; j++) {
5021 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5022 int refresh_rate = 0;
5023
380604e2 5024 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
fa2123db
ML
5025 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5026 if (refresh_rate > highest_rfr) {
5027 highest_rfr = refresh_rate;
5028 master_stream = j;
5029 }
5030 }
5031 }
5032 for (j = 0; j < stream_count; j++) {
03736f4c 5033 if (stream_set[j])
fa2123db
ML
5034 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5035 }
5036}
5037
5038static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5039{
5040 int i = 0;
5041
5042 if (context->stream_count < 2)
5043 return;
5044 for (i = 0; i < context->stream_count ; i++) {
5045 if (!context->streams[i])
5046 continue;
1f6010a9
DF
5047 /*
5048 * TODO: add a function to read AMD VSDB bits and set
fa2123db 5049		 * crtc_sync_master.multi_sync_enabled flag.
1f6010a9 5050		 * For now it is set to false.
fa2123db
ML
5051 */
5052 set_multisync_trigger_params(context->streams[i]);
5053 }
5054 set_master_stream(context->streams, context->stream_count);
5055}
5056
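/*
 * Illustrative check of the refresh-rate formula in set_master_stream()
 * (same standard 1080p60 numbers as above, not driver output):
 *
 *   refresh_rate = (pix_clk_100hz * 100) / (h_total * v_total)
 *                = (1485000 * 100) / (2200 * 1125) = 60 Hz
 *
 * The stream with the highest rate becomes the event source that the other
 * synchronized CRTCs reset against.
 */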
3ee6b26b
AD
5057static struct dc_stream_state *
5058create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5059 const struct drm_display_mode *drm_mode,
b333730d 5060 const struct dm_connector_state *dm_state,
cbd14ae7
SW
5061 const struct dc_stream_state *old_stream,
5062 int requested_bpc)
e7b07cee
HW
5063{
5064 struct drm_display_mode *preferred_mode = NULL;
391ef035 5065 struct drm_connector *drm_connector;
42ba01fc
NK
5066 const struct drm_connector_state *con_state =
5067 dm_state ? &dm_state->base : NULL;
0971c40e 5068 struct dc_stream_state *stream = NULL;
e7b07cee
HW
5069 struct drm_display_mode mode = *drm_mode;
5070 bool native_mode_found = false;
b333730d
BL
5071 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5072 int mode_refresh;
58124bf8 5073 int preferred_refresh = 0;
defeb878 5074#if defined(CONFIG_DRM_AMD_DC_DCN)
df2f1015 5075 struct dsc_dec_dpcd_caps dsc_caps;
df2f1015 5076#endif
df2f1015 5077 uint32_t link_bandwidth_kbps;
b333730d 5078
aed15309 5079 struct dc_sink *sink = NULL;
b830ebc9 5080 if (aconnector == NULL) {
e7b07cee 5081 DRM_ERROR("aconnector is NULL!\n");
64245fa7 5082 return stream;
e7b07cee
HW
5083 }
5084
e7b07cee 5085 drm_connector = &aconnector->base;
2e0ac3d6 5086
f4ac176e 5087 if (!aconnector->dc_sink) {
e3fa5c4c
JFZ
5088 sink = create_fake_sink(aconnector);
5089 if (!sink)
5090 return stream;
aed15309
ML
5091 } else {
5092 sink = aconnector->dc_sink;
dcd5fb82 5093 dc_sink_retain(sink);
f4ac176e 5094 }
2e0ac3d6 5095
aed15309 5096 stream = dc_create_stream_for_sink(sink);
4562236b 5097
b830ebc9 5098 if (stream == NULL) {
e7b07cee 5099 DRM_ERROR("Failed to create stream for sink!\n");
aed15309 5100 goto finish;
e7b07cee
HW
5101 }
5102
ceb3dbb4
JL
5103 stream->dm_stream_context = aconnector;
5104
4a36fcba
WL
5105 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5106 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5107
e7b07cee
HW
5108 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5109 /* Search for preferred mode */
5110 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5111 native_mode_found = true;
5112 break;
5113 }
5114 }
5115 if (!native_mode_found)
5116 preferred_mode = list_first_entry_or_null(
5117 &aconnector->base.modes,
5118 struct drm_display_mode,
5119 head);
5120
b333730d
BL
5121 mode_refresh = drm_mode_vrefresh(&mode);
5122
b830ebc9 5123 if (preferred_mode == NULL) {
1f6010a9
DF
5124 /*
5125		 * This may not be an error: the use case is when we have no
e7b07cee
HW
5126		 * usermode calls to reset and set the mode upon hotplug. In this
5127		 * case, we call set mode ourselves to restore the previous mode,
5128		 * and the mode list may not be filled in yet.
5129 */
f1ad2f5e 5130 DRM_DEBUG_DRIVER("No preferred mode found\n");
e7b07cee
HW
5131 } else {
5132 decide_crtc_timing_for_drm_display_mode(
5133 &mode, preferred_mode,
f4791779 5134 dm_state ? (dm_state->scaling != RMX_OFF) : false);
58124bf8 5135 preferred_refresh = drm_mode_vrefresh(preferred_mode);
e7b07cee
HW
5136 }
5137
f783577c
JFZ
5138 if (!dm_state)
5139 drm_mode_set_crtcinfo(&mode, 0);
5140
b333730d
BL
5141 /*
5142 * If scaling is enabled and refresh rate didn't change
5143 * we copy the vic and polarities of the old timings
5144 */
5145 if (!scale || mode_refresh != preferred_refresh)
5146 fill_stream_properties_from_drm_display_mode(stream,
cbd14ae7 5147 &mode, &aconnector->base, con_state, NULL, requested_bpc);
b333730d
BL
5148 else
5149 fill_stream_properties_from_drm_display_mode(stream,
cbd14ae7 5150 &mode, &aconnector->base, con_state, old_stream, requested_bpc);
b333730d 5151
df2f1015
DF
5152 stream->timing.flags.DSC = 0;
5153
5154 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
defeb878 5155#if defined(CONFIG_DRM_AMD_DC_DCN)
2af0f378
NC
5156 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5157 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6d824ed5 5158 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
df2f1015 5159 &dsc_caps);
defeb878 5160#endif
df2f1015
DF
5161 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5162 dc_link_get_link_cap(aconnector->dc_link));
5163
defeb878 5164#if defined(CONFIG_DRM_AMD_DC_DCN)
0749ddeb 5165 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
bcc6aa61 5166 /* Set DSC policy according to dsc_clock_en */
0749ddeb
EB
5167 dc_dsc_policy_set_enable_dsc_when_not_needed(
5168 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
bcc6aa61 5169
0417df16 5170 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
df2f1015 5171 &dsc_caps,
0417df16 5172 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
9abdf392 5173 0,
df2f1015
DF
5174 link_bandwidth_kbps,
5175 &stream->timing,
5176 &stream->timing.dsc_cfg))
5177 stream->timing.flags.DSC = 1;
27e84dd7 5178 /* Overwrite the stream flag if DSC is enabled through debugfs */
0749ddeb 5179 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
097e6d98 5180 stream->timing.flags.DSC = 1;
734e4c97 5181
28b2f656
EB
5182 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5183 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
734e4c97 5184
28b2f656
EB
5185 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5186 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5268bf13
EB
5187
5188 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5189 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
097e6d98 5190 }
39a4eb85 5191#endif
df2f1015 5192 }
39a4eb85 5193
e7b07cee
HW
5194 update_stream_scaling_settings(&mode, dm_state, stream);
5195
5196 fill_audio_info(
5197 &stream->audio_info,
5198 drm_connector,
aed15309 5199 sink);
e7b07cee 5200
ceb3dbb4 5201 update_stream_signal(stream, sink);
9182b4cb 5202
d832fc3b 5203 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
75f77aaf
WL
5204 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5205
8a488f5d
RL
5206 if (stream->link->psr_settings.psr_feature_enabled) {
5207		//
5208		// Decide whether the stream supports VSC SDP colorimetry
5209		// before building the VSC info packet.
5210		//
5211 stream->use_vsc_sdp_for_colorimetry = false;
5212 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5213 stream->use_vsc_sdp_for_colorimetry =
5214 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5215 } else {
5216 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5217 stream->use_vsc_sdp_for_colorimetry = true;
8c322309 5218 }
8a488f5d 5219 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
8c322309 5220 }
aed15309 5221finish:
dcd5fb82 5222 dc_sink_release(sink);
9e3efe3e 5223
e7b07cee
HW
5224 return stream;
5225}
5226
7578ecda 5227static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
e7b07cee
HW
5228{
5229 drm_crtc_cleanup(crtc);
5230 kfree(crtc);
5231}
5232
5233static void dm_crtc_destroy_state(struct drm_crtc *crtc,
3ee6b26b 5234 struct drm_crtc_state *state)
e7b07cee
HW
5235{
5236 struct dm_crtc_state *cur = to_dm_crtc_state(state);
5237
5238	/* TODO: Destroy dc_stream objects once the stream object is flattened */
5239 if (cur->stream)
5240 dc_stream_release(cur->stream);
5241
5242
5243 __drm_atomic_helper_crtc_destroy_state(state);
5244
5245
5246 kfree(state);
5247}
5248
5249static void dm_crtc_reset_state(struct drm_crtc *crtc)
5250{
5251 struct dm_crtc_state *state;
5252
5253 if (crtc->state)
5254 dm_crtc_destroy_state(crtc, crtc->state);
5255
5256 state = kzalloc(sizeof(*state), GFP_KERNEL);
5257 if (WARN_ON(!state))
5258 return;
5259
1f8a52ec 5260 __drm_atomic_helper_crtc_reset(crtc, &state->base);
e7b07cee
HW
5261}
5262
5263static struct drm_crtc_state *
5264dm_crtc_duplicate_state(struct drm_crtc *crtc)
5265{
5266 struct dm_crtc_state *state, *cur;
5267
5268 cur = to_dm_crtc_state(crtc->state);
5269
5270 if (WARN_ON(!crtc->state))
5271 return NULL;
5272
2004f45e 5273 state = kzalloc(sizeof(*state), GFP_KERNEL);
2a55f096
ES
5274 if (!state)
5275 return NULL;
e7b07cee
HW
5276
5277 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5278
5279 if (cur->stream) {
5280 state->stream = cur->stream;
5281 dc_stream_retain(state->stream);
5282 }
5283
d6ef9b41 5284 state->active_planes = cur->active_planes;
98e6436d 5285 state->vrr_infopacket = cur->vrr_infopacket;
c1ee92f9 5286 state->abm_level = cur->abm_level;
bb47de73
NK
5287 state->vrr_supported = cur->vrr_supported;
5288 state->freesync_config = cur->freesync_config;
14b25846 5289 state->crc_src = cur->crc_src;
cf020d49
NK
5290 state->cm_has_degamma = cur->cm_has_degamma;
5291 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
c920888c
WL
5292#ifdef CONFIG_DEBUG_FS
5293 state->crc_window = cur->crc_window;
5294#endif
e7b07cee
HW
5295	/* TODO: Duplicate dc_stream after the stream object is flattened */
5296
5297 return &state->base;
5298}
5299
c920888c
WL
5300#ifdef CONFIG_DEBUG_FS
5301int amdgpu_dm_crtc_atomic_set_property(struct drm_crtc *crtc,
5302 struct drm_crtc_state *crtc_state,
5303 struct drm_property *property,
5304 uint64_t val)
5305{
5306 struct drm_device *dev = crtc->dev;
5307 struct amdgpu_device *adev = drm_to_adev(dev);
5308 struct dm_crtc_state *dm_new_state =
5309 to_dm_crtc_state(crtc_state);
5310
5311 if (property == adev->dm.crc_win_x_start_property)
5312 dm_new_state->crc_window.x_start = val;
5313 else if (property == adev->dm.crc_win_y_start_property)
5314 dm_new_state->crc_window.y_start = val;
5315 else if (property == adev->dm.crc_win_x_end_property)
5316 dm_new_state->crc_window.x_end = val;
5317 else if (property == adev->dm.crc_win_y_end_property)
5318 dm_new_state->crc_window.y_end = val;
5319 else
5320 return -EINVAL;
5321
5322 return 0;
5323}
5324
5325int amdgpu_dm_crtc_atomic_get_property(struct drm_crtc *crtc,
5326 const struct drm_crtc_state *state,
5327 struct drm_property *property,
5328 uint64_t *val)
5329{
5330 struct drm_device *dev = crtc->dev;
5331 struct amdgpu_device *adev = drm_to_adev(dev);
5332 struct dm_crtc_state *dm_state =
5333 to_dm_crtc_state(state);
5334
5335 if (property == adev->dm.crc_win_x_start_property)
5336 *val = dm_state->crc_window.x_start;
5337 else if (property == adev->dm.crc_win_y_start_property)
5338 *val = dm_state->crc_window.y_start;
5339 else if (property == adev->dm.crc_win_x_end_property)
5340 *val = dm_state->crc_window.x_end;
5341 else if (property == adev->dm.crc_win_y_end_property)
5342 *val = dm_state->crc_window.y_end;
5343 else
5344 return -EINVAL;
5345
5346 return 0;
5347}
5348#endif
5349
d2574c33
MK
5350static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5351{
5352 enum dc_irq_source irq_source;
5353 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
1348969a 5354 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
d2574c33
MK
5355 int rc;
5356
5357 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5358
5359 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5360
5361 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
5362 acrtc->crtc_id, enable ? "en" : "dis", rc);
5363 return rc;
5364}
589d2739
HW
5365
5366static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5367{
5368 enum dc_irq_source irq_source;
5369 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
1348969a 5370 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
d2574c33
MK
5371 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5372 int rc = 0;
5373
5374 if (enable) {
5375 /* vblank irq on -> Only need vupdate irq in vrr mode */
5376 if (amdgpu_dm_vrr_active(acrtc_state))
5377 rc = dm_set_vupdate_irq(crtc, true);
5378 } else {
5379 /* vblank irq off -> vupdate irq off */
5380 rc = dm_set_vupdate_irq(crtc, false);
5381 }
5382
5383 if (rc)
5384 return rc;
589d2739
HW
5385
5386 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
a0e30392 5387 return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
589d2739
HW
5388}
5389
5390static int dm_enable_vblank(struct drm_crtc *crtc)
5391{
5392 return dm_set_vblank(crtc, true);
5393}
5394
5395static void dm_disable_vblank(struct drm_crtc *crtc)
5396{
5397 dm_set_vblank(crtc, false);
5398}
5399
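/*
 * Behaviour summary for dm_set_vblank() above (a sketch derived from the
 * code, with VRR state read from the current CRTC state):
 *
 *   enable,  VRR active   -> vupdate irq on,        then vblank irq on
 *   enable,  VRR inactive -> vupdate irq untouched, then vblank irq on
 *   disable, either       -> vupdate irq off,       then vblank irq off
 */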
e7b07cee
HW
5400/* Only the options currently available for the driver are implemented */
5401static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5402 .reset = dm_crtc_reset_state,
5403 .destroy = amdgpu_dm_crtc_destroy,
5404 .gamma_set = drm_atomic_helper_legacy_gamma_set,
5405 .set_config = drm_atomic_helper_set_config,
5406 .page_flip = drm_atomic_helper_page_flip,
5407 .atomic_duplicate_state = dm_crtc_duplicate_state,
5408 .atomic_destroy_state = dm_crtc_destroy_state,
31aec354 5409 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
3b3b8448 5410 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
8fb843d1 5411 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
e3eff4b5 5412 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
589d2739
HW
5413 .enable_vblank = dm_enable_vblank,
5414 .disable_vblank = dm_disable_vblank,
e3eff4b5 5415 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
c920888c
WL
5416#ifdef CONFIG_DEBUG_FS
5417 .atomic_set_property = amdgpu_dm_crtc_atomic_set_property,
5418 .atomic_get_property = amdgpu_dm_crtc_atomic_get_property,
5419#endif
e7b07cee
HW
5420};
5421
5422static enum drm_connector_status
5423amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5424{
5425 bool connected;
c84dec2f 5426 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 5427
1f6010a9
DF
5428 /*
5429 * Notes:
e7b07cee
HW
5430 * 1. This interface is NOT called in context of HPD irq.
5431 * 2. This interface *is called* in context of user-mode ioctl. Which
1f6010a9
DF
5432 * makes it a bad place for *any* MST-related activity.
5433 */
e7b07cee 5434
8580d60b
HW
5435 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5436 !aconnector->fake_enable)
e7b07cee
HW
5437 connected = (aconnector->dc_sink != NULL);
5438 else
5439 connected = (aconnector->base.force == DRM_FORCE_ON);
5440
0f877894
OV
5441 update_subconnector_property(aconnector);
5442
e7b07cee
HW
5443 return (connected ? connector_status_connected :
5444 connector_status_disconnected);
5445}
5446
3ee6b26b
AD
5447int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5448 struct drm_connector_state *connector_state,
5449 struct drm_property *property,
5450 uint64_t val)
e7b07cee
HW
5451{
5452 struct drm_device *dev = connector->dev;
1348969a 5453 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
5454 struct dm_connector_state *dm_old_state =
5455 to_dm_connector_state(connector->state);
5456 struct dm_connector_state *dm_new_state =
5457 to_dm_connector_state(connector_state);
5458
5459 int ret = -EINVAL;
5460
5461 if (property == dev->mode_config.scaling_mode_property) {
5462 enum amdgpu_rmx_type rmx_type;
5463
5464 switch (val) {
5465 case DRM_MODE_SCALE_CENTER:
5466 rmx_type = RMX_CENTER;
5467 break;
5468 case DRM_MODE_SCALE_ASPECT:
5469 rmx_type = RMX_ASPECT;
5470 break;
5471 case DRM_MODE_SCALE_FULLSCREEN:
5472 rmx_type = RMX_FULL;
5473 break;
5474 case DRM_MODE_SCALE_NONE:
5475 default:
5476 rmx_type = RMX_OFF;
5477 break;
5478 }
5479
5480 if (dm_old_state->scaling == rmx_type)
5481 return 0;
5482
5483 dm_new_state->scaling = rmx_type;
5484 ret = 0;
5485 } else if (property == adev->mode_info.underscan_hborder_property) {
5486 dm_new_state->underscan_hborder = val;
5487 ret = 0;
5488 } else if (property == adev->mode_info.underscan_vborder_property) {
5489 dm_new_state->underscan_vborder = val;
5490 ret = 0;
5491 } else if (property == adev->mode_info.underscan_property) {
5492 dm_new_state->underscan_enable = val;
5493 ret = 0;
c1ee92f9
DF
5494 } else if (property == adev->mode_info.abm_level_property) {
5495 dm_new_state->abm_level = val;
5496 ret = 0;
e7b07cee
HW
5497 }
5498
5499 return ret;
5500}
5501
3ee6b26b
AD
5502int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5503 const struct drm_connector_state *state,
5504 struct drm_property *property,
5505 uint64_t *val)
e7b07cee
HW
5506{
5507 struct drm_device *dev = connector->dev;
1348969a 5508 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
5509 struct dm_connector_state *dm_state =
5510 to_dm_connector_state(state);
5511 int ret = -EINVAL;
5512
5513 if (property == dev->mode_config.scaling_mode_property) {
5514 switch (dm_state->scaling) {
5515 case RMX_CENTER:
5516 *val = DRM_MODE_SCALE_CENTER;
5517 break;
5518 case RMX_ASPECT:
5519 *val = DRM_MODE_SCALE_ASPECT;
5520 break;
5521 case RMX_FULL:
5522 *val = DRM_MODE_SCALE_FULLSCREEN;
5523 break;
5524 case RMX_OFF:
5525 default:
5526 *val = DRM_MODE_SCALE_NONE;
5527 break;
5528 }
5529 ret = 0;
5530 } else if (property == adev->mode_info.underscan_hborder_property) {
5531 *val = dm_state->underscan_hborder;
5532 ret = 0;
5533 } else if (property == adev->mode_info.underscan_vborder_property) {
5534 *val = dm_state->underscan_vborder;
5535 ret = 0;
5536 } else if (property == adev->mode_info.underscan_property) {
5537 *val = dm_state->underscan_enable;
5538 ret = 0;
c1ee92f9
DF
5539 } else if (property == adev->mode_info.abm_level_property) {
5540 *val = dm_state->abm_level;
5541 ret = 0;
e7b07cee 5542 }
c1ee92f9 5543
e7b07cee
HW
5544 return ret;
5545}
5546
526c654a
ED
5547static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5548{
5549 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5550
5551 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5552}
5553
7578ecda 5554static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
e7b07cee 5555{
c84dec2f 5556 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 5557 const struct dc_link *link = aconnector->dc_link;
1348969a 5558 struct amdgpu_device *adev = drm_to_adev(connector->dev);
e7b07cee 5559 struct amdgpu_display_manager *dm = &adev->dm;
ada8ce15 5560
5dff80bd
AG
5561 /*
5562	 * Call only if mst_mgr was initialized before, since it's not done
5563 * for all connector types.
5564 */
5565 if (aconnector->mst_mgr.dev)
5566 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5567
e7b07cee
HW
5568#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5569 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5570
89fc8d4e 5571 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5cd29ed0
HW
5572 link->type != dc_connection_none &&
5573 dm->backlight_dev) {
5574 backlight_device_unregister(dm->backlight_dev);
5575 dm->backlight_dev = NULL;
e7b07cee
HW
5576 }
5577#endif
dcd5fb82
MF
5578
5579 if (aconnector->dc_em_sink)
5580 dc_sink_release(aconnector->dc_em_sink);
5581 aconnector->dc_em_sink = NULL;
5582 if (aconnector->dc_sink)
5583 dc_sink_release(aconnector->dc_sink);
5584 aconnector->dc_sink = NULL;
5585
e86e8947 5586 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
e7b07cee
HW
5587 drm_connector_unregister(connector);
5588 drm_connector_cleanup(connector);
526c654a
ED
5589 if (aconnector->i2c) {
5590 i2c_del_adapter(&aconnector->i2c->base);
5591 kfree(aconnector->i2c);
5592 }
7daec99f 5593 kfree(aconnector->dm_dp_aux.aux.name);
526c654a 5594
e7b07cee
HW
5595 kfree(connector);
5596}
5597
5598void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5599{
5600 struct dm_connector_state *state =
5601 to_dm_connector_state(connector->state);
5602
df099b9b
LSL
5603 if (connector->state)
5604 __drm_atomic_helper_connector_destroy_state(connector->state);
5605
e7b07cee
HW
5606 kfree(state);
5607
5608 state = kzalloc(sizeof(*state), GFP_KERNEL);
5609
5610 if (state) {
5611 state->scaling = RMX_OFF;
5612 state->underscan_enable = false;
5613 state->underscan_hborder = 0;
5614 state->underscan_vborder = 0;
01933ba4 5615 state->base.max_requested_bpc = 8;
3261e013
ML
5616 state->vcpi_slots = 0;
5617 state->pbn = 0;
c3e50f89
NK
5618 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5619 state->abm_level = amdgpu_dm_abm_level;
5620
df099b9b 5621 __drm_atomic_helper_connector_reset(connector, &state->base);
e7b07cee
HW
5622 }
5623}
5624
3ee6b26b
AD
5625struct drm_connector_state *
5626amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
e7b07cee
HW
5627{
5628 struct dm_connector_state *state =
5629 to_dm_connector_state(connector->state);
5630
5631 struct dm_connector_state *new_state =
5632 kmemdup(state, sizeof(*state), GFP_KERNEL);
5633
98e6436d
AK
5634 if (!new_state)
5635 return NULL;
e7b07cee 5636
98e6436d
AK
5637 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5638
5639 new_state->freesync_capable = state->freesync_capable;
c1ee92f9 5640 new_state->abm_level = state->abm_level;
922454c2
NK
5641 new_state->scaling = state->scaling;
5642 new_state->underscan_enable = state->underscan_enable;
5643 new_state->underscan_hborder = state->underscan_hborder;
5644 new_state->underscan_vborder = state->underscan_vborder;
3261e013
ML
5645 new_state->vcpi_slots = state->vcpi_slots;
5646 new_state->pbn = state->pbn;
98e6436d 5647 return &new_state->base;
e7b07cee
HW
5648}
5649
14f04fa4
AD
5650static int
5651amdgpu_dm_connector_late_register(struct drm_connector *connector)
5652{
5653 struct amdgpu_dm_connector *amdgpu_dm_connector =
5654 to_amdgpu_dm_connector(connector);
00a8037e 5655 int r;
14f04fa4 5656
00a8037e
AD
5657 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5658 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5659 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5660 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5661 if (r)
5662 return r;
5663 }
5664
5665#if defined(CONFIG_DEBUG_FS)
14f04fa4
AD
5666 connector_debugfs_init(amdgpu_dm_connector);
5667#endif
5668
5669 return 0;
5670}
5671
e7b07cee
HW
5672static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5673 .reset = amdgpu_dm_connector_funcs_reset,
5674 .detect = amdgpu_dm_connector_detect,
5675 .fill_modes = drm_helper_probe_single_connector_modes,
5676 .destroy = amdgpu_dm_connector_destroy,
5677 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5678 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5679 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
526c654a 5680 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
14f04fa4 5681 .late_register = amdgpu_dm_connector_late_register,
526c654a 5682 .early_unregister = amdgpu_dm_connector_unregister
e7b07cee
HW
5683};
5684
e7b07cee
HW
5685static int get_modes(struct drm_connector *connector)
5686{
5687 return amdgpu_dm_connector_get_modes(connector);
5688}
5689
c84dec2f 5690static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
5691{
5692 struct dc_sink_init_data init_params = {
5693 .link = aconnector->dc_link,
5694 .sink_signal = SIGNAL_TYPE_VIRTUAL
5695 };
70e8ffc5 5696 struct edid *edid;
e7b07cee 5697
a89ff457 5698 if (!aconnector->base.edid_blob_ptr) {
e7b07cee
HW
5699		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
5700 aconnector->base.name);
5701
5702 aconnector->base.force = DRM_FORCE_OFF;
5703 aconnector->base.override_edid = false;
5704 return;
5705 }
5706
70e8ffc5
HW
5707 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5708
e7b07cee
HW
5709 aconnector->edid = edid;
5710
5711 aconnector->dc_em_sink = dc_link_add_remote_sink(
5712 aconnector->dc_link,
5713 (uint8_t *)edid,
5714 (edid->extensions + 1) * EDID_LENGTH,
5715 &init_params);
5716
dcd5fb82 5717 if (aconnector->base.force == DRM_FORCE_ON) {
e7b07cee
HW
5718 aconnector->dc_sink = aconnector->dc_link->local_sink ?
5719 aconnector->dc_link->local_sink :
5720 aconnector->dc_em_sink;
dcd5fb82
MF
5721 dc_sink_retain(aconnector->dc_sink);
5722 }
e7b07cee
HW
5723}
5724
c84dec2f 5725static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
5726{
5727 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5728
1f6010a9
DF
5729 /*
5730	 * In case of a headless boot with force-on for a DP managed connector,
e7b07cee
HW
5731	 * those settings have to be != 0 to get an initial modeset
5732 */
5733 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5734 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5735 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5736 }
5737
5738
5739 aconnector->base.override_edid = true;
5740 create_eml_sink(aconnector);
5741}
5742
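/*
 * Usage note (illustrative; the forcing mechanisms are generic DRM
 * features, not specific to this file): this path is typically reached
 * when a connector is forced, e.g. booting with "video=DP-1:e" on the
 * kernel command line or echoing "on" into the connector's sysfs status
 * file. That sets aconnector->base.force, and mode_valid() below then
 * calls handle_edid_mgmt() to fabricate the emulated sink.
 */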
cbd14ae7
SW
5743static struct dc_stream_state *
5744create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5745 const struct drm_display_mode *drm_mode,
5746 const struct dm_connector_state *dm_state,
5747 const struct dc_stream_state *old_stream)
5748{
5749 struct drm_connector *connector = &aconnector->base;
1348969a 5750 struct amdgpu_device *adev = drm_to_adev(connector->dev);
cbd14ae7 5751 struct dc_stream_state *stream;
4b7da34b
SW
5752 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5753 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
cbd14ae7
SW
5754 enum dc_status dc_result = DC_OK;
5755
5756 do {
5757 stream = create_stream_for_sink(aconnector, drm_mode,
5758 dm_state, old_stream,
5759 requested_bpc);
5760 if (stream == NULL) {
5761 DRM_ERROR("Failed to create stream for sink!\n");
5762 break;
5763 }
5764
5765 dc_result = dc_validate_stream(adev->dm.dc, stream);
5766
5767 if (dc_result != DC_OK) {
74a16675 5768 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
cbd14ae7
SW
5769 drm_mode->hdisplay,
5770 drm_mode->vdisplay,
5771 drm_mode->clock,
74a16675
RS
5772 dc_result,
5773 dc_status_to_str(dc_result));
cbd14ae7
SW
5774
5775 dc_stream_release(stream);
5776 stream = NULL;
5777 requested_bpc -= 2; /* lower bpc to retry validation */
5778 }
5779
5780 } while (stream == NULL && requested_bpc >= 6);
5781
5782 return stream;
5783}
5784
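/*
 * Illustrative walk-through of the retry loop above, assuming the
 * connector's "max bpc" property is 10: the first pass builds the stream
 * at requested_bpc = 10; if dc_validate_stream() rejects it (e.g. not
 * enough link bandwidth), the stream is released and rebuilt at 8, then
 * at 6, before giving up and returning NULL.
 */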
ba9ca088 5785enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
3ee6b26b 5786 struct drm_display_mode *mode)
e7b07cee
HW
5787{
5788 int result = MODE_ERROR;
5789 struct dc_sink *dc_sink;
e7b07cee 5790 /* TODO: Unhardcode stream count */
0971c40e 5791 struct dc_stream_state *stream;
c84dec2f 5792 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
5793
5794 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5795 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
5796 return result;
5797
1f6010a9
DF
5798 /*
5799	 * Only run this the first time mode_valid is called, to initialize
e7b07cee
HW
5800 * EDID mgmt
5801 */
5802 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5803 !aconnector->dc_em_sink)
5804 handle_edid_mgmt(aconnector);
5805
c84dec2f 5806 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
e7b07cee 5807
ad975f44
VL
5808 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
5809 aconnector->base.force != DRM_FORCE_ON) {
e7b07cee
HW
5810 DRM_ERROR("dc_sink is NULL!\n");
5811 goto fail;
5812 }
5813
cbd14ae7
SW
5814 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5815 if (stream) {
5816 dc_stream_release(stream);
e7b07cee 5817 result = MODE_OK;
cbd14ae7 5818 }
e7b07cee
HW
5819
5820fail:
5821	/* TODO: error handling */
5822 return result;
5823}
5824
88694af9
NK
5825static int fill_hdr_info_packet(const struct drm_connector_state *state,
5826 struct dc_info_packet *out)
5827{
5828 struct hdmi_drm_infoframe frame;
5829 unsigned char buf[30]; /* 26 + 4 */
5830 ssize_t len;
5831 int ret, i;
5832
5833 memset(out, 0, sizeof(*out));
5834
5835 if (!state->hdr_output_metadata)
5836 return 0;
5837
5838 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5839 if (ret)
5840 return ret;
5841
5842 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5843 if (len < 0)
5844 return (int)len;
5845
5846 /* Static metadata is a fixed 26 bytes + 4 byte header. */
5847 if (len != 30)
5848 return -EINVAL;
5849
5850 /* Prepare the infopacket for DC. */
5851 switch (state->connector->connector_type) {
5852 case DRM_MODE_CONNECTOR_HDMIA:
5853 out->hb0 = 0x87; /* type */
5854 out->hb1 = 0x01; /* version */
5855 out->hb2 = 0x1A; /* length */
5856 out->sb[0] = buf[3]; /* checksum */
5857 i = 1;
5858 break;
5859
5860 case DRM_MODE_CONNECTOR_DisplayPort:
5861 case DRM_MODE_CONNECTOR_eDP:
5862 out->hb0 = 0x00; /* sdp id, zero */
5863 out->hb1 = 0x87; /* type */
5864 out->hb2 = 0x1D; /* payload len - 1 */
5865 out->hb3 = (0x13 << 2); /* sdp version */
5866 out->sb[0] = 0x01; /* version */
5867 out->sb[1] = 0x1A; /* length */
5868 i = 2;
5869 break;
5870
5871 default:
5872 return -EINVAL;
5873 }
5874
5875 memcpy(&out->sb[i], &buf[4], 26);
5876 out->valid = true;
5877
5878 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5879 sizeof(out->sb), false);
5880
5881 return 0;
5882}
5883
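/*
 * Byte layout handled above (a sketch; offsets follow from the code, the
 * field names are CTA-861-G terms): hdmi_drm_infoframe_pack_only() emits
 *
 *   buf[0..3]  = infoframe header: type 0x87, version 0x01, length 0x1A,
 *                checksum
 *   buf[4..29] = 26 bytes of static HDR metadata (EOTF, primaries,
 *                luminance, MaxCLL/MaxFALL)
 *
 * For HDMI the checksum lands in sb[0] and the payload in sb[1..26]; for
 * DP the payload travels in an SDP, so it starts at sb[2] instead.
 */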
5884static bool
5885is_hdr_metadata_different(const struct drm_connector_state *old_state,
5886 const struct drm_connector_state *new_state)
5887{
5888 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5889 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5890
5891 if (old_blob != new_blob) {
5892 if (old_blob && new_blob &&
5893 old_blob->length == new_blob->length)
5894 return memcmp(old_blob->data, new_blob->data,
5895 old_blob->length);
5896
5897 return true;
5898 }
5899
5900 return false;
5901}
5902
5903static int
5904amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
51e857af 5905 struct drm_atomic_state *state)
88694af9 5906{
51e857af
SP
5907 struct drm_connector_state *new_con_state =
5908 drm_atomic_get_new_connector_state(state, conn);
88694af9
NK
5909 struct drm_connector_state *old_con_state =
5910 drm_atomic_get_old_connector_state(state, conn);
5911 struct drm_crtc *crtc = new_con_state->crtc;
5912 struct drm_crtc_state *new_crtc_state;
5913 int ret;
5914
e8a98235
RS
5915 trace_amdgpu_dm_connector_atomic_check(new_con_state);
5916
88694af9
NK
5917 if (!crtc)
5918 return 0;
5919
5920 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5921 struct dc_info_packet hdr_infopacket;
5922
5923 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5924 if (ret)
5925 return ret;
5926
5927 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5928 if (IS_ERR(new_crtc_state))
5929 return PTR_ERR(new_crtc_state);
5930
5931 /*
5932 * DC considers the stream backends changed if the
5933 * static metadata changes. Forcing the modeset also
5934 * gives a simple way for userspace to switch from
b232d4ed
NK
5935 * 8bpc to 10bpc when setting the metadata to enter
5936 * or exit HDR.
5937 *
5938 * Changing the static metadata after it's been
5939 * set is permissible, however. So only force a
5940 * modeset if we're entering or exiting HDR.
88694af9 5941 */
b232d4ed
NK
5942 new_crtc_state->mode_changed =
5943 !old_con_state->hdr_output_metadata ||
5944 !new_con_state->hdr_output_metadata;
88694af9
NK
5945 }
5946
5947 return 0;
5948}
5949
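/*
 * Concrete scenario for the check above (illustrative, not driver
 * output): a compositor entering HDR attaches an HDR_OUTPUT_METADATA
 * blob where there was none, so mode_changed is forced and the modeset
 * can reconfigure the pipe for 10bpc; later raising MaxCLL in an
 * already-attached blob only updates the infopacket, because both the
 * old and new state then carry metadata.
 */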
e7b07cee
HW
5950static const struct drm_connector_helper_funcs
5951amdgpu_dm_connector_helper_funcs = {
5952 /*
1f6010a9 5953 * If hotplugging a second bigger display in FB Con mode, bigger resolution
b830ebc9 5954 * modes will be filtered by drm_mode_validate_size(), and those modes
1f6010a9 5955 * are missing after user start lightdm. So we need to renew modes list.
b830ebc9
HW
5956 * in get_modes call back, not just return the modes count
5957 */
e7b07cee
HW
5958 .get_modes = get_modes,
5959 .mode_valid = amdgpu_dm_connector_mode_valid,
88694af9 5960 .atomic_check = amdgpu_dm_connector_atomic_check,
e7b07cee
HW
5961};
5962
5963static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5964{
5965}
5966
d6ef9b41 5967static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
c14a005c
NK
5968{
5969 struct drm_atomic_state *state = new_crtc_state->state;
5970 struct drm_plane *plane;
5971 int num_active = 0;
5972
5973 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5974 struct drm_plane_state *new_plane_state;
5975
5976 /* Cursor planes are "fake". */
5977 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5978 continue;
5979
5980 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5981
5982 if (!new_plane_state) {
5983 /*
5984 * The plane is enable on the CRTC and hasn't changed
5985 * state. This means that it previously passed
5986 * validation and is therefore enabled.
5987 */
5988 num_active += 1;
5989 continue;
5990 }
5991
5992 /* We need a framebuffer to be considered enabled. */
5993 num_active += (new_plane_state->fb != NULL);
5994 }
5995
d6ef9b41
NK
5996 return num_active;
5997}
5998
8fe684e9
NK
5999static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6000 struct drm_crtc_state *new_crtc_state)
d6ef9b41
NK
6001{
6002 struct dm_crtc_state *dm_new_crtc_state =
6003 to_dm_crtc_state(new_crtc_state);
6004
6005 dm_new_crtc_state->active_planes = 0;
d6ef9b41
NK
6006
6007 if (!dm_new_crtc_state->stream)
6008 return;
6009
6010 dm_new_crtc_state->active_planes =
6011 count_crtc_active_planes(new_crtc_state);
c14a005c
NK
6012}
6013
3ee6b26b 6014static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
29b77ad7 6015 struct drm_atomic_state *state)
e7b07cee 6016{
29b77ad7
MR
6017 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6018 crtc);
1348969a 6019 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
e7b07cee 6020 struct dc *dc = adev->dm.dc;
29b77ad7 6021 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
e7b07cee
HW
6022 int ret = -EINVAL;
6023
5b8c5969 6024 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
e8a98235 6025
29b77ad7 6026 dm_update_crtc_active_planes(crtc, crtc_state);
d6ef9b41 6027
9b690ef3 6028 if (unlikely(!dm_crtc_state->stream &&
29b77ad7 6029 modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
e7b07cee
HW
6030 WARN_ON(1);
6031 return ret;
6032 }
6033
bc92c065 6034 /*
b836a274
MD
6035 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6036 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6037 * planes are disabled, which is not supported by the hardware. And there is legacy
6038 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
bc92c065 6039 */
29b77ad7
MR
6040 if (crtc_state->enable &&
6041 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary)))
c14a005c
NK
6042 return -EINVAL;
6043
b836a274
MD
6044 /* In some use cases, like reset, no stream is attached */
6045 if (!dm_crtc_state->stream)
6046 return 0;
6047
62c933f9 6048 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
e7b07cee
HW
6049 return 0;
6050
6051 return ret;
6052}
6053
3ee6b26b
AD
6054static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6055 const struct drm_display_mode *mode,
6056 struct drm_display_mode *adjusted_mode)
e7b07cee
HW
6057{
6058 return true;
6059}
6060
6061static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6062 .disable = dm_crtc_helper_disable,
6063 .atomic_check = dm_crtc_helper_atomic_check,
ea702333
TZ
6064 .mode_fixup = dm_crtc_helper_mode_fixup,
6065 .get_scanout_position = amdgpu_crtc_get_scanout_position,
e7b07cee
HW
6066};
6067
6068static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6069{
6070
6071}
6072
3261e013
ML
6073static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6074{
6075 switch (display_color_depth) {
6076 case COLOR_DEPTH_666:
6077 return 6;
6078 case COLOR_DEPTH_888:
6079 return 8;
6080 case COLOR_DEPTH_101010:
6081 return 10;
6082 case COLOR_DEPTH_121212:
6083 return 12;
6084 case COLOR_DEPTH_141414:
6085 return 14;
6086 case COLOR_DEPTH_161616:
6087 return 16;
6088 default:
6089 break;
6090 }
6091 return 0;
6092}
6093
3ee6b26b
AD
6094static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6095 struct drm_crtc_state *crtc_state,
6096 struct drm_connector_state *conn_state)
e7b07cee 6097{
3261e013
ML
6098 struct drm_atomic_state *state = crtc_state->state;
6099 struct drm_connector *connector = conn_state->connector;
6100 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6101 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6102 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6103 struct drm_dp_mst_topology_mgr *mst_mgr;
6104 struct drm_dp_mst_port *mst_port;
6105 enum dc_color_depth color_depth;
6106 int clock, bpp = 0;
1bc22f20 6107 bool is_y420 = false;
3261e013
ML
6108
6109 if (!aconnector->port || !aconnector->dc_sink)
6110 return 0;
6111
6112 mst_port = aconnector->port;
6113 mst_mgr = &aconnector->mst_port->mst_mgr;
6114
6115 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6116 return 0;
6117
6118 if (!state->duplicated) {
cbd14ae7 6119 int max_bpc = conn_state->max_requested_bpc;
1bc22f20
SW
6120 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6121 aconnector->force_yuv420_output;
cbd14ae7
SW
6122 color_depth = convert_color_depth_from_display_info(connector,
6123 is_y420,
6124 max_bpc);
3261e013
ML
6125 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6126 clock = adjusted_mode->clock;
dc48529f 6127 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
3261e013
ML
6128 }
6129 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6130 mst_mgr,
6131 mst_port,
1c6c1cb5 6132 dm_new_connector_state->pbn,
03ca9600 6133 dm_mst_get_pbn_divider(aconnector->dc_link));
3261e013
ML
6134 if (dm_new_connector_state->vcpi_slots < 0) {
6135 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6136 return dm_new_connector_state->vcpi_slots;
6137 }
e7b07cee
HW
6138 return 0;
6139}
6140
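/*
 * Illustrative numbers for the PBN computation above (standard 1080p60
 * at 8 bpc; the result should match drm_dp_calc_pbn_mode() for these
 * inputs): bpp = 8 * 3 = 24, clock = 148500 kHz, and
 *
 *   PBN = ceil(148500 * 24 * 64 * 1006 / (8 * 54 * 1000 * 1000)) = 532
 *
 * i.e. the margin-adjusted 64/54 MTP conversion defined for DP MST.
 */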
6141const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6142 .disable = dm_encoder_helper_disable,
6143 .atomic_check = dm_encoder_helper_atomic_check
6144};
6145
d9fe1a4c 6146#if defined(CONFIG_DRM_AMD_DC_DCN)
29b9ba74
ML
6147static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6148 struct dc_state *dc_state)
6149{
6150 struct dc_stream_state *stream = NULL;
6151 struct drm_connector *connector;
6152 struct drm_connector_state *new_con_state, *old_con_state;
6153 struct amdgpu_dm_connector *aconnector;
6154 struct dm_connector_state *dm_conn_state;
6155 int i, j, clock, bpp;
6156 int vcpi, pbn_div, pbn = 0;
6157
6158 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6159
6160 aconnector = to_amdgpu_dm_connector(connector);
6161
6162 if (!aconnector->port)
6163 continue;
6164
6165 if (!new_con_state || !new_con_state->crtc)
6166 continue;
6167
6168 dm_conn_state = to_dm_connector_state(new_con_state);
6169
6170 for (j = 0; j < dc_state->stream_count; j++) {
6171 stream = dc_state->streams[j];
6172 if (!stream)
6173 continue;
6174
6175 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
6176 break;
6177
6178 stream = NULL;
6179 }
6180
6181 if (!stream)
6182 continue;
6183
6184 if (stream->timing.flags.DSC != 1) {
6185 drm_dp_mst_atomic_enable_dsc(state,
6186 aconnector->port,
6187 dm_conn_state->pbn,
6188 0,
6189 false);
6190 continue;
6191 }
6192
6193 pbn_div = dm_mst_get_pbn_divider(stream->link);
6194 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6195 clock = stream->timing.pix_clk_100hz / 10;
6196 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6197 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6198 aconnector->port,
6199 pbn, pbn_div,
6200 true);
6201 if (vcpi < 0)
6202 return vcpi;
6203
6204 dm_conn_state->pbn = pbn;
6205 dm_conn_state->vcpi_slots = vcpi;
6206 }
6207 return 0;
6208}
d9fe1a4c 6209#endif
29b9ba74 6210
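/*
 * Note on units in the DSC branch above (based on the DRM MST helpers):
 * stream->timing.dsc_cfg.bits_per_pixel is stored in 1/16-bpp units,
 * which is why drm_dp_calc_pbn_mode() is called with its third argument
 * set to true - that variant scales the bpp term down by 16 before
 * converting to PBN.
 */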
e7b07cee
HW
6211static void dm_drm_plane_reset(struct drm_plane *plane)
6212{
6213 struct dm_plane_state *amdgpu_state = NULL;
6214
6215 if (plane->state)
6216 plane->funcs->atomic_destroy_state(plane, plane->state);
6217
6218 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
f922237d 6219 WARN_ON(amdgpu_state == NULL);
1f6010a9 6220
7ddaef96
NK
6221 if (amdgpu_state)
6222 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
e7b07cee
HW
6223}
6224
6225static struct drm_plane_state *
6226dm_drm_plane_duplicate_state(struct drm_plane *plane)
6227{
6228 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6229
6230 old_dm_plane_state = to_dm_plane_state(plane->state);
6231 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6232 if (!dm_plane_state)
6233 return NULL;
6234
6235 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6236
3be5262e
HW
6237 if (old_dm_plane_state->dc_state) {
6238 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6239 dc_plane_state_retain(dm_plane_state->dc_state);
e7b07cee
HW
6240 }
6241
6242 return &dm_plane_state->base;
6243}
6244
dfd84d90 6245static void dm_drm_plane_destroy_state(struct drm_plane *plane,
3ee6b26b 6246 struct drm_plane_state *state)
e7b07cee
HW
6247{
6248 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6249
3be5262e
HW
6250 if (dm_plane_state->dc_state)
6251 dc_plane_state_release(dm_plane_state->dc_state);
e7b07cee 6252
0627bbd3 6253 drm_atomic_helper_plane_destroy_state(plane, state);
e7b07cee
HW
6254}
6255
6256static const struct drm_plane_funcs dm_plane_funcs = {
6257 .update_plane = drm_atomic_helper_update_plane,
6258 .disable_plane = drm_atomic_helper_disable_plane,
02680efb 6259 .destroy = drm_primary_helper_destroy,
e7b07cee
HW
6260 .reset = dm_drm_plane_reset,
6261 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
6262 .atomic_destroy_state = dm_drm_plane_destroy_state,
faa37f54 6263 .format_mod_supported = dm_plane_format_mod_supported,
e7b07cee
HW
6264};
6265
3ee6b26b
AD
6266static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6267 struct drm_plane_state *new_state)
e7b07cee
HW
6268{
6269 struct amdgpu_framebuffer *afb;
6270 struct drm_gem_object *obj;
5d43be0c 6271 struct amdgpu_device *adev;
e7b07cee 6272 struct amdgpu_bo *rbo;
e7b07cee 6273 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
0f257b09
CZ
6274 struct list_head list;
6275 struct ttm_validate_buffer tv;
6276 struct ww_acquire_ctx ticket;
5d43be0c
CK
6277 uint32_t domain;
6278 int r;
e7b07cee
HW
6279
6280 if (!new_state->fb) {
f1ad2f5e 6281 DRM_DEBUG_DRIVER("No FB bound\n");
e7b07cee
HW
6282 return 0;
6283 }
6284
6285 afb = to_amdgpu_framebuffer(new_state->fb);
e68d14dd 6286 obj = new_state->fb->obj[0];
e7b07cee 6287 rbo = gem_to_amdgpu_bo(obj);
5d43be0c 6288 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
0f257b09
CZ
6289 INIT_LIST_HEAD(&list);
6290
6291 tv.bo = &rbo->tbo;
6292 tv.num_shared = 1;
6293 list_add(&tv.head, &list);
6294
9165fb87 6295 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
0f257b09
CZ
6296 if (r) {
6297 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
e7b07cee 6298 return r;
0f257b09 6299 }
e7b07cee 6300
5d43be0c 6301 if (plane->type != DRM_PLANE_TYPE_CURSOR)
f2bd8a0e 6302 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5d43be0c
CK
6303 else
6304 domain = AMDGPU_GEM_DOMAIN_VRAM;
e7b07cee 6305
7b7c6c81 6306 r = amdgpu_bo_pin(rbo, domain);
e7b07cee 6307 if (unlikely(r != 0)) {
30b7c614
HW
6308 if (r != -ERESTARTSYS)
6309 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
0f257b09 6310 ttm_eu_backoff_reservation(&ticket, &list);
e7b07cee
HW
6311 return r;
6312 }
6313
bb812f1e
JZ
6314 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6315 if (unlikely(r != 0)) {
6316 amdgpu_bo_unpin(rbo);
0f257b09 6317 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 6318 DRM_ERROR("%p bind failed\n", rbo);
e7b07cee
HW
6319 return r;
6320 }
7df7e505 6321
0f257b09 6322 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 6323
7b7c6c81 6324 afb->address = amdgpu_bo_gpu_offset(rbo);
e7b07cee
HW
6325
6326 amdgpu_bo_ref(rbo);
6327
cf322b49
NK
6328 /**
6329 * We don't do surface updates on planes that have been newly created,
6330 * but we also don't have the afb->address during atomic check.
6331 *
6332 * Fill in buffer attributes depending on the address here, but only on
6333 * newly created planes since they're not being used by DC yet and this
6334 * won't modify global state.
6335 */
6336 dm_plane_state_old = to_dm_plane_state(plane->state);
6337 dm_plane_state_new = to_dm_plane_state(new_state);
6338
3be5262e 6339 if (dm_plane_state_new->dc_state &&
cf322b49
NK
6340 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6341 struct dc_plane_state *plane_state =
6342 dm_plane_state_new->dc_state;
6343 bool force_disable_dcc = !plane_state->dcc.enable;
e7b07cee 6344
320932bf 6345 fill_plane_buffer_attributes(
695af5f9 6346 adev, afb, plane_state->format, plane_state->rotation,
6eed95b0 6347 afb->tiling_flags,
cf322b49
NK
6348 &plane_state->tiling_info, &plane_state->plane_size,
6349 &plane_state->dcc, &plane_state->address,
6eed95b0 6350 afb->tmz_surface, force_disable_dcc);
e7b07cee
HW
6351 }
6352
e7b07cee
HW
6353 return 0;
6354}
6355
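/*
 * Ordering sketch for the prepare_fb path above: reserve the BO with
 * ttm_eu_reserve_buffers(), pin it into the chosen domain, bind it into
 * the GART so the hardware has a valid address, then back off the
 * reservation; afb->address is only meaningful once the pin and GART
 * bind have succeeded.
 */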
3ee6b26b
AD
6356static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6357 struct drm_plane_state *old_state)
e7b07cee
HW
6358{
6359 struct amdgpu_bo *rbo;
e7b07cee
HW
6360 int r;
6361
6362 if (!old_state->fb)
6363 return;
6364
e68d14dd 6365 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
e7b07cee
HW
6366 r = amdgpu_bo_reserve(rbo, false);
6367 if (unlikely(r)) {
6368 DRM_ERROR("failed to reserve rbo before unpin\n");
6369 return;
b830ebc9
HW
6370 }
6371
6372 amdgpu_bo_unpin(rbo);
6373 amdgpu_bo_unreserve(rbo);
6374 amdgpu_bo_unref(&rbo);
e7b07cee
HW
6375}
6376
8c44515b
AP
6377static int dm_plane_helper_check_state(struct drm_plane_state *state,
6378 struct drm_crtc_state *new_crtc_state)
6379{
6380 int max_downscale = 0;
6381 int max_upscale = INT_MAX;
6382
6383 /* TODO: These should be checked against DC plane caps */
6384 return drm_atomic_helper_check_plane_state(
6385 state, new_crtc_state, max_downscale, max_upscale, true, true);
6386}
6387
7578ecda
AD
6388static int dm_plane_atomic_check(struct drm_plane *plane,
6389 struct drm_plane_state *state)
cbd19488 6390{
1348969a 6391 struct amdgpu_device *adev = drm_to_adev(plane->dev);
cbd19488 6392 struct dc *dc = adev->dm.dc;
78171832 6393 struct dm_plane_state *dm_plane_state;
695af5f9 6394 struct dc_scaling_info scaling_info;
8c44515b 6395 struct drm_crtc_state *new_crtc_state;
695af5f9 6396 int ret;
78171832 6397
e8a98235
RS
6398 trace_amdgpu_dm_plane_atomic_check(state);
6399
78171832 6400 dm_plane_state = to_dm_plane_state(state);
cbd19488 6401
3be5262e 6402 if (!dm_plane_state->dc_state)
9a3329b1 6403 return 0;
cbd19488 6404
8c44515b
AP
6405 new_crtc_state =
6406 drm_atomic_get_new_crtc_state(state->state, state->crtc);
6407 if (!new_crtc_state)
6408 return -EINVAL;
6409
6410 ret = dm_plane_helper_check_state(state, new_crtc_state);
6411 if (ret)
6412 return ret;
6413
695af5f9
NK
6414 ret = fill_dc_scaling_info(state, &scaling_info);
6415 if (ret)
6416 return ret;
a05bcff1 6417
62c933f9 6418 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
cbd19488
AG
6419 return 0;
6420
6421 return -EINVAL;
6422}
6423
674e78ac
NK
6424static int dm_plane_atomic_async_check(struct drm_plane *plane,
6425 struct drm_plane_state *new_plane_state)
6426{
6427 /* Only support async updates on cursor planes. */
6428 if (plane->type != DRM_PLANE_TYPE_CURSOR)
6429 return -EINVAL;
6430
6431 return 0;
6432}
6433
6434static void dm_plane_atomic_async_update(struct drm_plane *plane,
6435 struct drm_plane_state *new_state)
6436{
6437 struct drm_plane_state *old_state =
6438 drm_atomic_get_old_plane_state(new_state->state, plane);
6439
e8a98235
RS
6440 trace_amdgpu_dm_atomic_update_cursor(new_state);
6441
332af874 6442 swap(plane->state->fb, new_state->fb);
674e78ac
NK
6443
6444 plane->state->src_x = new_state->src_x;
6445 plane->state->src_y = new_state->src_y;
6446 plane->state->src_w = new_state->src_w;
6447 plane->state->src_h = new_state->src_h;
6448 plane->state->crtc_x = new_state->crtc_x;
6449 plane->state->crtc_y = new_state->crtc_y;
6450 plane->state->crtc_w = new_state->crtc_w;
6451 plane->state->crtc_h = new_state->crtc_h;
6452
6453 handle_cursor_update(plane, old_state);
6454}
6455
e7b07cee
HW
6456static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6457 .prepare_fb = dm_plane_helper_prepare_fb,
6458 .cleanup_fb = dm_plane_helper_cleanup_fb,
cbd19488 6459 .atomic_check = dm_plane_atomic_check,
674e78ac
NK
6460 .atomic_async_check = dm_plane_atomic_async_check,
6461 .atomic_async_update = dm_plane_atomic_async_update
e7b07cee
HW
6462};
6463
6464/*
6465 * TODO: these are currently initialized to RGB formats only.
6466 * For future use cases we should either initialize them dynamically based on
6467 * plane capabilities, or initialize this array to all formats, so the internal
1f6010a9 6468 * drm check will succeed, and let DC implement the proper check
e7b07cee 6469 */
d90371b0 6470static const uint32_t rgb_formats[] = {
e7b07cee
HW
6471 DRM_FORMAT_XRGB8888,
6472 DRM_FORMAT_ARGB8888,
6473 DRM_FORMAT_RGBA8888,
6474 DRM_FORMAT_XRGB2101010,
6475 DRM_FORMAT_XBGR2101010,
6476 DRM_FORMAT_ARGB2101010,
6477 DRM_FORMAT_ABGR2101010,
bcd47f60
MR
6478 DRM_FORMAT_XBGR8888,
6479 DRM_FORMAT_ABGR8888,
46dd9ff7 6480 DRM_FORMAT_RGB565,
e7b07cee
HW
6481};
6482
0d579c7e
NK
6483static const uint32_t overlay_formats[] = {
6484 DRM_FORMAT_XRGB8888,
6485 DRM_FORMAT_ARGB8888,
6486 DRM_FORMAT_RGBA8888,
6487 DRM_FORMAT_XBGR8888,
6488 DRM_FORMAT_ABGR8888,
7267a1a9 6489 DRM_FORMAT_RGB565
e7b07cee
HW
6490};
6491
6492static const u32 cursor_formats[] = {
6493 DRM_FORMAT_ARGB8888
6494};
6495
37c6a93b
NK
6496static int get_plane_formats(const struct drm_plane *plane,
6497 const struct dc_plane_cap *plane_cap,
6498 uint32_t *formats, int max_formats)
e7b07cee 6499{
37c6a93b
NK
6500 int i, num_formats = 0;
6501
6502 /*
6503 * TODO: Query support for each group of formats directly from
6504 * DC plane caps. This will require adding more formats to the
6505 * caps list.
6506 */
e7b07cee 6507
f180b4bc 6508 switch (plane->type) {
e7b07cee 6509 case DRM_PLANE_TYPE_PRIMARY:
37c6a93b
NK
6510 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6511 if (num_formats >= max_formats)
6512 break;
6513
6514 formats[num_formats++] = rgb_formats[i];
6515 }
6516
ea36ad34 6517 if (plane_cap && plane_cap->pixel_format_support.nv12)
37c6a93b 6518 formats[num_formats++] = DRM_FORMAT_NV12;
cbec6477
SW
6519 if (plane_cap && plane_cap->pixel_format_support.p010)
6520 formats[num_formats++] = DRM_FORMAT_P010;
492548dc
SW
6521 if (plane_cap && plane_cap->pixel_format_support.fp16) {
6522 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6523 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
2a5195dc
MK
6524 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6525 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
492548dc 6526 }
e7b07cee 6527 break;
37c6a93b 6528
e7b07cee 6529 case DRM_PLANE_TYPE_OVERLAY:
37c6a93b
NK
6530 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6531 if (num_formats >= max_formats)
6532 break;
6533
6534 formats[num_formats++] = overlay_formats[i];
6535 }
e7b07cee 6536 break;
37c6a93b 6537
e7b07cee 6538 case DRM_PLANE_TYPE_CURSOR:
37c6a93b
NK
6539 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6540 if (num_formats >= max_formats)
6541 break;
6542
6543 formats[num_formats++] = cursor_formats[i];
6544 }
e7b07cee
HW
6545 break;
6546 }
6547
37c6a93b
NK
6548 return num_formats;
6549}
6550
6551static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6552 struct drm_plane *plane,
6553 unsigned long possible_crtcs,
6554 const struct dc_plane_cap *plane_cap)
6555{
6556 uint32_t formats[32];
6557 int num_formats;
6558 int res = -EPERM;
ecc874a6 6559 unsigned int supported_rotations;
faa37f54 6560 uint64_t *modifiers = NULL;
37c6a93b
NK
6561
6562 num_formats = get_plane_formats(plane, plane_cap, formats,
6563 ARRAY_SIZE(formats));
6564
faa37f54
BN
6565 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
6566 if (res)
6567 return res;
6568
4a580877 6569 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
37c6a93b 6570 &dm_plane_funcs, formats, num_formats,
faa37f54
BN
6571 modifiers, plane->type, NULL);
6572 kfree(modifiers);
37c6a93b
NK
6573 if (res)
6574 return res;
6575
cc1fec57
NK
6576 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6577 plane_cap && plane_cap->per_pixel_alpha) {
d74004b6
NK
6578 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6579 BIT(DRM_MODE_BLEND_PREMULTI);
6580
6581 drm_plane_create_alpha_property(plane);
6582 drm_plane_create_blend_mode_property(plane, blend_caps);
6583 }
6584
fc8e5230 6585 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
00755bb7
SW
6586 plane_cap &&
6587 (plane_cap->pixel_format_support.nv12 ||
6588 plane_cap->pixel_format_support.p010)) {
fc8e5230
NK
6589 /* This only affects YUV formats. */
6590 drm_plane_create_color_properties(
6591 plane,
6592 BIT(DRM_COLOR_YCBCR_BT601) |
00755bb7
SW
6593 BIT(DRM_COLOR_YCBCR_BT709) |
6594 BIT(DRM_COLOR_YCBCR_BT2020),
fc8e5230
NK
6595 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6596 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6597 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6598 }
6599
ecc874a6
PLG
6600 supported_rotations =
6601 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6602 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6603
f784112f
MR
6604 if (dm->adev->asic_type >= CHIP_BONAIRE)
6605 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6606 supported_rotations);
ecc874a6 6607
f180b4bc 6608 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
e7b07cee 6609
96719c54 6610 /* Create (reset) the plane state */
f180b4bc
HW
6611 if (plane->funcs->reset)
6612 plane->funcs->reset(plane);
96719c54 6613
37c6a93b 6614 return 0;
e7b07cee
HW
6615}
6616
c920888c
WL
6617#ifdef CONFIG_DEBUG_FS
6618static void attach_crtc_crc_properties(struct amdgpu_display_manager *dm,
6619 struct amdgpu_crtc *acrtc)
6620{
6621 drm_object_attach_property(&acrtc->base.base,
6622 dm->crc_win_x_start_property,
6623 0);
6624 drm_object_attach_property(&acrtc->base.base,
6625 dm->crc_win_y_start_property,
6626 0);
6627 drm_object_attach_property(&acrtc->base.base,
6628 dm->crc_win_x_end_property,
6629 0);
6630 drm_object_attach_property(&acrtc->base.base,
6631 dm->crc_win_y_end_property,
6632 0);
6633}
6634#endif
6635
7578ecda
AD
6636static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6637 struct drm_plane *plane,
6638 uint32_t crtc_index)
e7b07cee
HW
6639{
6640 struct amdgpu_crtc *acrtc = NULL;
f180b4bc 6641 struct drm_plane *cursor_plane;
e7b07cee
HW
6642
6643 int res = -ENOMEM;
6644
6645 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6646 if (!cursor_plane)
6647 goto fail;
6648
f180b4bc 6649 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
cc1fec57 6650 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
e7b07cee
HW
6651
6652 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6653 if (!acrtc)
6654 goto fail;
6655
6656 res = drm_crtc_init_with_planes(
6657 dm->ddev,
6658 &acrtc->base,
6659 plane,
f180b4bc 6660 cursor_plane,
e7b07cee
HW
6661 &amdgpu_dm_crtc_funcs, NULL);
6662
6663 if (res)
6664 goto fail;
6665
6666 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6667
96719c54
HW
6668 /* Create (reset) the plane state */
6669 if (acrtc->base.funcs->reset)
6670 acrtc->base.funcs->reset(&acrtc->base);
6671
e7b07cee
HW
6672 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6673 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6674
6675 acrtc->crtc_id = crtc_index;
6676 acrtc->base.enabled = false;
c37e2d29 6677 acrtc->otg_inst = -1;
e7b07cee
HW
6678
6679 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
236d0e4f
LSL
6680 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6681 true, MAX_COLOR_LUT_ENTRIES);
086247a4 6682 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
c920888c
WL
6683#ifdef CONFIG_DEBUG_FS
6684 attach_crtc_crc_properties(dm, acrtc);
6685#endif
e7b07cee
HW
6686 return 0;
6687
6688fail:
b830ebc9
HW
6689 kfree(acrtc);
6690 kfree(cursor_plane);
e7b07cee
HW
6691 return res;
6692}
6693
6694
6695static int to_drm_connector_type(enum signal_type st)
6696{
6697 switch (st) {
6698 case SIGNAL_TYPE_HDMI_TYPE_A:
6699 return DRM_MODE_CONNECTOR_HDMIA;
6700 case SIGNAL_TYPE_EDP:
6701 return DRM_MODE_CONNECTOR_eDP;
11c3ee48
AD
6702 case SIGNAL_TYPE_LVDS:
6703 return DRM_MODE_CONNECTOR_LVDS;
e7b07cee
HW
6704 case SIGNAL_TYPE_RGB:
6705 return DRM_MODE_CONNECTOR_VGA;
6706 case SIGNAL_TYPE_DISPLAY_PORT:
6707 case SIGNAL_TYPE_DISPLAY_PORT_MST:
6708 return DRM_MODE_CONNECTOR_DisplayPort;
6709 case SIGNAL_TYPE_DVI_DUAL_LINK:
6710 case SIGNAL_TYPE_DVI_SINGLE_LINK:
6711 return DRM_MODE_CONNECTOR_DVID;
6712 case SIGNAL_TYPE_VIRTUAL:
6713 return DRM_MODE_CONNECTOR_VIRTUAL;
6714
6715 default:
6716 return DRM_MODE_CONNECTOR_Unknown;
6717 }
6718}
6719
2b4c1c05
DV
6720static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6721{
62afb4ad
JRS
6722 struct drm_encoder *encoder;
6723
6724 /* There is only one encoder per connector */
6725 drm_connector_for_each_possible_encoder(connector, encoder)
6726 return encoder;
6727
6728 return NULL;
2b4c1c05
DV
6729}
6730
e7b07cee
HW
6731static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6732{
e7b07cee
HW
6733 struct drm_encoder *encoder;
6734 struct amdgpu_encoder *amdgpu_encoder;
6735
2b4c1c05 6736 encoder = amdgpu_dm_connector_to_encoder(connector);
e7b07cee
HW
6737
6738 if (encoder == NULL)
6739 return;
6740
6741 amdgpu_encoder = to_amdgpu_encoder(encoder);
6742
6743 amdgpu_encoder->native_mode.clock = 0;
6744
6745 if (!list_empty(&connector->probed_modes)) {
6746 struct drm_display_mode *preferred_mode = NULL;
b830ebc9 6747
e7b07cee 6748 list_for_each_entry(preferred_mode,
b830ebc9
HW
6749 &connector->probed_modes,
6750 head) {
6751 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6752 amdgpu_encoder->native_mode = *preferred_mode;
6753
e7b07cee
HW
6754 break;
6755 }
6756
6757 }
6758}
6759
3ee6b26b
AD
6760static struct drm_display_mode *
6761amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6762 char *name,
6763 int hdisplay, int vdisplay)
e7b07cee
HW
6764{
6765 struct drm_device *dev = encoder->dev;
6766 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6767 struct drm_display_mode *mode = NULL;
6768 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6769
6770 mode = drm_mode_duplicate(dev, native_mode);
6771
b830ebc9 6772 if (mode == NULL)
e7b07cee
HW
6773 return NULL;
6774
6775 mode->hdisplay = hdisplay;
6776 mode->vdisplay = vdisplay;
6777 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
090afc1e 6778 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
e7b07cee
HW
6779
6780 return mode;
6781
6782}
6783
6784static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
3ee6b26b 6785 struct drm_connector *connector)
e7b07cee
HW
6786{
6787 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6788 struct drm_display_mode *mode = NULL;
6789 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
c84dec2f
HW
6790 struct amdgpu_dm_connector *amdgpu_dm_connector =
6791 to_amdgpu_dm_connector(connector);
e7b07cee
HW
6792 int i;
6793 int n;
6794 struct mode_size {
6795 char name[DRM_DISPLAY_MODE_LEN];
6796 int w;
6797 int h;
b830ebc9 6798 } common_modes[] = {
e7b07cee
HW
6799 { "640x480", 640, 480},
6800 { "800x600", 800, 600},
6801 { "1024x768", 1024, 768},
6802 { "1280x720", 1280, 720},
6803 { "1280x800", 1280, 800},
6804 {"1280x1024", 1280, 1024},
6805 { "1440x900", 1440, 900},
6806 {"1680x1050", 1680, 1050},
6807 {"1600x1200", 1600, 1200},
6808 {"1920x1080", 1920, 1080},
6809 {"1920x1200", 1920, 1200}
6810 };
6811
b830ebc9 6812 n = ARRAY_SIZE(common_modes);
e7b07cee
HW
6813
6814 for (i = 0; i < n; i++) {
6815 struct drm_display_mode *curmode = NULL;
6816 bool mode_existed = false;
6817
6818 if (common_modes[i].w > native_mode->hdisplay ||
b830ebc9
HW
6819 common_modes[i].h > native_mode->vdisplay ||
6820 (common_modes[i].w == native_mode->hdisplay &&
6821 common_modes[i].h == native_mode->vdisplay))
6822 continue;
e7b07cee
HW
6823
6824 list_for_each_entry(curmode, &connector->probed_modes, head) {
6825 if (common_modes[i].w == curmode->hdisplay &&
b830ebc9 6826 common_modes[i].h == curmode->vdisplay) {
e7b07cee
HW
6827 mode_existed = true;
6828 break;
6829 }
6830 }
6831
6832 if (mode_existed)
6833 continue;
6834
6835 mode = amdgpu_dm_create_common_mode(encoder,
6836 common_modes[i].name, common_modes[i].w,
6837 common_modes[i].h);
6838 drm_mode_probed_add(connector, mode);
c84dec2f 6839 amdgpu_dm_connector->num_modes++;
e7b07cee
HW
6840 }
6841}
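/*
 * Worked example of the filter above (illustrative, not from the
 * driver): for a native 1920x1200 panel, every listed mode that is no
 * larger than the native timing in either dimension and not identical
 * to it is added, e.g. 1920x1080 and 1600x1200 both qualify, while
 * 1920x1200 itself is skipped and modes already present in
 * probed_modes are not duplicated.
 */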
6842
3ee6b26b
AD
6843static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6844 struct edid *edid)
e7b07cee 6845{
c84dec2f
HW
6846 struct amdgpu_dm_connector *amdgpu_dm_connector =
6847 to_amdgpu_dm_connector(connector);
e7b07cee
HW
6848
6849 if (edid) {
6850 /* empty probed_modes */
6851 INIT_LIST_HEAD(&connector->probed_modes);
c84dec2f 6852 amdgpu_dm_connector->num_modes =
e7b07cee
HW
6853 drm_add_edid_modes(connector, edid);
6854
f1e5e913
YMM
6855 /* Sort the probed modes before calling
6856 * amdgpu_dm_get_native_mode(), since an EDID can have
6857 * more than one preferred mode. A mode later in the
6858 * probed list could have a higher, preferred
6859 * resolution: for example, a 3840x2160 preferred
6860 * timing in the base EDID and a 4096x2160 preferred
6861 * resolution in a later DID extension block.
6862 */
6863 drm_mode_sort(&connector->probed_modes);
e7b07cee 6864 amdgpu_dm_get_native_mode(connector);
a8d8d3dc 6865 } else {
c84dec2f 6866 amdgpu_dm_connector->num_modes = 0;
a8d8d3dc 6867 }
e7b07cee
HW
6868}
6869
7578ecda 6870static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
e7b07cee 6871{
c84dec2f
HW
6872 struct amdgpu_dm_connector *amdgpu_dm_connector =
6873 to_amdgpu_dm_connector(connector);
e7b07cee 6874 struct drm_encoder *encoder;
c84dec2f 6875 struct edid *edid = amdgpu_dm_connector->edid;
e7b07cee 6876
2b4c1c05 6877 encoder = amdgpu_dm_connector_to_encoder(connector);
3e332d3a 6878
5c0e6840 6879 if (!drm_edid_is_valid(edid)) {
1b369d3c
ML
6880 amdgpu_dm_connector->num_modes =
6881 drm_add_modes_noedid(connector, 640, 480);
85ee15d6
ML
6882 } else {
6883 amdgpu_dm_connector_ddc_get_modes(connector, edid);
6884 amdgpu_dm_connector_add_common_modes(encoder, connector);
6885 }
3e332d3a 6886 amdgpu_dm_fbc_init(connector);
5099114b 6887
c84dec2f 6888 return amdgpu_dm_connector->num_modes;
e7b07cee
HW
6889}
6890
3ee6b26b
AD
6891void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6892 struct amdgpu_dm_connector *aconnector,
6893 int connector_type,
6894 struct dc_link *link,
6895 int link_index)
e7b07cee 6896{
1348969a 6897 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
e7b07cee 6898
f04bee34
NK
6899 /*
6900 * Some of the properties below require access to state, like bpc.
6901 * Allocate some default initial connector state with our reset helper.
6902 */
6903 if (aconnector->base.funcs->reset)
6904 aconnector->base.funcs->reset(&aconnector->base);
6905
e7b07cee
HW
6906 aconnector->connector_id = link_index;
6907 aconnector->dc_link = link;
6908 aconnector->base.interlace_allowed = false;
6909 aconnector->base.doublescan_allowed = false;
6910 aconnector->base.stereo_allowed = false;
6911 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6912 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6ce8f316 6913 aconnector->audio_inst = -1;
e7b07cee
HW
6914 mutex_init(&aconnector->hpd_lock);
6915
1f6010a9
DF
6916 /*
6917 * Configure HPD hot-plug support; connector->polled defaults to 0,
b830ebc9
HW
6918 * which means HPD hot plug is not supported.
6919 */
e7b07cee
HW
6920 switch (connector_type) {
6921 case DRM_MODE_CONNECTOR_HDMIA:
6922 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
e7baae1c 6923 aconnector->base.ycbcr_420_allowed =
9ea59d5a 6924 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
e7b07cee
HW
6925 break;
6926 case DRM_MODE_CONNECTOR_DisplayPort:
6927 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
e7baae1c 6928 aconnector->base.ycbcr_420_allowed =
9ea59d5a 6929 link->link_enc->features.dp_ycbcr420_supported ? true : false;
e7b07cee
HW
6930 break;
6931 case DRM_MODE_CONNECTOR_DVID:
6932 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6933 break;
6934 default:
6935 break;
6936 }
6937
6938 drm_object_attach_property(&aconnector->base.base,
6939 dm->ddev->mode_config.scaling_mode_property,
6940 DRM_MODE_SCALE_NONE);
6941
6942 drm_object_attach_property(&aconnector->base.base,
6943 adev->mode_info.underscan_property,
6944 UNDERSCAN_OFF);
6945 drm_object_attach_property(&aconnector->base.base,
6946 adev->mode_info.underscan_hborder_property,
6947 0);
6948 drm_object_attach_property(&aconnector->base.base,
6949 adev->mode_info.underscan_vborder_property,
6950 0);
1825fd34 6951
8c61b31e
JFZ
6952 if (!aconnector->mst_port)
6953 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
1825fd34 6954
4a8ca46b
RL
6955 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
6956 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6957 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
e7b07cee 6958
c1ee92f9 6959 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
5cb32419 6960 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
c1ee92f9
DF
6961 drm_object_attach_property(&aconnector->base.base,
6962 adev->mode_info.abm_level_property, 0);
6963 }
bb47de73
NK
6964
6965 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7fad8da1
NK
6966 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6967 connector_type == DRM_MODE_CONNECTOR_eDP) {
88694af9
NK
6968 drm_object_attach_property(
6969 &aconnector->base.base,
6970 dm->ddev->mode_config.hdr_output_metadata_property, 0);
6971
8c61b31e
JFZ
6972 if (!aconnector->mst_port)
6973 drm_connector_attach_vrr_capable_property(&aconnector->base);
6974
0c8620d6 6975#ifdef CONFIG_DRM_AMD_DC_HDCP
e22bb562 6976 if (adev->dm.hdcp_workqueue)
53e108aa 6977 drm_connector_attach_content_protection_property(&aconnector->base, true);
0c8620d6 6978#endif
bb47de73 6979 }
e7b07cee
HW
6980}
6981
7578ecda
AD
6982static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6983 struct i2c_msg *msgs, int num)
e7b07cee
HW
6984{
6985 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6986 struct ddc_service *ddc_service = i2c->ddc_service;
6987 struct i2c_command cmd;
6988 int i;
6989 int result = -EIO;
6990
b830ebc9 6991 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
e7b07cee
HW
6992
6993 if (!cmd.payloads)
6994 return result;
6995
6996 cmd.number_of_payloads = num;
6997 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6998 cmd.speed = 100;
6999
7000 for (i = 0; i < num; i++) {
7001 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7002 cmd.payloads[i].address = msgs[i].addr;
7003 cmd.payloads[i].length = msgs[i].len;
7004 cmd.payloads[i].data = msgs[i].buf;
7005 }
7006
c85e6e54
DF
7007 if (dc_submit_i2c(
7008 ddc_service->ctx->dc,
7009 ddc_service->ddc_pin->hw_info.ddc_channel,
e7b07cee
HW
7010 &cmd))
7011 result = num;
7012
7013 kfree(cmd.payloads);
7014 return result;
7015}
7016
7578ecda 7017static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
e7b07cee
HW
7018{
7019 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7020}
7021
7022static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7023 .master_xfer = amdgpu_dm_i2c_xfer,
7024 .functionality = amdgpu_dm_i2c_func,
7025};
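/*
 * Note: once registered via i2c_add_adapter() in
 * amdgpu_dm_connector_init() below, this DDC bus behaves as an
 * ordinary I2C master for the rest of the kernel, limited to plain
 * I2C and emulated SMBus transfers per amdgpu_dm_i2c_func() above.
 */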
7026
3ee6b26b
AD
7027static struct amdgpu_i2c_adapter *
7028create_i2c(struct ddc_service *ddc_service,
7029 int link_index,
7030 int *res)
e7b07cee
HW
7031{
7032 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7033 struct amdgpu_i2c_adapter *i2c;
7034
b830ebc9 7035 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
2a55f096
ES
7036 if (!i2c)
7037 return NULL;
e7b07cee
HW
7038 i2c->base.owner = THIS_MODULE;
7039 i2c->base.class = I2C_CLASS_DDC;
7040 i2c->base.dev.parent = &adev->pdev->dev;
7041 i2c->base.algo = &amdgpu_dm_i2c_algo;
b830ebc9 7042 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
e7b07cee
HW
7043 i2c_set_adapdata(&i2c->base, i2c);
7044 i2c->ddc_service = ddc_service;
c85e6e54 7045 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
e7b07cee
HW
7046
7047 return i2c;
7048}
7049
89fc8d4e 7050
1f6010a9
DF
7051/*
7052 * Note: this function assumes that dc_link_detect() was called for the
b830ebc9
HW
7053 * dc_link which will be represented by this aconnector.
7054 */
7578ecda
AD
7055static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7056 struct amdgpu_dm_connector *aconnector,
7057 uint32_t link_index,
7058 struct amdgpu_encoder *aencoder)
e7b07cee
HW
7059{
7060 int res = 0;
7061 int connector_type;
7062 struct dc *dc = dm->dc;
7063 struct dc_link *link = dc_get_link_at_index(dc, link_index);
7064 struct amdgpu_i2c_adapter *i2c;
9a227d26
TSD
7065
7066 link->priv = aconnector;
e7b07cee 7067
f1ad2f5e 7068 DRM_DEBUG_DRIVER("%s()\n", __func__);
e7b07cee
HW
7069
7070 i2c = create_i2c(link->ddc, link->link_index, &res);
2a55f096
ES
7071 if (!i2c) {
7072 DRM_ERROR("Failed to create i2c adapter data\n");
7073 return -ENOMEM;
7074 }
7075
e7b07cee
HW
7076 aconnector->i2c = i2c;
7077 res = i2c_add_adapter(&i2c->base);
7078
7079 if (res) {
7080 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7081 goto out_free;
7082 }
7083
7084 connector_type = to_drm_connector_type(link->connector_signal);
7085
17165de2 7086 res = drm_connector_init_with_ddc(
e7b07cee
HW
7087 dm->ddev,
7088 &aconnector->base,
7089 &amdgpu_dm_connector_funcs,
17165de2
AP
7090 connector_type,
7091 &i2c->base);
e7b07cee
HW
7092
7093 if (res) {
7094 DRM_ERROR("connector_init failed\n");
7095 aconnector->connector_id = -1;
7096 goto out_free;
7097 }
7098
7099 drm_connector_helper_add(
7100 &aconnector->base,
7101 &amdgpu_dm_connector_helper_funcs);
7102
7103 amdgpu_dm_connector_init_helper(
7104 dm,
7105 aconnector,
7106 connector_type,
7107 link,
7108 link_index);
7109
cde4c44d 7110 drm_connector_attach_encoder(
e7b07cee
HW
7111 &aconnector->base, &aencoder->base);
7112
e7b07cee
HW
7113 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7114 || connector_type == DRM_MODE_CONNECTOR_eDP)
7daec99f 7115 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
e7b07cee 7116
e7b07cee
HW
7117out_free:
7118 if (res) {
7119 kfree(i2c);
7120 aconnector->i2c = NULL;
7121 }
7122 return res;
7123}
7124
7125int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7126{
7127 switch (adev->mode_info.num_crtc) {
7128 case 1:
7129 return 0x1;
7130 case 2:
7131 return 0x3;
7132 case 3:
7133 return 0x7;
7134 case 4:
7135 return 0xf;
7136 case 5:
7137 return 0x1f;
7138 case 6:
7139 default:
7140 return 0x3f;
7141 }
7142}
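/*
 * For the one-to-six CRTC cases above this is simply a mask of all
 * available CRTCs; an equivalent closed form (illustrative sketch
 * only, not part of the driver) would be:
 *
 * return (1u << min(adev->mode_info.num_crtc, 6)) - 1;
 */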
7143
7578ecda
AD
7144static int amdgpu_dm_encoder_init(struct drm_device *dev,
7145 struct amdgpu_encoder *aencoder,
7146 uint32_t link_index)
e7b07cee 7147{
1348969a 7148 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
7149
7150 int res = drm_encoder_init(dev,
7151 &aencoder->base,
7152 &amdgpu_dm_encoder_funcs,
7153 DRM_MODE_ENCODER_TMDS,
7154 NULL);
7155
7156 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7157
7158 if (!res)
7159 aencoder->encoder_id = link_index;
7160 else
7161 aencoder->encoder_id = -1;
7162
7163 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7164
7165 return res;
7166}
7167
3ee6b26b
AD
7168static void manage_dm_interrupts(struct amdgpu_device *adev,
7169 struct amdgpu_crtc *acrtc,
7170 bool enable)
e7b07cee
HW
7171{
7172 /*
8fe684e9
NK
7173 * We have no guarantee that the frontend index maps to the same
7174 * backend index - some even map to more than one.
7175 *
7176 * TODO: Use a different interrupt or check DC itself for the mapping.
e7b07cee
HW
7177 */
7178 int irq_type =
734dd01d 7179 amdgpu_display_crtc_idx_to_irq_type(
e7b07cee
HW
7180 adev,
7181 acrtc->crtc_id);
7182
7183 if (enable) {
7184 drm_crtc_vblank_on(&acrtc->base);
7185 amdgpu_irq_get(
7186 adev,
7187 &adev->pageflip_irq,
7188 irq_type);
7189 } else {
7190
7191 amdgpu_irq_put(
7192 adev,
7193 &adev->pageflip_irq,
7194 irq_type);
7195 drm_crtc_vblank_off(&acrtc->base);
7196 }
7197}
7198
8fe684e9
NK
7199static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7200 struct amdgpu_crtc *acrtc)
7201{
7202 int irq_type =
7203 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7204
7205 /**
7206 * This reads the current state for the IRQ and forcibly reapplies
7207 * the setting to hardware.
7208 */
7209 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7210}
7211
3ee6b26b
AD
7212static bool
7213is_scaling_state_different(const struct dm_connector_state *dm_state,
7214 const struct dm_connector_state *old_dm_state)
e7b07cee
HW
7215{
7216 if (dm_state->scaling != old_dm_state->scaling)
7217 return true;
7218 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7219 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7220 return true;
7221 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7222 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7223 return true;
b830ebc9
HW
7224 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7225 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7226 return true;
e7b07cee
HW
7227 return false;
7228}
7229
0c8620d6
BL
7230#ifdef CONFIG_DRM_AMD_DC_HDCP
7231static bool is_content_protection_different(struct drm_connector_state *state,
7232 const struct drm_connector_state *old_state,
7233 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7234{
7235 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7236
53e108aa
BL
7237 if (old_state->hdcp_content_type != state->hdcp_content_type &&
7238 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7239 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7240 return true;
7241 }
7242
0c8620d6
BL
7243 /* CP is being re-enabled; ignore this */
7244 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7245 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7246 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7247 return false;
7248 }
7249
7250 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
7251 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7252 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7253 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7254
7255 /* Check that something is actually connected and enabled; otherwise we would
7256 * start HDCP with nothing connected (hot-plug, headless S3, DPMS).
7257 */
7258 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
7259 aconnector->dc_sink != NULL)
7260 return true;
7261
7262 if (old_state->content_protection == state->content_protection)
7263 return false;
7264
7265 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
7266 return true;
7267
7268 return false;
7269}
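/*
 * Net effect of the checks above (derived from the code): an HDCP
 * update is requested only when the HDCP content type changed, when a
 * DESIRED connector has an active, powered sink, or when protection
 * is being turned off (UNDESIRED); the ENABLED -> DESIRED bounce DRM
 * performs on every commit is filtered out, and an S3 restore to
 * ENABLED is downgraded to DESIRED so the link is re-authenticated.
 */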
7270
0c8620d6 7271#endif
3ee6b26b
AD
7272static void remove_stream(struct amdgpu_device *adev,
7273 struct amdgpu_crtc *acrtc,
7274 struct dc_stream_state *stream)
e7b07cee
HW
7275{
7276 /* this is the update mode case */
e7b07cee
HW
7277
7278 acrtc->otg_inst = -1;
7279 acrtc->enabled = false;
7280}
7281
7578ecda
AD
7282static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7283 struct dc_cursor_position *position)
2a8f6ccb 7284{
f4c2cc43 7285 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2a8f6ccb
HW
7286 int x, y;
7287 int xorigin = 0, yorigin = 0;
7288
e371e19c
NK
7289 position->enable = false;
7290 position->x = 0;
7291 position->y = 0;
7292
7293 if (!crtc || !plane->state->fb)
2a8f6ccb 7294 return 0;
2a8f6ccb
HW
7295
7296 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7297 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7298 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7299 __func__,
7300 plane->state->crtc_w,
7301 plane->state->crtc_h);
7302 return -EINVAL;
7303 }
7304
7305 x = plane->state->crtc_x;
7306 y = plane->state->crtc_y;
c14a005c 7307
e371e19c
NK
7308 if (x <= -amdgpu_crtc->max_cursor_width ||
7309 y <= -amdgpu_crtc->max_cursor_height)
7310 return 0;
7311
2a8f6ccb
HW
7312 if (x < 0) {
7313 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7314 x = 0;
7315 }
7316 if (y < 0) {
7317 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
7318 y = 0;
7319 }
7320 position->enable = true;
d243b6ff 7321 position->translate_by_source = true;
2a8f6ccb
HW
7322 position->x = x;
7323 position->y = y;
7324 position->x_hotspot = xorigin;
7325 position->y_hotspot = yorigin;
7326
7327 return 0;
7328}
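/*
 * Worked example of the clamping above (illustrative): with a 128x128
 * max cursor size, a cursor at crtc_x == -10 yields xorigin == 10 and
 * x == 0, so scanout starts 10 pixels into the cursor surface and the
 * image appears to slide off the left edge; once crtc_x drops to -128
 * or below, the cursor is simply left disabled.
 */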
7329
3ee6b26b
AD
7330static void handle_cursor_update(struct drm_plane *plane,
7331 struct drm_plane_state *old_plane_state)
e7b07cee 7332{
1348969a 7333 struct amdgpu_device *adev = drm_to_adev(plane->dev);
2a8f6ccb
HW
7334 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
7335 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
7336 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
7337 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7338 uint64_t address = afb ? afb->address : 0;
7339 struct dc_cursor_position position;
7340 struct dc_cursor_attributes attributes;
7341 int ret;
7342
e7b07cee
HW
7343 if (!plane->state->fb && !old_plane_state->fb)
7344 return;
7345
f1ad2f5e 7346 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
c12a7ba5
HW
7347 __func__,
7348 amdgpu_crtc->crtc_id,
7349 plane->state->crtc_w,
7350 plane->state->crtc_h);
2a8f6ccb
HW
7351
7352 ret = get_cursor_position(plane, crtc, &position);
7353 if (ret)
7354 return;
7355
7356 if (!position.enable) {
7357 /* turn off cursor */
674e78ac
NK
7358 if (crtc_state && crtc_state->stream) {
7359 mutex_lock(&adev->dm.dc_lock);
2a8f6ccb
HW
7360 dc_stream_set_cursor_position(crtc_state->stream,
7361 &position);
674e78ac
NK
7362 mutex_unlock(&adev->dm.dc_lock);
7363 }
2a8f6ccb 7364 return;
e7b07cee 7365 }
e7b07cee 7366
2a8f6ccb
HW
7367 amdgpu_crtc->cursor_width = plane->state->crtc_w;
7368 amdgpu_crtc->cursor_height = plane->state->crtc_h;
7369
c1cefe11 7370 memset(&attributes, 0, sizeof(attributes));
2a8f6ccb
HW
7371 attributes.address.high_part = upper_32_bits(address);
7372 attributes.address.low_part = lower_32_bits(address);
7373 attributes.width = plane->state->crtc_w;
7374 attributes.height = plane->state->crtc_h;
7375 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
7376 attributes.rotation_angle = 0;
7377 attributes.attribute_flags.value = 0;
7378
7379 attributes.pitch = attributes.width;
7380
886daac9 7381 if (crtc_state->stream) {
674e78ac 7382 mutex_lock(&adev->dm.dc_lock);
886daac9
JZ
7383 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
7384 &attributes))
7385 DRM_ERROR("DC failed to set cursor attributes\n");
2a8f6ccb 7386
2a8f6ccb
HW
7387 if (!dc_stream_set_cursor_position(crtc_state->stream,
7388 &position))
7389 DRM_ERROR("DC failed to set cursor position\n");
674e78ac 7390 mutex_unlock(&adev->dm.dc_lock);
886daac9 7391 }
2a8f6ccb 7392}
e7b07cee
HW
7393
7394static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7395{
7396
7397 assert_spin_locked(&acrtc->base.dev->event_lock);
7398 WARN_ON(acrtc->event);
7399
7400 acrtc->event = acrtc->base.state->event;
7401
7402 /* Set the flip status */
7403 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7404
7405 /* Mark this event as consumed */
7406 acrtc->base.state->event = NULL;
7407
7408 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7409 acrtc->crtc_id);
7410}
7411
bb47de73
NK
7412static void update_freesync_state_on_stream(
7413 struct amdgpu_display_manager *dm,
7414 struct dm_crtc_state *new_crtc_state,
180db303
NK
7415 struct dc_stream_state *new_stream,
7416 struct dc_plane_state *surface,
7417 u32 flip_timestamp_in_us)
bb47de73 7418{
09aef2c4 7419 struct mod_vrr_params vrr_params;
bb47de73 7420 struct dc_info_packet vrr_infopacket = {0};
09aef2c4 7421 struct amdgpu_device *adev = dm->adev;
585d450c 7422 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
09aef2c4 7423 unsigned long flags;
bb47de73
NK
7424
7425 if (!new_stream)
7426 return;
7427
7428 /*
7429 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7430 * For now it's sufficient to just guard against these conditions.
7431 */
7432
7433 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7434 return;
7435
4a580877 7436 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
585d450c 7437 vrr_params = acrtc->dm_irq_params.vrr_params;
09aef2c4 7438
180db303
NK
7439 if (surface) {
7440 mod_freesync_handle_preflip(
7441 dm->freesync_module,
7442 surface,
7443 new_stream,
7444 flip_timestamp_in_us,
7445 &vrr_params);
09aef2c4
MK
7446
7447 if (adev->family < AMDGPU_FAMILY_AI &&
7448 amdgpu_dm_vrr_active(new_crtc_state)) {
7449 mod_freesync_handle_v_update(dm->freesync_module,
7450 new_stream, &vrr_params);
e63e2491
EB
7451
7452 /* Need to call this before the frame ends. */
7453 dc_stream_adjust_vmin_vmax(dm->dc,
7454 new_crtc_state->stream,
7455 &vrr_params.adjust);
09aef2c4 7456 }
180db303 7457 }
bb47de73
NK
7458
7459 mod_freesync_build_vrr_infopacket(
7460 dm->freesync_module,
7461 new_stream,
180db303 7462 &vrr_params,
ecd0136b
HT
7463 PACKET_TYPE_VRR,
7464 TRANSFER_FUNC_UNKNOWN,
bb47de73
NK
7465 &vrr_infopacket);
7466
8a48b44c 7467 new_crtc_state->freesync_timing_changed |=
585d450c 7468 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
180db303
NK
7469 &vrr_params.adjust,
7470 sizeof(vrr_params.adjust)) != 0);
bb47de73 7471
8a48b44c 7472 new_crtc_state->freesync_vrr_info_changed |=
bb47de73
NK
7473 (memcmp(&new_crtc_state->vrr_infopacket,
7474 &vrr_infopacket,
7475 sizeof(vrr_infopacket)) != 0);
7476
585d450c 7477 acrtc->dm_irq_params.vrr_params = vrr_params;
bb47de73
NK
7478 new_crtc_state->vrr_infopacket = vrr_infopacket;
7479
585d450c 7480 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
bb47de73
NK
7481 new_stream->vrr_infopacket = vrr_infopacket;
7482
7483 if (new_crtc_state->freesync_vrr_info_changed)
7484 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
7485 new_crtc_state->base.crtc->base.id,
7486 (int)new_crtc_state->base.vrr_enabled,
180db303 7487 (int)vrr_params.state);
09aef2c4 7488
4a580877 7489 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
bb47de73
NK
7490}
7491
585d450c 7492static void update_stream_irq_parameters(
e854194c
MK
7493 struct amdgpu_display_manager *dm,
7494 struct dm_crtc_state *new_crtc_state)
7495{
7496 struct dc_stream_state *new_stream = new_crtc_state->stream;
09aef2c4 7497 struct mod_vrr_params vrr_params;
e854194c 7498 struct mod_freesync_config config = new_crtc_state->freesync_config;
09aef2c4 7499 struct amdgpu_device *adev = dm->adev;
585d450c 7500 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
09aef2c4 7501 unsigned long flags;
e854194c
MK
7502
7503 if (!new_stream)
7504 return;
7505
7506 /*
7507 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7508 * For now it's sufficient to just guard against these conditions.
7509 */
7510 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7511 return;
7512
4a580877 7513 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
585d450c 7514 vrr_params = acrtc->dm_irq_params.vrr_params;
09aef2c4 7515
e854194c
MK
7516 if (new_crtc_state->vrr_supported &&
7517 config.min_refresh_in_uhz &&
7518 config.max_refresh_in_uhz) {
7519 config.state = new_crtc_state->base.vrr_enabled ?
7520 VRR_STATE_ACTIVE_VARIABLE :
7521 VRR_STATE_INACTIVE;
7522 } else {
7523 config.state = VRR_STATE_UNSUPPORTED;
7524 }
7525
7526 mod_freesync_build_vrr_params(dm->freesync_module,
7527 new_stream,
7528 &config, &vrr_params);
7529
7530 new_crtc_state->freesync_timing_changed |=
585d450c
AP
7531 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7532 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
e854194c 7533
585d450c
AP
7534 new_crtc_state->freesync_config = config;
7535 /* Copy state for access from DM IRQ handler */
7536 acrtc->dm_irq_params.freesync_config = config;
7537 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7538 acrtc->dm_irq_params.vrr_params = vrr_params;
4a580877 7539 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
e854194c
MK
7540}
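/*
 * Quick reference for the config.state chosen above (derived from the
 * code): VRR supported with valid min/max refresh bounds selects
 * VRR_STATE_ACTIVE_VARIABLE or VRR_STATE_INACTIVE depending on the
 * DRM vrr_enabled flag; anything else is VRR_STATE_UNSUPPORTED.
 */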
7541
66b0c973
MK
7542static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7543 struct dm_crtc_state *new_state)
7544{
7545 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7546 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7547
7548 if (!old_vrr_active && new_vrr_active) {
7549 /* Transition VRR inactive -> active:
7550 * While VRR is active, we must not disable vblank irq, as a
7551 * reenable after a disable would compute bogus vblank/pflip
7552 * timestamps if it happened inside the display front-porch, which is likely.
d2574c33
MK
7553 *
7554 * We also need vupdate irq for the actual core vblank handling
7555 * at end of vblank.
66b0c973 7556 */
d2574c33 7557 dm_set_vupdate_irq(new_state->base.crtc, true);
66b0c973
MK
7558 drm_crtc_vblank_get(new_state->base.crtc);
7559 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7560 __func__, new_state->base.crtc->base.id);
7561 } else if (old_vrr_active && !new_vrr_active) {
7562 /* Transition VRR active -> inactive:
7563 * Allow vblank irq disable again for fixed refresh rate.
7564 */
d2574c33 7565 dm_set_vupdate_irq(new_state->base.crtc, false);
66b0c973
MK
7566 drm_crtc_vblank_put(new_state->base.crtc);
7567 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7568 __func__, new_state->base.crtc->base.id);
7569 }
7570}
7571
8ad27806
NK
7572static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7573{
7574 struct drm_plane *plane;
7575 struct drm_plane_state *old_plane_state, *new_plane_state;
7576 int i;
7577
7578 /*
7579 * TODO: Make this per-stream so we don't issue redundant updates for
7580 * commits with multiple streams.
7581 */
7582 for_each_oldnew_plane_in_state(state, plane, old_plane_state,
7583 new_plane_state, i)
7584 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7585 handle_cursor_update(plane, old_plane_state);
7586}
7587
3be5262e 7588static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
eb3dc897 7589 struct dc_state *dc_state,
3ee6b26b
AD
7590 struct drm_device *dev,
7591 struct amdgpu_display_manager *dm,
7592 struct drm_crtc *pcrtc,
420cd472 7593 bool wait_for_vblank)
e7b07cee 7594{
570c91d5 7595 uint32_t i;
8a48b44c 7596 uint64_t timestamp_ns;
e7b07cee 7597 struct drm_plane *plane;
0bc9706d 7598 struct drm_plane_state *old_plane_state, *new_plane_state;
e7b07cee 7599 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
0bc9706d
LSL
7600 struct drm_crtc_state *new_pcrtc_state =
7601 drm_atomic_get_new_crtc_state(state, pcrtc);
7602 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
44d09c6a
HW
7603 struct dm_crtc_state *dm_old_crtc_state =
7604 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
74aa7bd4 7605 int planes_count = 0, vpos, hpos;
570c91d5 7606 long r;
e7b07cee 7607 unsigned long flags;
8a48b44c 7608 struct amdgpu_bo *abo;
fdd1fe57
MK
7609 uint32_t target_vblank, last_flip_vblank;
7610 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
74aa7bd4 7611 bool pflip_present = false;
bc7f670e
DF
7612 struct {
7613 struct dc_surface_update surface_updates[MAX_SURFACES];
7614 struct dc_plane_info plane_infos[MAX_SURFACES];
7615 struct dc_scaling_info scaling_infos[MAX_SURFACES];
74aa7bd4 7616 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
bc7f670e 7617 struct dc_stream_update stream_update;
74aa7bd4 7618 } *bundle;
bc7f670e 7619
74aa7bd4 7620 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8a48b44c 7621
74aa7bd4
DF
7622 if (!bundle) {
7623 dm_error("Failed to allocate update bundle\n");
4b510503
NK
7624 goto cleanup;
7625 }
e7b07cee 7626
8ad27806
NK
7627 /*
7628 * Disable the cursor first if we're disabling all the planes.
7629 * It'll remain on the screen after the planes are re-enabled
7630 * if we don't.
7631 */
7632 if (acrtc_state->active_planes == 0)
7633 amdgpu_dm_commit_cursors(state);
7634
e7b07cee 7635 /* update planes when needed */
0bc9706d
LSL
7636 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7637 struct drm_crtc *crtc = new_plane_state->crtc;
f5ba60fe 7638 struct drm_crtc_state *new_crtc_state;
0bc9706d 7639 struct drm_framebuffer *fb = new_plane_state->fb;
6eed95b0 7640 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
34bafd27 7641 bool plane_needs_flip;
c7af5f77 7642 struct dc_plane_state *dc_plane;
54d76575 7643 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
e7b07cee 7644
80c218d5
NK
7645 /* Cursor plane is handled after stream updates */
7646 if (plane->type == DRM_PLANE_TYPE_CURSOR)
e7b07cee 7647 continue;
e7b07cee 7648
f5ba60fe
DD
7649 if (!fb || !crtc || pcrtc != crtc)
7650 continue;
7651
7652 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7653 if (!new_crtc_state->active)
e7b07cee
HW
7654 continue;
7655
bc7f670e 7656 dc_plane = dm_new_plane_state->dc_state;
e7b07cee 7657
74aa7bd4 7658 bundle->surface_updates[planes_count].surface = dc_plane;
bc7f670e 7659 if (new_pcrtc_state->color_mgmt_changed) {
74aa7bd4
DF
7660 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7661 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
44efb784 7662 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
bc7f670e 7663 }
8a48b44c 7664
695af5f9
NK
7665 fill_dc_scaling_info(new_plane_state,
7666 &bundle->scaling_infos[planes_count]);
8a48b44c 7667
695af5f9
NK
7668 bundle->surface_updates[planes_count].scaling_info =
7669 &bundle->scaling_infos[planes_count];
8a48b44c 7670
f5031000 7671 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8a48b44c 7672
f5031000 7673 pflip_present = pflip_present || plane_needs_flip;
8a48b44c 7674
f5031000
DF
7675 if (!plane_needs_flip) {
7676 planes_count += 1;
7677 continue;
7678 }
8a48b44c 7679
2fac0f53
CK
7680 abo = gem_to_amdgpu_bo(fb->obj[0]);
7681
f8308898
AG
7682 /*
7683 * Wait for all fences on this FB. Do limited wait to avoid
7684 * deadlock during GPU reset when this fence will not signal
7685 * but we hold reservation lock for the BO.
7686 */
52791eee 7687 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
2fac0f53 7688 false,
f8308898
AG
7689 msecs_to_jiffies(5000));
7690 if (unlikely(r <= 0))
ed8a5fb2 7691 DRM_ERROR("Waiting for fences timed out!");
2fac0f53 7692
695af5f9 7693 fill_dc_plane_info_and_addr(
8ce5d842 7694 dm->adev, new_plane_state,
6eed95b0 7695 afb->tiling_flags,
695af5f9 7696 &bundle->plane_infos[planes_count],
87b7ebc2 7697 &bundle->flip_addrs[planes_count].address,
6eed95b0 7698 afb->tmz_surface, false);
87b7ebc2
RS
7699
7700 DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7701 new_plane_state->plane->index,
7702 bundle->plane_infos[planes_count].dcc.enable);
695af5f9
NK
7703
7704 bundle->surface_updates[planes_count].plane_info =
7705 &bundle->plane_infos[planes_count];
8a48b44c 7706
caff0e66
NK
7707 /*
7708 * Only allow immediate flips for fast updates that don't
7709 * change FB pitch, DCC state, rotation or mirroring.
7710 */
f5031000 7711 bundle->flip_addrs[planes_count].flip_immediate =
4d85f45c 7712 crtc->state->async_flip &&
caff0e66 7713 acrtc_state->update_type == UPDATE_TYPE_FAST;
8a48b44c 7714
f5031000
DF
7715 timestamp_ns = ktime_get_ns();
7716 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7717 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7718 bundle->surface_updates[planes_count].surface = dc_plane;
8a48b44c 7719
f5031000
DF
7720 if (!bundle->surface_updates[planes_count].surface) {
7721 DRM_ERROR("No surface for CRTC: id=%d\n",
7722 acrtc_attach->crtc_id);
7723 continue;
bc7f670e
DF
7724 }
7725
f5031000
DF
7726 if (plane == pcrtc->primary)
7727 update_freesync_state_on_stream(
7728 dm,
7729 acrtc_state,
7730 acrtc_state->stream,
7731 dc_plane,
7732 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
bc7f670e 7733
f5031000
DF
7734 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7735 __func__,
7736 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7737 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
bc7f670e
DF
7738
7739 planes_count += 1;
7740
8a48b44c
DF
7741 }
7742
74aa7bd4 7743 if (pflip_present) {
634092b1
MK
7744 if (!vrr_active) {
7745 /* Use old throttling in non-vrr fixed refresh rate mode
7746 * to keep flip scheduling based on target vblank counts
7747 * working in a backwards compatible way, e.g., for
7748 * clients using the GLX_OML_sync_control extension or
7749 * DRI3/Present extension with defined target_msc.
7750 */
e3eff4b5 7751 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
634092b1
MK
7752 }
7753 else {
7754 /* For variable refresh rate mode only:
7755 * Get vblank of last completed flip to avoid > 1 vrr
7756 * flips per video frame by use of throttling, but allow
7757 * flip programming anywhere in the possibly large
7758 * variable vrr vblank interval for fine-grained flip
7759 * timing control and more opportunity to avoid stutter
7760 * on late submission of flips.
7761 */
7762 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
5d1c59c4 7763 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
634092b1
MK
7764 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7765 }
7766
fdd1fe57 7767 target_vblank = last_flip_vblank + wait_for_vblank;
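/*
 * Illustrative arithmetic: wait_for_vblank is a bool, so the target
 * is at most "one vblank after the last completed flip", throttling
 * to one flip per refresh; the loop below then spins in ~1 ms steps
 * until scanout leaves the vblank preceding that target.
 */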
8a48b44c
DF
7768
7769 /*
7770 * Wait until we're out of the vertical blank period before the one
7771 * targeted by the flip
7772 */
7773 while ((acrtc_attach->enabled &&
7774 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7775 0, &vpos, &hpos, NULL,
7776 NULL, &pcrtc->hwmode)
7777 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7778 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7779 (int)(target_vblank -
e3eff4b5 7780 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8a48b44c
DF
7781 usleep_range(1000, 1100);
7782 }
7783
8fe684e9
NK
7784 /**
7785 * Prepare the flip event for the pageflip interrupt to handle.
7786 *
7787 * This only works in the case where we've already turned on the
7788 * appropriate hardware blocks (eg. HUBP) so in the transition case
7789 * from 0 -> n planes we have to skip a hardware generated event
7790 * and rely on sending it from software.
7791 */
7792 if (acrtc_attach->base.state->event &&
7793 acrtc_state->active_planes > 0) {
8a48b44c
DF
7794 drm_crtc_vblank_get(pcrtc);
7795
7796 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7797
7798 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7799 prepare_flip_isr(acrtc_attach);
7800
7801 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7802 }
7803
7804 if (acrtc_state->stream) {
8a48b44c 7805 if (acrtc_state->freesync_vrr_info_changed)
74aa7bd4 7806 bundle->stream_update.vrr_infopacket =
8a48b44c 7807 &acrtc_state->stream->vrr_infopacket;
e7b07cee 7808 }
e7b07cee
HW
7809 }
7810
bc92c065 7811 /* Update the planes if changed or disable if we don't have any. */
ed9656fb
ES
7812 if ((planes_count || acrtc_state->active_planes == 0) &&
7813 acrtc_state->stream) {
b6e881c9 7814 bundle->stream_update.stream = acrtc_state->stream;
bc7f670e 7815 if (new_pcrtc_state->mode_changed) {
74aa7bd4
DF
7816 bundle->stream_update.src = acrtc_state->stream->src;
7817 bundle->stream_update.dst = acrtc_state->stream->dst;
e7b07cee
HW
7818 }
7819
cf020d49
NK
7820 if (new_pcrtc_state->color_mgmt_changed) {
7821 /*
7822 * TODO: This isn't fully correct since we've actually
7823 * already modified the stream in place.
7824 */
7825 bundle->stream_update.gamut_remap =
7826 &acrtc_state->stream->gamut_remap_matrix;
7827 bundle->stream_update.output_csc_transform =
7828 &acrtc_state->stream->csc_color_matrix;
7829 bundle->stream_update.out_transfer_func =
7830 acrtc_state->stream->out_transfer_func;
7831 }
bc7f670e 7832
8a48b44c 7833 acrtc_state->stream->abm_level = acrtc_state->abm_level;
bc7f670e 7834 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
74aa7bd4 7835 bundle->stream_update.abm_level = &acrtc_state->abm_level;
44d09c6a 7836
e63e2491
EB
7837 /*
7838 * If FreeSync state on the stream has changed then we need to
7839 * re-adjust the min/max bounds now that DC doesn't handle this
7840 * as part of commit.
7841 */
7842 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7843 amdgpu_dm_vrr_active(acrtc_state)) {
7844 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7845 dc_stream_adjust_vmin_vmax(
7846 dm->dc, acrtc_state->stream,
585d450c 7847 &acrtc_attach->dm_irq_params.vrr_params.adjust);
e63e2491
EB
7848 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7849 }
bc7f670e 7850 mutex_lock(&dm->dc_lock);
8c322309 7851 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
d1ebfdd8 7852 acrtc_state->stream->link->psr_settings.psr_allow_active)
8c322309
RL
7853 amdgpu_dm_psr_disable(acrtc_state->stream);
7854
bc7f670e 7855 dc_commit_updates_for_stream(dm->dc,
74aa7bd4 7856 bundle->surface_updates,
bc7f670e
DF
7857 planes_count,
7858 acrtc_state->stream,
74aa7bd4 7859 &bundle->stream_update,
bc7f670e 7860 dc_state);
8c322309 7861
8fe684e9
NK
7862 /**
7863 * Enable or disable the interrupts on the backend.
7864 *
7865 * Most pipes are put into power gating when unused.
7866 *
7867 * When power gating is enabled on a pipe we lose the
7868 * interrupt enablement state when power gating is disabled.
7869 *
7870 * So we need to update the IRQ control state in hardware
7871 * whenever the pipe turns on (since it could be previously
7872 * power gated) or off (since some pipes can't be power gated
7873 * on some ASICs).
7874 */
7875 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
1348969a
LT
7876 dm_update_pflip_irq_state(drm_to_adev(dev),
7877 acrtc_attach);
8fe684e9 7878
8c322309 7879 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
1cfbbdde 7880 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
d1ebfdd8 7881 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8c322309
RL
7882 amdgpu_dm_link_setup_psr(acrtc_state->stream);
7883 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
d1ebfdd8
WW
7884 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7885 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
8c322309
RL
7886 amdgpu_dm_psr_enable(acrtc_state->stream);
7887 }
7888
bc7f670e 7889 mutex_unlock(&dm->dc_lock);
e7b07cee 7890 }
4b510503 7891
8ad27806
NK
7892 /*
7893 * Update cursor state *after* programming all the planes.
7894 * This avoids redundant programming in the case where we're going
7895 * to be disabling a single plane - those pipes are being disabled.
7896 */
7897 if (acrtc_state->active_planes)
7898 amdgpu_dm_commit_cursors(state);
80c218d5 7899
4b510503 7900cleanup:
74aa7bd4 7901 kfree(bundle);
e7b07cee
HW
7902}
7903
6ce8f316
NK
7904static void amdgpu_dm_commit_audio(struct drm_device *dev,
7905 struct drm_atomic_state *state)
7906{
1348969a 7907 struct amdgpu_device *adev = drm_to_adev(dev);
6ce8f316
NK
7908 struct amdgpu_dm_connector *aconnector;
7909 struct drm_connector *connector;
7910 struct drm_connector_state *old_con_state, *new_con_state;
7911 struct drm_crtc_state *new_crtc_state;
7912 struct dm_crtc_state *new_dm_crtc_state;
7913 const struct dc_stream_status *status;
7914 int i, inst;
7915
7916 /* Notify device removals. */
7917 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7918 if (old_con_state->crtc != new_con_state->crtc) {
7919 /* CRTC changes require notification. */
7920 goto notify;
7921 }
7922
7923 if (!new_con_state->crtc)
7924 continue;
7925
7926 new_crtc_state = drm_atomic_get_new_crtc_state(
7927 state, new_con_state->crtc);
7928
7929 if (!new_crtc_state)
7930 continue;
7931
7932 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7933 continue;
7934
7935 notify:
7936 aconnector = to_amdgpu_dm_connector(connector);
7937
7938 mutex_lock(&adev->dm.audio_lock);
7939 inst = aconnector->audio_inst;
7940 aconnector->audio_inst = -1;
7941 mutex_unlock(&adev->dm.audio_lock);
7942
7943 amdgpu_dm_audio_eld_notify(adev, inst);
7944 }
7945
7946 /* Notify audio device additions. */
7947 for_each_new_connector_in_state(state, connector, new_con_state, i) {
7948 if (!new_con_state->crtc)
7949 continue;
7950
7951 new_crtc_state = drm_atomic_get_new_crtc_state(
7952 state, new_con_state->crtc);
7953
7954 if (!new_crtc_state)
7955 continue;
7956
7957 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7958 continue;
7959
7960 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7961 if (!new_dm_crtc_state->stream)
7962 continue;
7963
7964 status = dc_stream_get_status(new_dm_crtc_state->stream);
7965 if (!status)
7966 continue;
7967
7968 aconnector = to_amdgpu_dm_connector(connector);
7969
7970 mutex_lock(&adev->dm.audio_lock);
7971 inst = status->audio_inst;
7972 aconnector->audio_inst = inst;
7973 mutex_unlock(&adev->dm.audio_lock);
7974
7975 amdgpu_dm_audio_eld_notify(adev, inst);
7976 }
7977}
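/*
 * Note: removals are notified before additions, so an audio endpoint
 * that moves to a different CRTC in the same commit is torn down on
 * its old instance before the new instance is announced through
 * amdgpu_dm_audio_eld_notify().
 */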
7978
1f6010a9 7979/*
27b3f4fc
LSL
7980 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7981 * @crtc_state: the DRM CRTC state
7982 * @stream_state: the DC stream state.
7983 *
7984 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
7985 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7986 */
7987static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7988 struct dc_stream_state *stream_state)
7989{
b9952f93 7990 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
27b3f4fc 7991}
e7b07cee 7992
7578ecda
AD
7993static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7994 struct drm_atomic_state *state,
7995 bool nonblock)
e7b07cee 7996{
1f6010a9
DF
7997 /*
7998 * Add a check here for SoCs that support a hardware cursor plane, to
7999 * unset legacy_cursor_update
8000 */
e7b07cee
HW
8001
8002 return drm_atomic_helper_commit(dev, state, nonblock);
8003
8004 /* TODO: Handle EINTR, re-enable IRQ */
8005}
8006
b8592b48
LL
8007/**
8008 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8009 * @state: The atomic state to commit
8010 *
8011 * This will tell DC to commit the constructed DC state from atomic_check,
8012 * programming the hardware. Any failures here implies a hardware failure, since
8013 * atomic check should have filtered anything non-kosher.
8014 */
7578ecda 8015static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
e7b07cee
HW
8016{
8017 struct drm_device *dev = state->dev;
1348969a 8018 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
8019 struct amdgpu_display_manager *dm = &adev->dm;
8020 struct dm_atomic_state *dm_state;
eb3dc897 8021 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
e7b07cee 8022 uint32_t i, j;
5cc6dcbd 8023 struct drm_crtc *crtc;
0bc9706d 8024 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
e7b07cee
HW
8025 unsigned long flags;
8026 bool wait_for_vblank = true;
8027 struct drm_connector *connector;
c2cea706 8028 struct drm_connector_state *old_con_state, *new_con_state;
54d76575 8029 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
fe2a1965 8030 int crtc_disable_count = 0;
6ee90e88 8031 bool mode_set_reset_required = false;
e7b07cee 8032
e8a98235
RS
8033 trace_amdgpu_dm_atomic_commit_tail_begin(state);
8034
e7b07cee 8035 drm_atomic_helper_update_legacy_modeset_state(dev, state);
441959eb 8036 drm_atomic_helper_calc_timestamping_constants(state);
e7b07cee 8037
eb3dc897
NK
8038 dm_state = dm_atomic_get_new_state(state);
8039 if (dm_state && dm_state->context) {
8040 dc_state = dm_state->context;
8041 } else {
8042 /* No state changes, retain current state. */
813d20dc 8043 dc_state_temp = dc_create_state(dm->dc);
eb3dc897
NK
8044 ASSERT(dc_state_temp);
8045 dc_state = dc_state_temp;
8046 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8047 }
e7b07cee 8048
6d90a208
AP
8049 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8050 new_crtc_state, i) {
8051 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8052
8053 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8054
8055 if (old_crtc_state->active &&
8056 (!new_crtc_state->active ||
8057 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8058 manage_dm_interrupts(adev, acrtc, false);
8059 dc_stream_release(dm_old_crtc_state->stream);
8060 }
8061 }
8062
e7b07cee 8063 /* update changed items */
0bc9706d 8064 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
e7b07cee 8065 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 8066
54d76575
LSL
8067 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8068 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
e7b07cee 8069
f1ad2f5e 8070 DRM_DEBUG_DRIVER(
e7b07cee
HW
8071 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8072 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8073 "connectors_changed:%d\n",
8074 acrtc->crtc_id,
0bc9706d
LSL
8075 new_crtc_state->enable,
8076 new_crtc_state->active,
8077 new_crtc_state->planes_changed,
8078 new_crtc_state->mode_changed,
8079 new_crtc_state->active_changed,
8080 new_crtc_state->connectors_changed);
e7b07cee 8081
5c68c652
VL
8082 /* Disable cursor if disabling crtc */
8083 if (old_crtc_state->active && !new_crtc_state->active) {
8084 struct dc_cursor_position position;
8085
8086 memset(&position, 0, sizeof(position));
8087 mutex_lock(&dm->dc_lock);
8088 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8089 mutex_unlock(&dm->dc_lock);
8090 }
8091
27b3f4fc
LSL
8092 /* Copy all transient state flags into dc state */
8093 if (dm_new_crtc_state->stream) {
8094 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8095 dm_new_crtc_state->stream);
8096 }
8097
e7b07cee
HW
8098 /* handles headless hotplug case, updating new_state and
8099 * aconnector as needed
8100 */
8101
54d76575 8102 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
e7b07cee 8103
f1ad2f5e 8104 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 8105
54d76575 8106 if (!dm_new_crtc_state->stream) {
e7b07cee 8107 /*
b830ebc9
HW
8108 * this can happen because of issues with the
8109 * delivery of userspace notifications.
8110 * In this case userspace tries to set a mode on a
1f6010a9
DF
8111 * display which is in fact disconnected.
8112 * dc_sink is NULL on the aconnector in this case.
b830ebc9
HW
8113 * We expect a mode reset to come soon.
8114 *
8115 * This can also happen when an unplug was done
8116 * during the resume sequence.
8117 *
8118 * In this case, we want to pretend we still
8119 * have a sink to keep the pipe running so that
8120 * hw state is consistent with the sw state
8121 */
f1ad2f5e 8122 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
e7b07cee
HW
8123 __func__, acrtc->base.base.id);
8124 continue;
8125 }
8126
54d76575
LSL
8127 if (dm_old_crtc_state->stream)
8128 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
e7b07cee 8129
97028037
LP
8130 pm_runtime_get_noresume(dev->dev);
8131
e7b07cee 8132 acrtc->enabled = true;
0bc9706d
LSL
8133 acrtc->hw_mode = new_crtc_state->mode;
8134 crtc->hwmode = new_crtc_state->mode;
6ee90e88 8135 mode_set_reset_required = true;
0bc9706d 8136 } else if (modereset_required(new_crtc_state)) {
f1ad2f5e 8137 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 8138 /* i.e. reset mode */
6ee90e88 8139 if (dm_old_crtc_state->stream)
54d76575 8140 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
6ee90e88 8141 mode_set_reset_required = true;
e7b07cee
HW
8142 }
8143 } /* for_each_crtc_in_state() */
8144
eb3dc897 8145 if (dc_state) {
6ee90e88 8146 /* if there was a mode set or reset, disable eDP PSR */
8147 if (mode_set_reset_required)
8148 amdgpu_dm_psr_disable_all(dm);
8149
eb3dc897 8150 dm_enable_per_frame_crtc_master_sync(dc_state);
674e78ac 8151 mutex_lock(&dm->dc_lock);
eb3dc897 8152 WARN_ON(!dc_commit_state(dm->dc, dc_state));
674e78ac 8153 mutex_unlock(&dm->dc_lock);
fa2123db 8154 }
e7b07cee 8155
0bc9706d 8156 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 8157 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 8158
54d76575 8159 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 8160
54d76575 8161 if (dm_new_crtc_state->stream != NULL) {
e7b07cee 8162 const struct dc_stream_status *status =
54d76575 8163 dc_stream_get_status(dm_new_crtc_state->stream);
e7b07cee 8164
eb3dc897 8165 if (!status)
09f609c3
LL
8166 status = dc_stream_get_status_from_state(dc_state,
8167 dm_new_crtc_state->stream);
e7b07cee 8168 if (!status)
54d76575 8169 DC_ERR("got no status for stream %p on acrtc %p\n", dm_new_crtc_state->stream, acrtc);
e7b07cee
HW
8170 else
8171 acrtc->otg_inst = status->primary_otg_inst;
8172 }
8173 }
0c8620d6
BL
8174#ifdef CONFIG_DRM_AMD_DC_HDCP
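 /*
 * HDCP state updates: if a CRTC is being disabled while content
 * protection is ENABLED, reset HDCP for that link and downgrade the
 * request to DESIRED so it re-authenticates on the next enable;
 * otherwise, push any content-protection change to the HDCP workqueue.
 */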
8175 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8176 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8177 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8178 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8179
8180 new_crtc_state = NULL;
8181
8182 if (acrtc)
8183 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8184
8185 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8186
8187 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8188 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8189 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8190 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8191 continue;
8192 }
8193
8194 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
b1abe558
BL
8195 hdcp_update_display(
8196 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
23eb4191 8197 new_con_state->hdcp_content_type,
b1abe558
BL
8198 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
8199 : false);
0c8620d6
BL
8200 }
8201#endif
e7b07cee 8202
02d6a6fc 8203 /* Handle connector state changes */
c2cea706 8204 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
54d76575
LSL
8205 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8206 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8207 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
19afd799
NC
8208 struct dc_surface_update dummy_updates[MAX_SURFACES];
8209 struct dc_stream_update stream_update;
b232d4ed 8210 struct dc_info_packet hdr_packet;
e7b07cee 8211 struct dc_stream_status *status = NULL;
b232d4ed 8212 bool abm_changed, hdr_changed, scaling_changed;
e7b07cee 8213
19afd799
NC
8214 memset(&dummy_updates, 0, sizeof(dummy_updates));
8215 memset(&stream_update, 0, sizeof(stream_update));
8216
44d09c6a 8217 if (acrtc) {
0bc9706d 8218 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
44d09c6a
HW
8219 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8220 }
0bc9706d 8221
e7b07cee 8222 /* Skip any modesets/resets */
0bc9706d 8223 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
e7b07cee
HW
8224 continue;
8225
54d76575 8226 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
c1ee92f9
DF
8227 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8228
b232d4ed
NK
8229 scaling_changed = is_scaling_state_different(dm_new_con_state,
8230 dm_old_con_state);
8231
8232 abm_changed = dm_new_crtc_state->abm_level !=
8233 dm_old_crtc_state->abm_level;
8234
8235 hdr_changed =
8236 is_hdr_metadata_different(old_con_state, new_con_state);
8237
8238 if (!scaling_changed && !abm_changed && !hdr_changed)
c1ee92f9 8239 continue;
e7b07cee 8240
b6e881c9 8241 stream_update.stream = dm_new_crtc_state->stream;
b232d4ed 8242 if (scaling_changed) {
02d6a6fc 8243 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
b6e881c9 8244 dm_new_con_state, dm_new_crtc_state->stream);
e7b07cee 8245
02d6a6fc
DF
8246 stream_update.src = dm_new_crtc_state->stream->src;
8247 stream_update.dst = dm_new_crtc_state->stream->dst;
8248 }
8249
b232d4ed 8250 if (abm_changed) {
02d6a6fc
DF
8251 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8252
8253 stream_update.abm_level = &dm_new_crtc_state->abm_level;
8254 }
70e8ffc5 8255
b232d4ed
NK
8256 if (hdr_changed) {
8257 fill_hdr_info_packet(new_con_state, &hdr_packet);
8258 stream_update.hdr_static_metadata = &hdr_packet;
8259 }
8260
54d76575 8261 status = dc_stream_get_status(dm_new_crtc_state->stream);
e7b07cee 8262 WARN_ON(!status);
3be5262e 8263 WARN_ON(!status->plane_count);
e7b07cee 8264
02d6a6fc
DF
8265 /*
8266 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8267 * Here we create an empty update on each plane.
8268 * To fix this, DC should permit updating only stream properties.
8269 */
8270 for (j = 0; j < status->plane_count; j++)
8271 dummy_updates[j].surface = status->plane_states[0];
8272
8273
8274 mutex_lock(&dm->dc_lock);
8275 dc_commit_updates_for_stream(dm->dc,
8276 dummy_updates,
8277 status->plane_count,
8278 dm_new_crtc_state->stream,
8279 &stream_update,
8280 dc_state);
8281 mutex_unlock(&dm->dc_lock);
e7b07cee
HW
8282 }
8283
b5e83f6f 8284 /* Count number of newly disabled CRTCs for dropping PM refs later. */
e1fc2dca 8285 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
057be086 8286 new_crtc_state, i) {
fe2a1965
LP
8287 if (old_crtc_state->active && !new_crtc_state->active)
8288 crtc_disable_count++;
8289
54d76575 8290 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e1fc2dca 8291 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
66b0c973 8292
585d450c
AP
8293 /* For freesync config update on crtc state and params for irq */
8294 update_stream_irq_parameters(dm, dm_new_crtc_state);
057be086 8295
66b0c973
MK
8296 /* Handle vrr on->off / off->on transitions */
8297 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8298 dm_new_crtc_state);
e7b07cee
HW
8299 }
8300
8fe684e9
NK
8301 /*
8302 * Enable interrupts for CRTCs that are newly enabled or went through
8303 * a modeset. This is intentionally deferred until after the front end
8304 * state is modified, so that the OTG is already on and the IRQ
8305 * handlers never access stale or invalid state.
8306 */
8307 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8308 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
c920888c 8309 bool configure_crc = false;
8fe684e9 8310
585d450c
AP
8311 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8312
8fe684e9
NK
8313 if (new_crtc_state->active &&
8314 (!old_crtc_state->active ||
8315 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
585d450c
AP
8316 dc_stream_retain(dm_new_crtc_state->stream);
8317 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8fe684e9 8318 manage_dm_interrupts(adev, acrtc, true);
c920888c 8319 }
8fe684e9 8320#ifdef CONFIG_DEBUG_FS
c920888c
WL
8321 if (new_crtc_state->active &&
8322 amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
8fe684e9
NK
8323 /*
8324 * Frontend may have changed, so reapply the CRC capture
8325 * settings for the stream.
8326 */
8327 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
c920888c 8328 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8fe684e9 8329
c920888c
WL
8330 if (amdgpu_dm_crc_window_is_default(dm_new_crtc_state)) {
8331 if (!old_crtc_state->active || drm_atomic_crtc_needs_modeset(new_crtc_state))
8332 configure_crc = true;
8333 } else {
8334 if (amdgpu_dm_crc_window_changed(dm_new_crtc_state, dm_old_crtc_state))
8335 configure_crc = true;
8fe684e9 8336 }
c920888c
WL
8337
8338 if (configure_crc)
8339 amdgpu_dm_crtc_configure_crc_source(
8340 crtc, dm_new_crtc_state, dm_new_crtc_state->crc_src);
8fe684e9 8341 }
c920888c 8342#endif
8fe684e9 8343 }
e7b07cee 8344
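 /* Skip the end-of-commit vblank wait if any CRTC requested an async flip. */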
420cd472 8345 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
4d85f45c 8346 if (new_crtc_state->async_flip)
420cd472
DF
8347 wait_for_vblank = false;
8348
e7b07cee 8349 /* update planes when needed per crtc*/
5cc6dcbd 8350 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
54d76575 8351 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 8352
54d76575 8353 if (dm_new_crtc_state->stream)
eb3dc897 8354 amdgpu_dm_commit_planes(state, dc_state, dev,
420cd472 8355 dm, crtc, wait_for_vblank);
e7b07cee
HW
8356 }
8357
6ce8f316
NK
8358 /* Update audio instances for each connector. */
8359 amdgpu_dm_commit_audio(dev, state);
8360
e7b07cee
HW
8361 /*
8362 * Send a vblank event for every event not handled in the flip, and
8363 * mark the event consumed for drm_atomic_helper_commit_hw_done()
8364 */
4a580877 8365 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
0bc9706d 8366 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 8367
0bc9706d
LSL
8368 if (new_crtc_state->event)
8369 drm_send_event_locked(dev, &new_crtc_state->event->base);
e7b07cee 8370
0bc9706d 8371 new_crtc_state->event = NULL;
e7b07cee 8372 }
4a580877 8373 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
e7b07cee 8374
29c8f234
LL
8375 /* Signal HW programming completion */
8376 drm_atomic_helper_commit_hw_done(state);
e7b07cee
HW
8377
8378 if (wait_for_vblank)
320a1274 8379 drm_atomic_helper_wait_for_flip_done(dev, state);
e7b07cee
HW
8380
8381 drm_atomic_helper_cleanup_planes(dev, state);
97028037 8382
1f6010a9
DF
8383 /*
8384 * Finally, drop a runtime PM reference for each newly disabled CRTC,
97028037
LP
8385 * so we can put the GPU into runtime suspend if we're not driving any
8386 * displays anymore
8387 */
fe2a1965
LP
8388 for (i = 0; i < crtc_disable_count; i++)
8389 pm_runtime_put_autosuspend(dev->dev);
97028037 8390 pm_runtime_mark_last_busy(dev->dev);
eb3dc897
NK
8391
8392 if (dc_state_temp)
8393 dc_release_state(dc_state_temp);
e7b07cee
HW
8394}
8395
8396
8397static int dm_force_atomic_commit(struct drm_connector *connector)
8398{
8399 int ret = 0;
8400 struct drm_device *ddev = connector->dev;
8401 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8402 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8403 struct drm_plane *plane = disconnected_acrtc->base.primary;
8404 struct drm_connector_state *conn_state;
8405 struct drm_crtc_state *crtc_state;
8406 struct drm_plane_state *plane_state;
8407
8408 if (!state)
8409 return -ENOMEM;
8410
8411 state->acquire_ctx = ddev->mode_config.acquire_ctx;
8412
8413 /* Construct an atomic state to restore previous display setting */
8414
8415 /*
8416 * Attach connectors to drm_atomic_state
8417 */
8418 conn_state = drm_atomic_get_connector_state(state, connector);
8419
8420 ret = PTR_ERR_OR_ZERO(conn_state);
8421 if (ret)
8422 goto err;
8423
8424 /* Attach crtc to drm_atomic_state*/
8425 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8426
8427 ret = PTR_ERR_OR_ZERO(crtc_state);
8428 if (ret)
8429 goto err;
8430
8431 /* force a restore */
8432 crtc_state->mode_changed = true;
8433
8434 /* Attach plane to drm_atomic_state */
8435 plane_state = drm_atomic_get_plane_state(state, plane);
8436
8437 ret = PTR_ERR_OR_ZERO(plane_state);
8438 if (ret)
8439 goto err;
8440
8441
8442 /* Call commit internally with the state we just constructed */
8443 ret = drm_atomic_commit(state);
8444 if (!ret)
8445 return 0;
8446
8447err:
8448 DRM_ERROR("Restoring old state failed with %i\n", ret);
8449 drm_atomic_state_put(state);
8450
8451 return ret;
8452}
8453
8454/*
1f6010a9
DF
8455 * This function handles all cases when a set mode does not come upon hotplug.
8456 * This includes when a display is unplugged and then plugged back into the
8457 * same port, and when running without usermode desktop manager support.
e7b07cee 8458 */
3ee6b26b
AD
8459void dm_restore_drm_connector_state(struct drm_device *dev,
8460 struct drm_connector *connector)
e7b07cee 8461{
c84dec2f 8462 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
8463 struct amdgpu_crtc *disconnected_acrtc;
8464 struct dm_crtc_state *acrtc_state;
8465
8466 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8467 return;
8468
8469 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
70e8ffc5
HW
8470 if (!disconnected_acrtc)
8471 return;
e7b07cee 8472
70e8ffc5
HW
8473 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8474 if (!acrtc_state->stream)
e7b07cee
HW
8475 return;
8476
8477 /*
8478 * If the previous sink is not released and different from the current,
8479 * we deduce we are in a state where we can not rely on usermode call
8480 * to turn on the display, so we do it here
8481 */
8482 if (acrtc_state->stream->sink != aconnector->dc_sink)
8483 dm_force_atomic_commit(&aconnector->base);
8484}
8485
1f6010a9 8486/*
e7b07cee
HW
8487 * Grabs all modesetting locks to serialize against any blocking commits,
8488 * and waits for completion of all non-blocking commits.
8489 */
3ee6b26b
AD
8490static int do_aquire_global_lock(struct drm_device *dev,
8491 struct drm_atomic_state *state)
e7b07cee
HW
8492{
8493 struct drm_crtc *crtc;
8494 struct drm_crtc_commit *commit;
8495 long ret;
8496
1f6010a9
DF
8497 /*
8498 * Adding all modeset locks to acquire_ctx will
e7b07cee
HW
8499 * ensure that when the framework releases it, the
8500 * extra locks we take here get released too
8501 */
8502 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8503 if (ret)
8504 return ret;
8505
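 /*
 * Hold a reference on the newest commit of each CRTC so it cannot be
 * freed while we wait below for its hw_done and flip_done completions.
 */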
8506 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8507 spin_lock(&crtc->commit_lock);
8508 commit = list_first_entry_or_null(&crtc->commit_list,
8509 struct drm_crtc_commit, commit_entry);
8510 if (commit)
8511 drm_crtc_commit_get(commit);
8512 spin_unlock(&crtc->commit_lock);
8513
8514 if (!commit)
8515 continue;
8516
1f6010a9
DF
8517 /*
8518 * Make sure all pending HW programming completed and
e7b07cee
HW
8519 * page flips done
8520 */
8521 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8522
8523 if (ret > 0)
8524 ret = wait_for_completion_interruptible_timeout(
8525 &commit->flip_done, 10*HZ);
8526
8527 if (ret == 0)
8528 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
b830ebc9 8529 "timed out\n", crtc->base.id, crtc->name);
e7b07cee
HW
8530
8531 drm_crtc_commit_put(commit);
8532 }
8533
8534 return ret < 0 ? ret : 0;
8535}
8536
bb47de73
NK
8537static void get_freesync_config_for_crtc(
8538 struct dm_crtc_state *new_crtc_state,
8539 struct dm_connector_state *new_con_state)
98e6436d
AK
8540{
8541 struct mod_freesync_config config = {0};
98e6436d
AK
8542 struct amdgpu_dm_connector *aconnector =
8543 to_amdgpu_dm_connector(new_con_state->base.connector);
a057ec46 8544 struct drm_display_mode *mode = &new_crtc_state->base.mode;
0ab925d3 8545 int vrefresh = drm_mode_vrefresh(mode);
98e6436d 8546
a057ec46 8547 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
0ab925d3
NK
8548 vrefresh >= aconnector->min_vfreq &&
8549 vrefresh <= aconnector->max_vfreq;
bb47de73 8550
a057ec46
IB
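 /*
 * DRM exposes refresh rates in Hz while DC expects micro-Hz (uHz);
 * e.g. a 40-60 Hz FreeSync range becomes 40,000,000-60,000,000 uHz below.
 */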
8551 if (new_crtc_state->vrr_supported) {
8552 new_crtc_state->stream->ignore_msa_timing_param = true;
bb47de73 8553 config.state = new_crtc_state->base.vrr_enabled ?
98e6436d
AK
8554 VRR_STATE_ACTIVE_VARIABLE :
8555 VRR_STATE_INACTIVE;
8556 config.min_refresh_in_uhz =
8557 aconnector->min_vfreq * 1000000;
8558 config.max_refresh_in_uhz =
8559 aconnector->max_vfreq * 1000000;
69ff8845 8560 config.vsif_supported = true;
180db303 8561 config.btr = true;
98e6436d
AK
8562 }
8563
bb47de73
NK
8564 new_crtc_state->freesync_config = config;
8565}
98e6436d 8566
bb47de73
NK
8567static void reset_freesync_config_for_crtc(
8568 struct dm_crtc_state *new_crtc_state)
8569{
8570 new_crtc_state->vrr_supported = false;
98e6436d 8571
bb47de73
NK
8572 memset(&new_crtc_state->vrr_infopacket, 0,
8573 sizeof(new_crtc_state->vrr_infopacket));
98e6436d
AK
8574}
8575
4b9674e5
LL
8576static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8577 struct drm_atomic_state *state,
8578 struct drm_crtc *crtc,
8579 struct drm_crtc_state *old_crtc_state,
8580 struct drm_crtc_state *new_crtc_state,
8581 bool enable,
8582 bool *lock_and_validation_needed)
e7b07cee 8583{
eb3dc897 8584 struct dm_atomic_state *dm_state = NULL;
54d76575 8585 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9635b754 8586 struct dc_stream_state *new_stream;
62f55537 8587 int ret = 0;
d4d4a645 8588
1f6010a9
DF
8589 /*
8590 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
8591 * update changed items
8592 */
4b9674e5
LL
8593 struct amdgpu_crtc *acrtc = NULL;
8594 struct amdgpu_dm_connector *aconnector = NULL;
8595 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8596 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
e7b07cee 8597
4b9674e5 8598 new_stream = NULL;
9635b754 8599
4b9674e5
LL
8600 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8601 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8602 acrtc = to_amdgpu_crtc(crtc);
4b9674e5 8603 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
19f89e23 8604
4b9674e5
LL
8605 /* TODO This hack should go away */
8606 if (aconnector && enable) {
8607 /* Make sure fake sink is created in plug-in scenario */
8608 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8609 &aconnector->base);
8610 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8611 &aconnector->base);
19f89e23 8612
4b9674e5
LL
8613 if (IS_ERR(drm_new_conn_state)) {
8614 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8615 goto fail;
8616 }
19f89e23 8617
4b9674e5
LL
8618 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8619 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
19f89e23 8620
02d35a67
JFZ
8621 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8622 goto skip_modeset;
8623
cbd14ae7
SW
8624 new_stream = create_validate_stream_for_sink(aconnector,
8625 &new_crtc_state->mode,
8626 dm_new_conn_state,
8627 dm_old_crtc_state->stream);
19f89e23 8628
4b9674e5
LL
8629 /*
8630 * We can have no stream on ACTION_SET if a display
8631 * was disconnected during S3; in this case it is not an
8632 * error: the OS will be updated after detection and
8633 * will do the right thing on the next atomic commit
8634 */
19f89e23 8635
4b9674e5
LL
8636 if (!new_stream) {
8637 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8638 __func__, acrtc->base.base.id);
8639 ret = -ENOMEM;
8640 goto fail;
8641 }
e7b07cee 8642
3d4e52d0
VL
8643 /*
8644 * TODO: Check VSDB bits to decide whether this should
8645 * be enabled or not.
8646 */
8647 new_stream->triggered_crtc_reset.enabled =
8648 dm->force_timing_sync;
8649
4b9674e5 8650 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
98e6436d 8651
88694af9
NK
8652 ret = fill_hdr_info_packet(drm_new_conn_state,
8653 &new_stream->hdr_static_metadata);
8654 if (ret)
8655 goto fail;
8656
7e930949
NK
8657 /*
8658 * If we already removed the old stream from the context
8659 * (and set the new stream to NULL) then we can't reuse
8660 * the old stream even if the stream and scaling are unchanged.
8661 * We'll hit the BUG_ON and black screen.
8662 *
8663 * TODO: Refactor this function to allow this check to work
8664 * in all conditions.
8665 */
8666 if (dm_new_crtc_state->stream &&
8667 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
4b9674e5
LL
8668 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8669 new_crtc_state->mode_changed = false;
8670 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
8671 new_crtc_state->mode_changed);
62f55537 8672 }
4b9674e5 8673 }
b830ebc9 8674
02d35a67 8675 /* mode_changed flag may get updated above, need to check again */
4b9674e5
LL
8676 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8677 goto skip_modeset;
e7b07cee 8678
4b9674e5
LL
8679 DRM_DEBUG_DRIVER(
8680 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8681 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8682 "connectors_changed:%d\n",
8683 acrtc->crtc_id,
8684 new_crtc_state->enable,
8685 new_crtc_state->active,
8686 new_crtc_state->planes_changed,
8687 new_crtc_state->mode_changed,
8688 new_crtc_state->active_changed,
8689 new_crtc_state->connectors_changed);
62f55537 8690
4b9674e5
LL
8691 /* Remove stream for any changed/disabled CRTC */
8692 if (!enable) {
62f55537 8693
4b9674e5
LL
8694 if (!dm_old_crtc_state->stream)
8695 goto skip_modeset;
eb3dc897 8696
4b9674e5
LL
8697 ret = dm_atomic_get_state(state, &dm_state);
8698 if (ret)
8699 goto fail;
e7b07cee 8700
4b9674e5
LL
8701 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8702 crtc->base.id);
62f55537 8703
4b9674e5
LL
8704 /* i.e. reset mode */
8705 if (dc_remove_stream_from_ctx(
8706 dm->dc,
8707 dm_state->context,
8708 dm_old_crtc_state->stream) != DC_OK) {
8709 ret = -EINVAL;
8710 goto fail;
8711 }
62f55537 8712
4b9674e5
LL
8713 dc_stream_release(dm_old_crtc_state->stream);
8714 dm_new_crtc_state->stream = NULL;
bb47de73 8715
4b9674e5 8716 reset_freesync_config_for_crtc(dm_new_crtc_state);
62f55537 8717
4b9674e5 8718 *lock_and_validation_needed = true;
62f55537 8719
4b9674e5
LL
8720 } else { /* Add stream for any updated/enabled CRTC */
8721 /*
8722 * Quick fix to prevent a NULL pointer on new_stream when newly
8723 * added MST connectors are not found in the existing crtc_state in chained mode.
8724 * TODO: dig out the root cause of this
8725 */
8725 */
8726 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8727 goto skip_modeset;
62f55537 8728
4b9674e5
LL
8729 if (modereset_required(new_crtc_state))
8730 goto skip_modeset;
62f55537 8731
4b9674e5
LL
8732 if (modeset_required(new_crtc_state, new_stream,
8733 dm_old_crtc_state->stream)) {
62f55537 8734
4b9674e5 8735 WARN_ON(dm_new_crtc_state->stream);
eb3dc897 8736
4b9674e5
LL
8737 ret = dm_atomic_get_state(state, &dm_state);
8738 if (ret)
8739 goto fail;
27b3f4fc 8740
4b9674e5 8741 dm_new_crtc_state->stream = new_stream;
62f55537 8742
4b9674e5 8743 dc_stream_retain(new_stream);
1dc90497 8744
4b9674e5
LL
8745 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8746 crtc->base.id);
1dc90497 8747
4b9674e5
LL
8748 if (dc_add_stream_to_ctx(
8749 dm->dc,
8750 dm_state->context,
8751 dm_new_crtc_state->stream) != DC_OK) {
8752 ret = -EINVAL;
8753 goto fail;
9b690ef3
BL
8754 }
8755
4b9674e5
LL
8756 *lock_and_validation_needed = true;
8757 }
8758 }
e277adc5 8759
4b9674e5
LL
8760skip_modeset:
8761 /* Release extra reference */
8762 if (new_stream)
8763 dc_stream_release(new_stream);
e277adc5 8764
4b9674e5
LL
8765 /*
8766 * We want to do dc stream updates that do not require a
8767 * full modeset below.
8768 */
2afda735 8769 if (!(enable && aconnector && new_crtc_state->active))
4b9674e5
LL
8770 return 0;
8771 /*
8772 * Given the above conditions, the dc state cannot be NULL because:
8773 * 1. we're in the process of enabling CRTCs (the stream has just been added
8774 * to the dc context, or already is on the context),
8775 * 2. a valid connector is attached, and
8776 * 3. the CRTC is currently active and enabled.
8777 * => The dc stream state currently exists.
8778 */
8779 BUG_ON(dm_new_crtc_state->stream == NULL);
a9e8d275 8780
4b9674e5
LL
8781 /* Scaling or underscan settings */
8782 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8783 update_stream_scaling_settings(
8784 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
98e6436d 8785
b05e2c5e
DF
8786 /* ABM settings */
8787 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8788
4b9674e5
LL
8789 /*
8790 * Color management settings. We also update color properties
8791 * when a modeset is needed, to ensure it gets reprogrammed.
8792 */
8793 if (dm_new_crtc_state->base.color_mgmt_changed ||
8794 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
cf020d49 8795 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
4b9674e5
LL
8796 if (ret)
8797 goto fail;
62f55537 8798 }
e7b07cee 8799
4b9674e5
LL
8800 /* Update Freesync settings. */
8801 get_freesync_config_for_crtc(dm_new_crtc_state,
8802 dm_new_conn_state);
8803
62f55537 8804 return ret;
9635b754
DS
8805
8806fail:
8807 if (new_stream)
8808 dc_stream_release(new_stream);
8809 return ret;
62f55537 8810}
9b690ef3 8811
f6ff2a08
NK
8812static bool should_reset_plane(struct drm_atomic_state *state,
8813 struct drm_plane *plane,
8814 struct drm_plane_state *old_plane_state,
8815 struct drm_plane_state *new_plane_state)
8816{
8817 struct drm_plane *other;
8818 struct drm_plane_state *old_other_state, *new_other_state;
8819 struct drm_crtc_state *new_crtc_state;
8820 int i;
8821
70a1efac
NK
8822 /*
8823 * TODO: Remove this hack once the checks below are sufficient
8824 * to determine when we need to reset all the planes on
8825 * the stream.
8826 */
8827 if (state->allow_modeset)
8828 return true;
8829
f6ff2a08
NK
8830 /* Exit early if we know that we're adding or removing the plane. */
8831 if (old_plane_state->crtc != new_plane_state->crtc)
8832 return true;
8833
8834 /* old crtc == new_crtc == NULL, plane not in context. */
8835 if (!new_plane_state->crtc)
8836 return false;
8837
8838 new_crtc_state =
8839 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8840
8841 if (!new_crtc_state)
8842 return true;
8843
7316c4ad
NK
8844 /* CRTC Degamma changes currently require us to recreate planes. */
8845 if (new_crtc_state->color_mgmt_changed)
8846 return true;
8847
f6ff2a08
NK
8848 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8849 return true;
8850
8851 /*
8852 * If there are any new primary or overlay planes being added or
8853 * removed then the z-order can potentially change. To ensure
8854 * correct z-order and pipe acquisition the current DC architecture
8855 * requires us to remove and recreate all existing planes.
8856 *
8857 * TODO: Come up with a more elegant solution for this.
8858 */
8859 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
6eed95b0 8860 struct amdgpu_framebuffer *old_afb, *new_afb;
f6ff2a08
NK
8861 if (other->type == DRM_PLANE_TYPE_CURSOR)
8862 continue;
8863
8864 if (old_other_state->crtc != new_plane_state->crtc &&
8865 new_other_state->crtc != new_plane_state->crtc)
8866 continue;
8867
8868 if (old_other_state->crtc != new_other_state->crtc)
8869 return true;
8870
dc4cb30d
NK
8871 /* Src/dst size and scaling updates. */
8872 if (old_other_state->src_w != new_other_state->src_w ||
8873 old_other_state->src_h != new_other_state->src_h ||
8874 old_other_state->crtc_w != new_other_state->crtc_w ||
8875 old_other_state->crtc_h != new_other_state->crtc_h)
8876 return true;
8877
8878 /* Rotation / mirroring updates. */
8879 if (old_other_state->rotation != new_other_state->rotation)
8880 return true;
8881
8882 /* Blending updates. */
8883 if (old_other_state->pixel_blend_mode !=
8884 new_other_state->pixel_blend_mode)
8885 return true;
8886
8887 /* Alpha updates. */
8888 if (old_other_state->alpha != new_other_state->alpha)
8889 return true;
8890
8891 /* Colorspace changes. */
8892 if (old_other_state->color_range != new_other_state->color_range ||
8893 old_other_state->color_encoding != new_other_state->color_encoding)
8894 return true;
8895
9a81cc60
NK
8896 /* Framebuffer checks fall at the end. */
8897 if (!old_other_state->fb || !new_other_state->fb)
8898 continue;
8899
8900 /* Pixel format changes can require bandwidth updates. */
8901 if (old_other_state->fb->format != new_other_state->fb->format)
8902 return true;
8903
6eed95b0
BN
8904 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
8905 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9a81cc60
NK
8906
8907 /* Tiling and DCC changes also require bandwidth updates. */
37384b3f
BN
8908 if (old_afb->tiling_flags != new_afb->tiling_flags ||
8909 old_afb->base.modifier != new_afb->base.modifier)
f6ff2a08
NK
8910 return true;
8911 }
8912
8913 return false;
8914}
8915
9e869063
LL
8916static int dm_update_plane_state(struct dc *dc,
8917 struct drm_atomic_state *state,
8918 struct drm_plane *plane,
8919 struct drm_plane_state *old_plane_state,
8920 struct drm_plane_state *new_plane_state,
8921 bool enable,
8922 bool *lock_and_validation_needed)
62f55537 8923{
eb3dc897
NK
8924
8925 struct dm_atomic_state *dm_state = NULL;
62f55537 8926 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
0bc9706d 8927 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
54d76575 8928 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
54d76575 8929 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
626bf90f 8930 struct amdgpu_crtc *new_acrtc;
f6ff2a08 8931 bool needs_reset;
62f55537 8932 int ret = 0;
e7b07cee 8933
9b690ef3 8934
9e869063
LL
8935 new_plane_crtc = new_plane_state->crtc;
8936 old_plane_crtc = old_plane_state->crtc;
8937 dm_new_plane_state = to_dm_plane_state(new_plane_state);
8938 dm_old_plane_state = to_dm_plane_state(old_plane_state);
62f55537 8939
626bf90f
SS
8940 /*TODO Implement better atomic check for cursor plane */
8941 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8942 if (!enable || !new_plane_crtc ||
8943 drm_atomic_plane_disabling(plane->state, new_plane_state))
8944 return 0;
8945
8946 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
8947
8948 if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
8949 (new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
8950 DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
8951 new_plane_state->crtc_w, new_plane_state->crtc_h);
8952 return -EINVAL;
8953 }
8954
9e869063 8955 return 0;
626bf90f 8956 }
9b690ef3 8957
f6ff2a08
NK
8958 needs_reset = should_reset_plane(state, plane, old_plane_state,
8959 new_plane_state);
8960
9e869063
LL
8961 /* Remove any changed/removed planes */
8962 if (!enable) {
f6ff2a08 8963 if (!needs_reset)
9e869063 8964 return 0;
a7b06724 8965
9e869063
LL
8966 if (!old_plane_crtc)
8967 return 0;
62f55537 8968
9e869063
LL
8969 old_crtc_state = drm_atomic_get_old_crtc_state(
8970 state, old_plane_crtc);
8971 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9b690ef3 8972
9e869063
LL
8973 if (!dm_old_crtc_state->stream)
8974 return 0;
62f55537 8975
9e869063
LL
8976 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
8977 plane->base.id, old_plane_crtc->base.id);
9b690ef3 8978
9e869063
LL
8979 ret = dm_atomic_get_state(state, &dm_state);
8980 if (ret)
8981 return ret;
eb3dc897 8982
9e869063
LL
8983 if (!dc_remove_plane_from_context(
8984 dc,
8985 dm_old_crtc_state->stream,
8986 dm_old_plane_state->dc_state,
8987 dm_state->context)) {
62f55537 8988
c3537613 8989 return -EINVAL;
9e869063 8990 }
e7b07cee 8991
9b690ef3 8992
9e869063
LL
8993 dc_plane_state_release(dm_old_plane_state->dc_state);
8994 dm_new_plane_state->dc_state = NULL;
1dc90497 8995
9e869063 8996 *lock_and_validation_needed = true;
1dc90497 8997
9e869063
LL
8998 } else { /* Add new planes */
8999 struct dc_plane_state *dc_new_plane_state;
1dc90497 9000
9e869063
LL
9001 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9002 return 0;
e7b07cee 9003
9e869063
LL
9004 if (!new_plane_crtc)
9005 return 0;
e7b07cee 9006
9e869063
LL
9007 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9008 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1dc90497 9009
9e869063
LL
9010 if (!dm_new_crtc_state->stream)
9011 return 0;
62f55537 9012
f6ff2a08 9013 if (!needs_reset)
9e869063 9014 return 0;
62f55537 9015
8c44515b
AP
9016 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9017 if (ret)
9018 return ret;
9019
9e869063 9020 WARN_ON(dm_new_plane_state->dc_state);
9b690ef3 9021
9e869063
LL
9022 dc_new_plane_state = dc_create_plane_state(dc);
9023 if (!dc_new_plane_state)
9024 return -ENOMEM;
62f55537 9025
9e869063
LL
9026 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
9027 plane->base.id, new_plane_crtc->base.id);
8c45c5db 9028
695af5f9 9029 ret = fill_dc_plane_attributes(
1348969a 9030 drm_to_adev(new_plane_crtc->dev),
9e869063
LL
9031 dc_new_plane_state,
9032 new_plane_state,
9033 new_crtc_state);
9034 if (ret) {
9035 dc_plane_state_release(dc_new_plane_state);
9036 return ret;
9037 }
62f55537 9038
9e869063
LL
9039 ret = dm_atomic_get_state(state, &dm_state);
9040 if (ret) {
9041 dc_plane_state_release(dc_new_plane_state);
9042 return ret;
9043 }
eb3dc897 9044
9e869063
LL
9045 /*
9046 * Any atomic check errors that occur after this will
9047 * not need a release. The plane state will be attached
9048 * to the stream, and therefore part of the atomic
9049 * state. It'll be released when the atomic state is
9050 * cleaned.
9051 */
9052 if (!dc_add_plane_to_context(
9053 dc,
9054 dm_new_crtc_state->stream,
9055 dc_new_plane_state,
9056 dm_state->context)) {
62f55537 9057
9e869063
LL
9058 dc_plane_state_release(dc_new_plane_state);
9059 return -EINVAL;
9060 }
8c45c5db 9061
9e869063 9062 dm_new_plane_state->dc_state = dc_new_plane_state;
000b59ea 9063
9e869063
LL
9064 /* Tell DC to do a full surface update every time there
9065 * is a plane change. Inefficient, but works for now.
9066 */
9067 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9068
9069 *lock_and_validation_needed = true;
62f55537 9070 }
e7b07cee
HW
9071
9072
62f55537
AG
9073 return ret;
9074}
a87fa993 9075
e10517b3 9076#if defined(CONFIG_DRM_AMD_DC_DCN)
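/*
 * Find the first MST connector driven by this CRTC and pull every CRTC that
 * shares its MST topology into the atomic state, since changing DSC on one
 * stream can affect the bandwidth available to the others.
 */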
44be939f
ML
9077static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9078{
9079 struct drm_connector *connector;
9080 struct drm_connector_state *conn_state;
9081 struct amdgpu_dm_connector *aconnector = NULL;
9082 int i;
9083 for_each_new_connector_in_state(state, connector, conn_state, i) {
9084 if (conn_state->crtc != crtc)
9085 continue;
9086
9087 aconnector = to_amdgpu_dm_connector(connector);
9088 if (!aconnector->port || !aconnector->mst_port)
9089 aconnector = NULL;
9090 else
9091 break;
9092 }
9093
9094 if (!aconnector)
9095 return 0;
9096
9097 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9098}
e10517b3 9099#endif
44be939f 9100
b8592b48
LL
9101/**
9102 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9103 * @dev: The DRM device
9104 * @state: The atomic state to commit
9105 *
9106 * Validate that the given atomic state is programmable by DC into hardware.
9107 * This involves constructing a &struct dc_state reflecting the new hardware
9108 * state we wish to commit, then querying DC to see if it is programmable. It's
9109 * important not to modify the existing DC state. Otherwise, atomic_check
9110 * may unexpectedly commit hardware changes.
9111 *
9112 * When validating the DC state, it's important that the right locks are
9113 * acquired. For full updates case which removes/adds/updates streams on one
9114 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
9115 * that any such full update commit will wait for completion of any outstanding
f6d7c7fa 9116 * flip using DRMs synchronization events.
b8592b48
LL
9117 *
9118 * Note that DM adds the affected connectors for all CRTCs in state, when that
9119 * might not seem necessary. This is because DC stream creation requires the
9120 * DC sink, which is tied to the DRM connector state. Cleaning this up should
9121 * be possible but non-trivial - a possible TODO item.
9122 *
9123 * Return: -Error code if validation failed.
9124 */
7578ecda
AD
9125static int amdgpu_dm_atomic_check(struct drm_device *dev,
9126 struct drm_atomic_state *state)
62f55537 9127{
1348969a 9128 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897 9129 struct dm_atomic_state *dm_state = NULL;
62f55537 9130 struct dc *dc = adev->dm.dc;
62f55537 9131 struct drm_connector *connector;
c2cea706 9132 struct drm_connector_state *old_con_state, *new_con_state;
62f55537 9133 struct drm_crtc *crtc;
fc9e9920 9134 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9e869063
LL
9135 struct drm_plane *plane;
9136 struct drm_plane_state *old_plane_state, *new_plane_state;
74a16675 9137 enum dc_status status;
1e88ad0a 9138 int ret, i;
62f55537 9139 bool lock_and_validation_needed = false;
886876ec 9140 struct dm_crtc_state *dm_old_crtc_state;
62f55537 9141
e8a98235 9142 trace_amdgpu_dm_atomic_check_begin(state);
c44a22b3 9143
62f55537 9144 ret = drm_atomic_helper_check_modeset(dev, state);
01e28f9c
MD
9145 if (ret)
9146 goto fail;
62f55537 9147
c5892a10
SW
9148 /* Check connector changes */
9149 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9150 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9151 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9152
9153 /* Skip connectors that are disabled or part of modeset already. */
9154 if (!old_con_state->crtc && !new_con_state->crtc)
9155 continue;
9156
9157 if (!new_con_state->crtc)
9158 continue;
9159
9160 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9161 if (IS_ERR(new_crtc_state)) {
9162 ret = PTR_ERR(new_crtc_state);
9163 goto fail;
9164 }
9165
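 /*
 * Treat an ABM level change as a connector change so the commit
 * takes the full modeset path and reprograms the new level.
 */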
9166 if (dm_old_con_state->abm_level !=
9167 dm_new_con_state->abm_level)
9168 new_crtc_state->connectors_changed = true;
9169 }
9170
e10517b3 9171#if defined(CONFIG_DRM_AMD_DC_DCN)
44be939f
ML
9172 if (adev->asic_type >= CHIP_NAVI10) {
9173 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9174 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9175 ret = add_affected_mst_dsc_crtcs(state, crtc);
9176 if (ret)
9177 goto fail;
9178 }
9179 }
9180 }
e10517b3 9181#endif
1e88ad0a 9182 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
886876ec
EB
9183 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9184
1e88ad0a 9185 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
98e6436d 9186 !new_crtc_state->color_mgmt_changed &&
886876ec
EB
9187 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
9188 dm_old_crtc_state->dsc_force_changed == false)
1e88ad0a 9189 continue;
7bef1af3 9190
1e88ad0a
S
9191 if (!new_crtc_state->enable)
9192 continue;
fc9e9920 9193
1e88ad0a
S
9194 ret = drm_atomic_add_affected_connectors(state, crtc);
9195 if (ret)
9196 return ret;
fc9e9920 9197
1e88ad0a
S
9198 ret = drm_atomic_add_affected_planes(state, crtc);
9199 if (ret)
9200 goto fail;
e7b07cee
HW
9201 }
9202
2d9e6431
NK
9203 /*
9204 * Add all primary and overlay planes on the CRTC to the state
9205 * whenever a plane is enabled to maintain correct z-ordering
9206 * and to enable fast surface updates.
9207 */
9208 drm_for_each_crtc(crtc, dev) {
9209 bool modified = false;
9210
9211 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9212 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9213 continue;
9214
9215 if (new_plane_state->crtc == crtc ||
9216 old_plane_state->crtc == crtc) {
9217 modified = true;
9218 break;
9219 }
9220 }
9221
9222 if (!modified)
9223 continue;
9224
9225 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9226 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9227 continue;
9228
9229 new_plane_state =
9230 drm_atomic_get_plane_state(state, plane);
9231
9232 if (IS_ERR(new_plane_state)) {
9233 ret = PTR_ERR(new_plane_state);
9234 goto fail;
9235 }
9236 }
9237 }
9238
62f55537 9239 /* Remove existing planes if they are modified */
9e869063
LL
9240 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9241 ret = dm_update_plane_state(dc, state, plane,
9242 old_plane_state,
9243 new_plane_state,
9244 false,
9245 &lock_and_validation_needed);
9246 if (ret)
9247 goto fail;
62f55537
AG
9248 }
9249
9250 /* Disable all crtcs which require disable */
4b9674e5
LL
9251 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9252 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9253 old_crtc_state,
9254 new_crtc_state,
9255 false,
9256 &lock_and_validation_needed);
9257 if (ret)
9258 goto fail;
62f55537
AG
9259 }
9260
9261 /* Enable all crtcs which require enable */
4b9674e5
LL
9262 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9263 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9264 old_crtc_state,
9265 new_crtc_state,
9266 true,
9267 &lock_and_validation_needed);
9268 if (ret)
9269 goto fail;
62f55537
AG
9270 }
9271
9272 /* Add new/modified planes */
9e869063
LL
9273 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9274 ret = dm_update_plane_state(dc, state, plane,
9275 old_plane_state,
9276 new_plane_state,
9277 true,
9278 &lock_and_validation_needed);
9279 if (ret)
9280 goto fail;
62f55537
AG
9281 }
9282
b349f76e
ES
9283 /* Run this here since we want to validate the streams we created */
9284 ret = drm_atomic_helper_check_planes(dev, state);
9285 if (ret)
9286 goto fail;
62f55537 9287
43d10d30
NK
9288 if (state->legacy_cursor_update) {
9289 /*
9290 * This is a fast cursor update coming from the plane update
9291 * helper, check if it can be done asynchronously for better
9292 * performance.
9293 */
9294 state->async_update =
9295 !drm_atomic_helper_async_check(dev, state);
9296
9297 /*
9298 * Skip the remaining global validation if this is an async
9299 * update. Cursor updates can be done without affecting
9300 * state or bandwidth calcs and this avoids the performance
9301 * penalty of locking the private state object and
9302 * allocating a new dc_state.
9303 */
9304 if (state->async_update)
9305 return 0;
9306 }
9307
ebdd27e1 9308 /* Check scaling and underscan changes */
1f6010a9 9309 /* TODO Removed scaling changes validation due to inability to commit a
e7b07cee
HW
9310 * new stream into the context w/o causing a full reset. Need to
9311 * decide how to handle this.
9312 */
9312 */
c2cea706 9313 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
54d76575
LSL
9314 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9315 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9316 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
e7b07cee
HW
9317
9318 /* Skip any modesets/resets */
0bc9706d
LSL
9319 if (!acrtc || drm_atomic_crtc_needs_modeset(
9320 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
e7b07cee
HW
9321 continue;
9322
b830ebc9 9323 /* Skip anything that is not a scaling or underscan change */
54d76575 9324 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
e7b07cee
HW
9325 continue;
9326
9327 lock_and_validation_needed = true;
9328 }
9329
f6d7c7fa
NK
9330 /**
9331 * Streams and planes are reset when there are changes that affect
9332 * bandwidth. Anything that affects bandwidth needs to go through
9333 * DC global validation to ensure that the configuration can be applied
9334 * to hardware.
9335 *
9336 * We have to currently stall out here in atomic_check for outstanding
9337 * commits to finish in this case because our IRQ handlers reference
9338 * DRM state directly - we can end up disabling interrupts too early
9339 * if we don't.
9340 *
9341 * TODO: Remove this stall and drop DM state private objects.
a87fa993 9342 */
f6d7c7fa 9343 if (lock_and_validation_needed) {
eb3dc897
NK
9344 ret = dm_atomic_get_state(state, &dm_state);
9345 if (ret)
9346 goto fail;
e7b07cee
HW
9347
9348 ret = do_aquire_global_lock(dev, state);
9349 if (ret)
9350 goto fail;
1dc90497 9351
d9fe1a4c 9352#if defined(CONFIG_DRM_AMD_DC_DCN)
8c20a1ed
DF
9353 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
9354 goto fail;
9355
29b9ba74
ML
9356 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
9357 if (ret)
9358 goto fail;
d9fe1a4c 9359#endif
29b9ba74 9360
ded58c7b
ZL
9361 /*
9362 * Perform validation of MST topology in the state:
9363 * We need to perform MST atomic check before calling
9364 * dc_validate_global_state(), or we risk getting
9365 * stuck in an infinite loop and eventually hanging.
9366 */
9367 ret = drm_dp_mst_atomic_check(state);
9368 if (ret)
9369 goto fail;
74a16675
RS
9370 status = dc_validate_global_state(dc, dm_state->context, false);
9371 if (status != DC_OK) {
9372 DC_LOG_WARNING("DC global validation failure: %s (%d)",
9373 dc_status_to_str(status), status);
e7b07cee
HW
9374 ret = -EINVAL;
9375 goto fail;
9376 }
bd200d19 9377 } else {
674e78ac 9378 /*
bd200d19
NK
9379 * The commit is a fast update. Fast updates shouldn't change
9380 * the DC context, affect global validation, and can have their
9381 * commit work done in parallel with other commits not touching
9382 * the same resource. If we have a new DC context as part of
9383 * the DM atomic state from validation we need to free it and
9384 * retain the existing one instead.
fde9f39a
MR
9385 *
9386 * Furthermore, since the DM atomic state only contains the DC
9387 * context and can safely be annulled, we can free the state
9388 * and clear the associated private object now to free
9389 * some memory and avoid a possible use-after-free later.
674e78ac 9390 */
bd200d19 9391
fde9f39a
MR
9392 for (i = 0; i < state->num_private_objs; i++) {
9393 struct drm_private_obj *obj = state->private_objs[i].ptr;
bd200d19 9394
fde9f39a
MR
9395 if (obj->funcs == adev->dm.atomic_obj.funcs) {
9396 int j = state->num_private_objs-1;
bd200d19 9397
fde9f39a
MR
9398 dm_atomic_destroy_state(obj,
9399 state->private_objs[i].state);
9400
9401 /* If i is not at the end of the array then the
9402 * last element needs to be moved to where i was
9403 * before the array can safely be truncated.
9404 */
9405 if (i != j)
9406 state->private_objs[i] =
9407 state->private_objs[j];
bd200d19 9408
fde9f39a
MR
9409 state->private_objs[j].ptr = NULL;
9410 state->private_objs[j].state = NULL;
9411 state->private_objs[j].old_state = NULL;
9412 state->private_objs[j].new_state = NULL;
9413
9414 state->num_private_objs = j;
9415 break;
9416 }
bd200d19 9417 }
e7b07cee
HW
9418 }
9419
caff0e66
NK
9420 /* Store the overall update type for use later in atomic check. */
9421 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
9422 struct dm_crtc_state *dm_new_crtc_state =
9423 to_dm_crtc_state(new_crtc_state);
9424
f6d7c7fa
NK
9425 dm_new_crtc_state->update_type = lock_and_validation_needed ?
9426 UPDATE_TYPE_FULL :
9427 UPDATE_TYPE_FAST;
e7b07cee
HW
9428 }
9429
9430 /* Must be success */
9431 WARN_ON(ret);
e8a98235
RS
9432
9433 trace_amdgpu_dm_atomic_check_finish(state, ret);
9434
e7b07cee
HW
9435 return ret;
9436
9437fail:
9438 if (ret == -EDEADLK)
01e28f9c 9439 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
e7b07cee 9440 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
01e28f9c 9441 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
e7b07cee 9442 else
01e28f9c 9443 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
e7b07cee 9444
e8a98235
RS
9445 trace_amdgpu_dm_atomic_check_finish(state, ret);
9446
e7b07cee
HW
9447 return ret;
9448}
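
/*
 * A sink that sets DP_MSA_TIMING_PAR_IGNORED in its DP_DOWN_STREAM_PORT_COUNT
 * DPCD register can ignore the MSA timing parameters, which is a prerequisite
 * for driving it with a variable refresh rate.
 */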
9449
3ee6b26b
AD
9450static bool is_dp_capable_without_timing_msa(struct dc *dc,
9451 struct amdgpu_dm_connector *amdgpu_dm_connector)
e7b07cee
HW
9452{
9453 uint8_t dpcd_data;
9454 bool capable = false;
9455
c84dec2f 9456 if (amdgpu_dm_connector->dc_link &&
e7b07cee
HW
9457 dm_helpers_dp_read_dpcd(
9458 NULL,
c84dec2f 9459 amdgpu_dm_connector->dc_link,
e7b07cee
HW
9460 DP_DOWN_STREAM_PORT_COUNT,
9461 &dpcd_data,
9462 sizeof(dpcd_data))) {
9463 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
9464 }
9465
9466 return capable;
9467}
98e6436d
AK
9468void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
9469 struct edid *edid)
e7b07cee
HW
9470{
9471 int i;
e7b07cee
HW
9472 bool edid_check_required;
9473 struct detailed_timing *timing;
9474 struct detailed_non_pixel *data;
9475 struct detailed_data_monitor_range *range;
c84dec2f
HW
9476 struct amdgpu_dm_connector *amdgpu_dm_connector =
9477 to_amdgpu_dm_connector(connector);
bb47de73 9478 struct dm_connector_state *dm_con_state = NULL;
e7b07cee
HW
9479
9480 struct drm_device *dev = connector->dev;
1348969a 9481 struct amdgpu_device *adev = drm_to_adev(dev);
bb47de73 9482 bool freesync_capable = false;
b830ebc9 9483
8218d7f1
HW
9484 if (!connector->state) {
9485 DRM_ERROR("%s - Connector has no state", __func__);
bb47de73 9486 goto update;
8218d7f1
HW
9487 }
9488
98e6436d
AK
9489 if (!edid) {
9490 dm_con_state = to_dm_connector_state(connector->state);
9491
9492 amdgpu_dm_connector->min_vfreq = 0;
9493 amdgpu_dm_connector->max_vfreq = 0;
9494 amdgpu_dm_connector->pixel_clock_mhz = 0;
9495
bb47de73 9496 goto update;
98e6436d
AK
9497 }
9498
8218d7f1
HW
9499 dm_con_state = to_dm_connector_state(connector->state);
9500
e7b07cee 9501 edid_check_required = false;
c84dec2f 9502 if (!amdgpu_dm_connector->dc_sink) {
e7b07cee 9503 DRM_ERROR("dc_sink NULL, could not add FreeSync module.\n");
bb47de73 9504 goto update;
e7b07cee
HW
9505 }
9506 if (!adev->dm.freesync_module)
bb47de73 9507 goto update;
e7b07cee
HW
9508 /*
9509 * if edid is non-zero, restrict freesync only to dp and edp
9510 */
9511 if (edid) {
c84dec2f
HW
9512 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
9513 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
e7b07cee
HW
9514 edid_check_required = is_dp_capable_without_timing_msa(
9515 adev->dm.dc,
c84dec2f 9516 amdgpu_dm_connector);
e7b07cee
HW
9517 }
9518 }
e7b07cee
HW
9519 if (edid_check_required && (edid->version > 1 ||
9520 (edid->version == 1 && edid->revision > 1))) {
9521 for (i = 0; i < 4; i++) {
9522
9523 timing = &edid->detailed_timings[i];
9524 data = &timing->data.other_data;
9525 range = &data->data.range;
9526 /*
9527 * Check if monitor has continuous frequency mode
9528 */
9529 if (data->type != EDID_DETAIL_MONITOR_RANGE)
9530 continue;
9531 /*
9532 * Check for range-limits flags only. If flags == 1 then
9533 * no additional timing information is provided.
9534 * Default GTF, GTF secondary curve and CVT are not
9535 * supported
9536 */
9537 if (range->flags != 1)
9538 continue;
9539
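 /*
 * The EDID range descriptor reports vertical rates in Hz and the
 * maximum pixel clock in 10 MHz units, hence the multiplication
 * by 10 below.
 */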
c84dec2f
HW
9540 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
9541 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
9542 amdgpu_dm_connector->pixel_clock_mhz =
e7b07cee
HW
9543 range->pixel_clock_mhz * 10;
9544 break;
9545 }
9546
c84dec2f 9547 if (amdgpu_dm_connector->max_vfreq -
98e6436d
AK
9548 amdgpu_dm_connector->min_vfreq > 10) {
9549
bb47de73 9550 freesync_capable = true;
e7b07cee
HW
9551 }
9552 }
bb47de73
NK
9553
9554update:
9555 if (dm_con_state)
9556 dm_con_state->freesync_capable = freesync_capable;
9557
9558 if (connector->vrr_capable_property)
9559 drm_connector_set_vrr_capable_property(connector,
9560 freesync_capable);
e7b07cee
HW
9561}
9562
8c322309
RL
9563static void amdgpu_dm_set_psr_caps(struct dc_link *link)
9564{
9565 uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
9566
9567 if (!(link->connector_signal & SIGNAL_TYPE_EDP))
9568 return;
9569 if (link->type == dc_connection_none)
9570 return;
9571 if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
9572 dpcd_data, sizeof(dpcd_data))) {
d1ebfdd8
WW
9573 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
9574
9575 if (dpcd_data[0] == 0) {
1cfbbdde 9576 link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
d1ebfdd8
WW
9577 link->psr_settings.psr_feature_enabled = false;
9578 } else {
1cfbbdde 9579 link->psr_settings.psr_version = DC_PSR_VERSION_1;
d1ebfdd8
WW
9580 link->psr_settings.psr_feature_enabled = true;
9581 }
9582
9583 DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
8c322309
RL
9584 }
9585}
9586
9587/*
9588 * amdgpu_dm_link_setup_psr() - configure psr link
9589 * @stream: stream state
9590 *
9591 * Return: true if success
9592 */
9593static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9594{
9595 struct dc_link *link = NULL;
9596 struct psr_config psr_config = {0};
9597 struct psr_context psr_context = {0};
8c322309
RL
9598 bool ret = false;
9599
9600 if (stream == NULL)
9601 return false;
9602
9603 link = stream->link;
8c322309 9604
d1ebfdd8 9605 psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
8c322309
RL
9606
9607 if (psr_config.psr_version > 0) {
9608 psr_config.psr_exit_link_training_required = 0x1;
9609 psr_config.psr_frame_capture_indication_req = 0;
9610 psr_config.psr_rfb_setup_time = 0x37;
9611 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9612 psr_config.allow_smu_optimizations = 0x0;
9613
9614 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
9615
9616 }
d1ebfdd8 9617 DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
8c322309
RL
9618
9619 return ret;
9620}
9621
9622/*
9623 * amdgpu_dm_psr_enable() - enable psr f/w
9624 * @stream: stream state
9625 *
9626 * Return: true if success
9627 */
9628bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9629{
9630 struct dc_link *link = stream->link;
5b5abe95
AK
9631 unsigned int vsync_rate_hz = 0;
9632 struct dc_static_screen_params params = {0};
9633 /* Calculate number of static frames before generating interrupt to
9634 * enter PSR.
9635 */
5b5abe95
AK
9636 /* Init fail-safe of 2 static frames */
9637 unsigned int num_frames_static = 2;
8c322309
RL
9638
9639 DRM_DEBUG_DRIVER("Enabling psr...\n");
9640
5b5abe95
AK
9641 vsync_rate_hz = div64_u64(div64_u64((
9642 stream->timing.pix_clk_100hz * 100),
9643 stream->timing.v_total),
9644 stream->timing.h_total);
9645
9646 /* Round up
9647 * Calculate number of frames such that at least 30 ms of time has
9648 * passed.
9649 */
7aa62404
RL
9650 if (vsync_rate_hz != 0) {
9651 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
5b5abe95 9652 num_frames_static = (30000 / frame_time_microsec) + 1;
7aa62404 9653 }
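 /*
 * Worked example: at 60 Hz the frame time is 16,666 us, so
 * num_frames_static = 30000 / 16666 + 1 = 2 frames.
 */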
5b5abe95
AK
9654
9655 params.triggers.cursor_update = true;
9656 params.triggers.overlay_update = true;
9657 params.triggers.surface_update = true;
9658 params.num_frames = num_frames_static;
8c322309 9659
5b5abe95 9660 dc_stream_set_static_screen_params(link->ctx->dc,
8c322309 9661 &stream, 1,
5b5abe95 9662 &params);
8c322309 9663
1d496907 9664 return dc_link_set_psr_allow_active(link, true, false, false);
8c322309
RL
9665}
9666
9667/*
9668 * amdgpu_dm_psr_disable() - disable psr f/w
9669 * @stream: stream state
9670 *
9671 * Return: true if success
9672 */
9673static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9674{
9675
9676 DRM_DEBUG_DRIVER("Disabling psr...\n");
9677
1d496907 9678 return dc_link_set_psr_allow_active(stream->link, false, true, false);
8c322309 9679}
3d4e52d0 9680
6ee90e88 9681/*
9682 * amdgpu_dm_psr_disable_all() - disable psr f/w
9683 * if psr is enabled on any stream
9684 *
9685 * Return: true if success
9686 */
9687static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
9688{
9689 DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
9690 return dc_set_psr_allow_active(dm->dc, false);
9691}
9692
3d4e52d0
VL
9693void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
9694{
1348969a 9695 struct amdgpu_device *adev = drm_to_adev(dev);
3d4e52d0
VL
9696 struct dc *dc = adev->dm.dc;
9697 int i;
9698
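 /*
 * Propagate the current force_timing_sync setting to every active
 * stream and retrigger OTG synchronization under the DC lock.
 */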
9699 mutex_lock(&adev->dm.dc_lock);
9700 if (dc->current_state) {
9701 for (i = 0; i < dc->current_state->stream_count; ++i)
9702 dc->current_state->streams[i]
9703 ->triggered_crtc_reset.enabled =
9704 adev->dm.force_timing_sync;
9705
9706 dm_enable_per_frame_crtc_master_sync(dc->current_state);
9707 dc_trigger_sync(dc, dc->current_state);
9708 }
9709 mutex_unlock(&adev->dm.dc_lock);
9710}
9d83722d
RS
9711
9712void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
9713 uint32_t value, const char *func_name)
9714{
9715#ifdef DM_CHECK_ADDR_0
9716 if (address == 0) {
9717 DC_ERR("invalid register write; address = 0\n");
9718 return;
9719 }
9720#endif
9721 cgs_write_register(ctx->cgs_device, address, value);
9722 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
9723}
9724
9725uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
9726 const char *func_name)
9727{
9728 uint32_t value;
9729#ifdef DM_CHECK_ADDR_0
9730 if (address == 0) {
9731 DC_ERR("invalid register read; address = 0\n");
9732 return 0;
9733 }
9734#endif
9735
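 /*
 * While the DMUB register helper is gathering writes for offload
 * (and not burst-writing), a direct CPU read here would be invalid,
 * so assert and return 0 instead.
 */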
9736 if (ctx->dmub_srv &&
9737 ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
9738 !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
9739 ASSERT(false);
9740 return 0;
9741 }
9742
9743 value = cgs_read_register(ctx->cgs_device, address);
9744
9745 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
9746
9747 return value;
9748}