drm/amd/display: check cursor scaling
[linux-2.6-block.git] drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

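/*
 * Map the dongle type reported in the link's DPCD caps onto the DRM
 * subconnector type exposed to userspace.
 */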
static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

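/* Reflect the current DP dongle type in the connector's subconnector property. */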
static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
	}
}

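/*
 * Return the current scanout position and vblank start/end for the given
 * CRTC, packed into the register-style format the base driver expects.
 */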
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

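/*
 * Look up the amdgpu_crtc driven by the given output timing generator (OTG)
 * instance; warns and falls back to the first CRTC on an invalid instance.
 */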
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

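/*
 * VRR state helpers: the _irq variant reads the freesync state cached in the
 * CRTC's IRQ parameters, the other reads it from the dm CRTC state.
 */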
static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	if (!e)
		WARN_ON(1);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one is incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}

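/**
 * dm_vupdate_high_irq() - Handle VUPDATE interrupt
 * @interrupt_params: used for determining the VUPDATE instance
 *
 * Runs after end of front-porch. In VRR mode this is where core vblank
 * handling (and BTR processing on pre-DCE12 ASICs) is deferred to, since
 * only here vblank timestamps are valid.
 */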
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}
	}
}

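/*
 * DRM audio component glue: lets the HDA driver query ELD data for a given
 * audio pin and get notified when it changes.
 */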
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

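/*
 * Copy the DMUB firmware, VBIOS and scratch regions into framebuffer memory,
 * program the hardware parameters into the DMUB service and wait for the
 * firmware to auto-load. A no-op (returning 0) on ASICs without DMUB support.
 */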
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

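/*
 * Collect the system aperture, AGP aperture and GART page-table addresses
 * from the GMC into a dc_phy_addr_space_config for DC.
 */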
#if defined(CONFIG_DRM_AMD_DC_DCN)
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increases the system aperture high address
		 * (by 1) to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;
}
#endif

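/* Create the atomic properties that describe the CRC calculation window. */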
#ifdef CONFIG_DEBUG_FS
static int create_crtc_crc_properties(struct amdgpu_display_manager *dm)
{
	dm->crc_win_x_start_property =
		drm_property_create_range(adev_to_drm(dm->adev),
					  DRM_MODE_PROP_ATOMIC,
					  "AMD_CRC_WIN_X_START", 0, U16_MAX);
	if (!dm->crc_win_x_start_property)
		return -ENOMEM;

	dm->crc_win_y_start_property =
		drm_property_create_range(adev_to_drm(dm->adev),
					  DRM_MODE_PROP_ATOMIC,
					  "AMD_CRC_WIN_Y_START", 0, U16_MAX);
	if (!dm->crc_win_y_start_property)
		return -ENOMEM;

	dm->crc_win_x_end_property =
		drm_property_create_range(adev_to_drm(dm->adev),
					  DRM_MODE_PROP_ATOMIC,
					  "AMD_CRC_WIN_X_END", 0, U16_MAX);
	if (!dm->crc_win_x_end_property)
		return -ENOMEM;

	dm->crc_win_y_end_property =
		drm_property_create_range(adev_to_drm(dm->adev),
					  DRM_MODE_PROP_ATOMIC,
					  "AMD_CRC_WIN_Y_END", 0, U16_MAX);
	if (!dm->crc_win_y_end_property)
		return -ENOMEM;

	return 0;
}
#endif

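/*
 * Create DC, set up IRQ handling and the DM modules (freesync, color
 * management, optional HDCP), bring up DMUB, then register the DRM display
 * structures. Called from dm_hw_init().
 */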
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			init_data.flags.disable_dmcu = true;
		break;
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	init_data.soc_bounding_box = adev->dm.soc_bounding_box;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->asic_type == CHIP_RENOIR) {
		struct dc_phy_addr_space_config pa_config;

		mmhub_read_system_context(adev, &pa_config);

		// Call the DC init_memory func
		dc_setup_system_context(adev->dm.dc, &pa_config);
	}
#endif

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
#ifdef CONFIG_DEBUG_FS
	if (create_crtc_crc_properties(&adev->dm))
		DRM_ERROR("amdgpu: failed to create crc property.\n");
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif
	if (adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);

	return;
}

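/*
 * Request and validate the DMCU firmware for ASICs that need it and register
 * it with the PSP loader; ASICs without a separate DMCU image return 0.
 */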
static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_VANGOGH:
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}

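/*
 * Select the DMUB firmware for this ASIC, create the DMUB service, size its
 * regions, back them with a VRAM buffer and compute the framebuffer info
 * that dm_dmub_hw_init() consumes later.
 */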
static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
		break;
	case CHIP_SIENNA_CICHLID:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
		break;
	case CHIP_NAVY_FLOUNDER:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
		break;
	case CHIP_VANGOGH:
		dmub_asic = DMUB_ASIC_DCN301;
		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
		break;
	case CHIP_DIMGREY_CAVEFISH:
		dmub_asic = DMUB_ASIC_DCN302;
		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
		break;

	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data = region_params.bss_data_size ?
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes) : NULL;
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}

1480
a94d5569
DF
1481static int dm_sw_init(void *handle)
1482{
1483 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
743b9786
NK
1484 int r;
1485
1486 r = dm_dmub_sw_init(adev);
1487 if (r)
1488 return r;
a94d5569
DF
1489
1490 return load_dmcu_fw(adev);
1491}
1492
4562236b
HW
1493static int dm_sw_fini(void *handle)
1494{
a94d5569
DF
1495 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1496
8c7aea40
NK
1497 kfree(adev->dm.dmub_fb_info);
1498 adev->dm.dmub_fb_info = NULL;
1499
743b9786
NK
1500 if (adev->dm.dmub_srv) {
1501 dmub_srv_destroy(adev->dm.dmub_srv);
1502 adev->dm.dmub_srv = NULL;
1503 }
1504
75e1658e
ND
1505 release_firmware(adev->dm.dmub_fw);
1506 adev->dm.dmub_fw = NULL;
743b9786 1507
75e1658e
ND
1508 release_firmware(adev->dm.fw_dmcu);
1509 adev->dm.fw_dmcu = NULL;
a94d5569 1510
4562236b
HW
1511 return 0;
1512}
1513
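/* Start MST topology management on every MST branch connector. */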
static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}

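/*
 * Program the ABM backlight ramping parameters (a linear LUT and a 1%
 * minimum backlight) into DMCU IRAM, or into DMUB where ABM runs there,
 * then kick off MST link detection.
 */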
static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;
	bool ret = true;

	dmcu = adev->dm.dc->res_pool->dmcu;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction; don't allow below 1%
	 * 0xFFFF x 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;

	/* In the case where abm is implemented on dmcub,
	 * dmcu object will be null.
	 * ABM 2.4 and up are implemented on dmcub.
	 */
	if (dmcu)
		ret = dmcu_load_iram(dmcu, params);
	else if (adev->dm.dc->ctx->dmub_srv)
		ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);

	if (!ret)
		return -EINVAL;

	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
}

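/* Suspend or resume the MST topology managers across an S3 cycle. */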
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}

9340dfd3
HW
1619static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1620{
1621 struct smu_context *smu = &adev->smu;
1622 int ret = 0;
1623
1624 if (!is_support_sw_smu(adev))
1625 return 0;
1626
1627 /* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
1628 * on window driver dc implementation.
1629 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
1630 * should be passed to smu during boot up and resume from s3.
1631 * boot up: dc calculate dcn watermark clock settings within dc_create,
1632 * dcn20_resource_construct
1633 * then call pplib functions below to pass the settings to smu:
1634 * smu_set_watermarks_for_clock_ranges
1635 * smu_set_watermarks_table
1636 * navi10_set_watermarks_table
1637 * smu_write_watermarks_table
1638 *
1639 * For Renoir, clock settings of dcn watermark are also fixed values.
1640 * dc has implemented different flow for window driver:
1641 * dc_hardware_init / dc_set_power_state
1642 * dcn10_init_hw
1643 * notify_wm_ranges
1644 * set_wm_ranges
1645 * -- Linux
1646 * smu_set_watermarks_for_clock_ranges
1647 * renoir_set_watermarks_table
1648 * smu_write_watermarks_table
1649 *
1650 * For Linux,
1651 * dc_hardware_init -> amdgpu_dm_init
1652 * dc_set_power_state --> dm_resume
1653 *
1654 * therefore, this function apply to navi10/12/14 but not Renoir
1655 * *
1656 */
1657 switch(adev->asic_type) {
1658 case CHIP_NAVI10:
1659 case CHIP_NAVI14:
1660 case CHIP_NAVI12:
1661 break;
1662 default:
1663 return 0;
1664 }
1665
e7a95eea
EQ
1666 ret = smu_write_watermarks_table(smu);
1667 if (ret) {
1668 DRM_ERROR("Failed to update WMTABLE!\n");
1669 return ret;
9340dfd3
HW
1670 }
1671
9340dfd3
HW
1672 return 0;
1673}

/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}

/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);

	return 0;
}

static int dm_enable_vblank(struct drm_crtc *crtc);
static void dm_disable_vblank(struct drm_crtc *crtc);

static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
					  struct dc_state *state, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc;
	int rc = -EBUSY;
	int i = 0;

	for (i = 0; i < state->stream_count; i++) {
		acrtc = get_crtc_by_otg_inst(
				adev, state->stream_status[i].primary_otg_inst);

		if (acrtc && state->stream_status[i].plane_count != 0) {
			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG("crtc %d - pflip irq %sabling: r=%d\n",
				  acrtc->crtc_id, enable ? "en" : "dis", rc);
			if (rc)
				DRM_WARN("Failed to %s pflip interrupts\n",
					 enable ? "enable" : "disable");

			if (enable) {
				rc = dm_enable_vblank(&acrtc->base);
				if (rc)
					DRM_WARN("Failed to enable vblank interrupts\n");
			} else {
				dm_disable_vblank(&acrtc->base);
			}
		}
	}
}

static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
{
	struct dc_state *context = NULL;
	enum dc_status res = DC_ERROR_UNEXPECTED;
	int i;
	struct dc_stream_state *del_streams[MAX_PIPES];
	int del_streams_count = 0;

	memset(del_streams, 0, sizeof(del_streams));

	context = dc_create_state(dc);
	if (context == NULL)
		goto context_alloc_fail;

	dc_resource_state_copy_construct_current(dc, context);

	/* First remove from context all streams */
	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		del_streams[del_streams_count++] = stream;
	}

	/* Remove all planes for removed streams and then remove the streams */
	for (i = 0; i < del_streams_count; i++) {
		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
			res = DC_FAIL_DETACH_SURFACES;
			goto fail;
		}

		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
		if (res != DC_OK)
			goto fail;
	}

	res = dc_validate_global_state(dc, context, false);

	if (res != DC_OK) {
		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
		goto fail;
	}

	res = dc_commit_state(dc, context);

fail:
	dc_release_state(context);

context_alloc_fail:
	return res;
}

static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	if (amdgpu_in_reset(adev)) {
		mutex_lock(&dm->dc_lock);
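		/* dc_lock is deliberately left held across the GPU reset;
		 * the matching unlock is in the amdgpu_in_reset() branch of
		 * dm_resume().
		 */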
		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);

		amdgpu_dm_commit_zero_streams(dm->dc);

		amdgpu_dm_irq_suspend(adev);

		return ret;
	}

	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));

	s3_handle_mst(adev_to_drm(adev), true);

	amdgpu_dm_irq_suspend(adev);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return 0;
}

static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc)
{
	uint32_t i;
	struct drm_connector_state *new_con_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		crtc_from_state = new_con_state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_dm_connector(connector);
	}

	return NULL;
}

static void emulated_link_detect(struct dc_link *link)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct display_sink_capability sink_caps = { 0 };
	enum dc_edid_status edid_status;
	struct dc_context *dc_ctx = link->ctx;
	struct dc_sink *sink = NULL;
	struct dc_sink *prev_sink = NULL;

	link->type = dc_connection_none;
	prev_sink = link->local_sink;

	if (prev_sink != NULL)
		dc_sink_retain(prev_sink);

	switch (link->connector_signal) {
	case SIGNAL_TYPE_HDMI_TYPE_A: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
		break;
	}

	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
		break;
	}

	case SIGNAL_TYPE_DVI_DUAL_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
		break;
	}

	case SIGNAL_TYPE_LVDS: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_LVDS;
		break;
	}

	case SIGNAL_TYPE_EDP: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_EDP;
		break;
	}

	case SIGNAL_TYPE_DISPLAY_PORT: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
		break;
	}

	default:
		DC_ERROR("Invalid connector type! signal:%d\n",
			link->connector_signal);
		return;
	}

	sink_init_data.link = link;
	sink_init_data.sink_signal = sink_caps.signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DC_ERROR("Failed to create sink!\n");
		return;
	}

	/* dc_sink_create returns a new reference */
	link->local_sink = sink;

	edid_status = dm_helpers_read_local_edid(
			link->ctx,
			link,
			sink);

	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID");
}

static void dm_gpureset_commit_state(struct dc_state *dc_state,
				     struct amdgpu_display_manager *dm)
{
	struct {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} *bundle;
	int k, m;

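	/* Note: the bundle is heap-allocated rather than kept on the stack;
	 * with MAX_SURFACES copies of each update struct it would be far too
	 * large for a kernel stack frame.
	 */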
	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		dm_error("Failed to allocate update bundle\n");
		goto cleanup;
	}

	for (k = 0; k < dc_state->stream_count; k++) {
		bundle->stream_update.stream = dc_state->streams[k];

		for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
			bundle->surface_updates[m].surface =
				dc_state->stream_status[k].plane_states[m];
			bundle->surface_updates[m].surface->force_full_update =
				true;
		}
		dc_commit_updates_for_stream(
			dm->dc, bundle->surface_updates,
			dc_state->stream_status[k].plane_count,
			dc_state->streams[k], &bundle->stream_update, dc_state);
	}

cleanup:
	kfree(bundle);

	return;
}

static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct drm_device *ddev = adev_to_drm(adev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct dm_plane_state *dm_new_plane_state;
	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
	enum dc_connection_type new_connection_type = dc_connection_none;
	struct dc_state *dc_state;
	int i, r, j;

	if (amdgpu_in_reset(adev)) {
		dc_state = dm->cached_dc_state;

		r = dm_dmub_hw_init(adev);
		if (r)
			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
		dc_resume(dm->dc);

		amdgpu_dm_irq_resume_early(adev);

		for (i = 0; i < dc_state->stream_count; i++) {
			dc_state->streams[i]->mode_changed = true;
			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
				dc_state->stream_status[i].plane_states[j]->update_flags.raw
					= 0xffffffff;
			}
		}

		WARN_ON(!dc_commit_state(dm->dc, dc_state));

		dm_gpureset_commit_state(dm->cached_dc_state, dm);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);

		dc_release_state(dm->cached_dc_state);
		dm->cached_dc_state = NULL;

		amdgpu_dm_irq_resume_late(adev);

		mutex_unlock(&dm->dc_lock);

		return 0;
	}
	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
	dc_release_state(dm_state->context);
	dm_state->context = dc_create_state(dm->dc);
	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
	dc_resource_state_construct(dm->dc, dm_state->context);

	/* Before powering on DC we need to re-initialize DMUB. */
	r = dm_dmub_hw_init(adev);
	if (r)
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

	/* power on hardware */
	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

	/* program HPD filter */
	dc_resume(dm->dc);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);

	/* Do detection */
	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none)
			emulated_link_detect(aconnector->dc_link);
		else
			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
			aconnector->fake_enable = false;

		if (aconnector->dc_sink)
			dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}
	drm_connector_list_iter_end(&iter);

	/* Force mode set in atomic commit */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
		new_crtc_state->active_changed = true;

	/*
	 * atomic_check is expected to create the dc states. We need to release
	 * them here, since they were duplicated as part of the suspend
	 * procedure.
	 */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->stream) {
			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;
		}
	}

	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		if (dm_new_plane_state->dc_state) {
			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
			dc_plane_state_release(dm_new_plane_state->dc_state);
			dm_new_plane_state->dc_state = NULL;
		}
	}

	drm_atomic_helper_resume(ddev, dm->cached_state);

	dm->cached_state = NULL;

	amdgpu_dm_irq_resume_late(adev);

	amdgpu_dm_smu_write_watermarks_table(adev);

	return 0;
}

/**
 * DOC: DM Lifecycle
 *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
 * the base driver's device list to be initialized and torn down accordingly.
 *
 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
 */

static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};

const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};

/**
 * DOC: atomic
 *
 * *WIP*
 */

static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.get_format_info = amd_get_format_info,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = amdgpu_dm_atomic_commit,
};

static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};

static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
{
	u32 max_cll, min_cll, max, min, q, r;
	struct amdgpu_dm_backlight_caps *caps;
	struct amdgpu_display_manager *dm;
	struct drm_connector *conn_base;
	struct amdgpu_device *adev;
	struct dc_link *link = NULL;
	static const u8 pre_computed_values[] = {
		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};

	if (!aconnector || !aconnector->dc_link)
		return;

	link = aconnector->dc_link;
	if (link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	conn_base = &aconnector->base;
	adev = drm_to_adev(conn_base->dev);
	dm = &adev->dm;
	caps = &dm->backlight_caps;
	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
	caps->aux_support = false;
	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;

	if (caps->ext_caps->bits.oled == 1 ||
	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
		caps->aux_support = true;

	/* From the specification (CTA-861-G), for calculating the maximum
	 * luminance we need to use:
	 *	Luminance = 50*2**(CV/32)
	 * where CV is a one-byte value.
	 * Calculating this expression would need floating-point precision;
	 * to avoid that complexity, we take advantage of CV being divided
	 * by a constant. From Euclid's division algorithm, we know that CV
	 * can be written as: CV = 32*q + r. Next, we replace CV in the
	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
	 * need to pre-compute the value of 2**(r/32). For pre-computing the
	 * values we just used the following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * The results of the above expression can be verified in
	 * pre_computed_values.
	 */
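	/* Worked example with a hypothetical sink reporting max_cll = 65:
	 * q = 65 >> 5 = 2 and r = 65 % 32 = 1, so
	 * max = (1 << 2) * pre_computed_values[1] = 4 * 51 = 204 nits,
	 * matching round(50 * 2**(65/32)) = 204.
	 */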
	q = max_cll >> 5;
	r = max_cll % 32;
	max = (1 << q) * pre_computed_values[r];

	// min luminance: maxLum * (CV/255)^2 / 100
	q = DIV_ROUND_CLOSEST(min_cll, 255);
	min = max * DIV_ROUND_CLOSEST((q * q), 100);

	caps->aux_max_input_signal = max;
	caps->aux_min_input_signal = min;
}

void amdgpu_dm_update_connector_after_detect(
		struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;

	sink = aconnector->dc_link->local_sink;
	if (sink)
		dc_sink_retain(sink);

	/*
	 * Edid mgmt connector gets first update only in mode_valid hook and then
	 * the connector sink is set to either fake or physical sink depending on
	 * link status. Skip if already done during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/*
		 * For S3 resume with headless use eml_sink to fake stream
		 * because on resume connector->sink is set to NULL
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * retain and release below are used to
				 * bump up refcount for sink because the link doesn't point
				 * to it anymore after disconnect, so on next crtc to connector
				 * reshuffle by UMD we will get into unwanted dc_sink release
				 */
				dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			dc_sink_retain(aconnector->dc_sink);
			amdgpu_dm_update_freesync_caps(connector,
					aconnector->edid);
		} else {
			amdgpu_dm_update_freesync_caps(connector, NULL);
			if (!aconnector->dc_sink) {
				aconnector->dc_sink = aconnector->dc_em_sink;
				dc_sink_retain(aconnector->dc_sink);
			}
		}

		mutex_unlock(&dev->mode_config.mutex);

		if (sink)
			dc_sink_release(sink);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		dc_sink_release(sink);
		return;
	}

	if (aconnector->dc_sink == sink) {
		/*
		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!!
		 */
		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				aconnector->connector_id);
		if (sink)
			dc_sink_release(sink);
		return;
	}

	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
		aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/*
	 * 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do
	 */
	if (sink) {
		/*
		 * TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here.
		 */
		if (aconnector->dc_sink)
			amdgpu_dm_update_freesync_caps(connector, NULL);

		aconnector->dc_sink = sink;
		dc_sink_retain(aconnector->dc_sink);
		if (sink->dc_edid.length == 0) {
			aconnector->edid = NULL;
			if (aconnector->dc_link->aux_mode) {
				drm_dp_cec_unset_edid(
					&aconnector->dm_dp_aux.aux);
			}
		} else {
			aconnector->edid =
				(struct edid *)sink->dc_edid.raw_edid;

			drm_connector_update_edid_property(connector,
							   aconnector->edid);
			drm_add_edid_modes(connector, aconnector->edid);

			if (aconnector->dc_link->aux_mode)
				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
						    aconnector->edid);
		}

		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
		update_connector_ext_caps(aconnector);
	} else {
		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		amdgpu_dm_update_freesync_caps(connector, NULL);
		drm_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		aconnector->edid = NULL;
#ifdef CONFIG_DRM_AMD_DC_HDCP
		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
#endif
	}

	mutex_unlock(&dev->mode_config.mutex);

	update_subconnector_property(aconnector);

	if (sink)
		dc_sink_release(sink);
}

static void handle_hpd_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	enum dc_connection_type new_connection_type = dc_connection_none;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
#endif

	/*
	 * In case of failure or MST no need to update connector status or notify the OS
	 * since (for MST case) MST does this in its own context.
	 */
	mutex_lock(&aconnector->hpd_lock);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
		dm_con_state->update_hdcp = true;
	}
#endif
	if (aconnector->fake_enable)
		aconnector->fake_enable = false;

	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");

	if (aconnector->base.force && new_connection_type == dc_connection_none) {
		emulated_link_detect(aconnector->dc_link);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);

	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
		amdgpu_dm_update_connector_after_detect(aconnector);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);
}

static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;

	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}
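
	/* The read size falls out of the register map: DP_PSR_ERROR_STATUS -
	 * DP_SINK_COUNT_ESI = 0x2006 - 0x2002 = 4 ESI bytes for DPCD 1.2+,
	 * versus DP_LANE0_1_STATUS - DP_SINK_COUNT = 0x202 - 0x200 = 2 legacy
	 * bytes for older revisions.
	 */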

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

	while (dret == dpcd_bytes_to_read &&
	       process_count < max_process_count) {
		uint8_t retry;
		dret = 0;

		process_count++;

		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify down stream */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else {
			break;
		}
	}

	if (process_count == max_process_count)
		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}

static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
	enum dc_connection_type new_connection_type = dc_connection_none;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	union hpd_irq_data hpd_irq_data;
	struct amdgpu_device *adev = drm_to_adev(dev);

	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
#endif

	/*
	 * TODO: Temporarily add a mutex so the hpd interrupt cannot hit a gpio
	 * conflict; once an i2c helper is implemented, this mutex should be
	 * retired.
	 */
	if (dc_link->type != dc_connection_mst_branch)
		mutex_lock(&aconnector->hpd_lock);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
#else
	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
#endif
			!is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (!dc_link_detect_sink(dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(dc_link);

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		}
	}
#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
		if (adev->dm.hdcp_workqueue)
			hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
	}
#endif
	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
	    (dc_link->type == dc_connection_mst_branch))
		dm_handle_hpd_rx_irq(aconnector);

	if (dc_link->type != dc_connection_mst_branch) {
		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
		mutex_unlock(&aconnector->hpd_lock);
	}
}

static void register_hpd_handlers(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct amdgpu_dm_connector *aconnector;
	const struct dc_link *dc_link;
	struct dc_interrupt_params int_params = {0};

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {

		aconnector = to_amdgpu_dm_connector(connector);
		dc_link = aconnector->dc_link;

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_irq,
					(void *) aconnector);
		}

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {

			/* Also register for DP short pulse (hpd_rx). */
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_rx_irq,
					(void *) aconnector);
		}
	}
}

#if defined(CONFIG_DRM_AMD_DC_SI)
/* Register IRQ sources and initialize IRQ callbacks */
static int dce60_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VBLANK interrupt */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i + 1, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
#endif

/* Register IRQ sources and initialize IRQ callbacks */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

	if (adev->asic_type >= CHIP_VEGA10)
		client_id = SOC15_IH_CLIENTID_DCE;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VBLANK interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use VUPDATE interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_vupdate_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
/* Register IRQ sources and initialize IRQ callbacks */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VSTARTUP interrupt */
	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);

		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(
			adev, &int_params, dm_crtc_high_irq, c_irq_params);
	}

	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
	 * to trigger at end of each vblank, regardless of state of the lock,
	 * matching DCE behaviour.
	 */
	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
			i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);

		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_vupdate_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
			&adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
#endif

/*
 * Acquires the lock for the atomic state object and returns
 * the new atomic state.
 *
 * This should only be called during atomic check.
 */
static int dm_atomic_get_state(struct drm_atomic_state *state,
			       struct dm_atomic_state **dm_state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_state *priv_state;

	if (*dm_state)
		return 0;

	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	*dm_state = to_dm_atomic_state(priv_state);

	return 0;
}

static struct dm_atomic_state *
dm_atomic_get_new_state(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_obj *obj;
	struct drm_private_state *new_obj_state;
	int i;

	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
		if (obj->funcs == dm->atomic_obj.funcs)
			return to_dm_atomic_state(new_obj_state);
	}

	return NULL;
}

static struct drm_private_state *
dm_atomic_duplicate_state(struct drm_private_obj *obj)
{
	struct dm_atomic_state *old_state, *new_state;

	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
	if (!new_state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);

	old_state = to_dm_atomic_state(obj->state);

	if (old_state && old_state->context)
		new_state->context = dc_copy_state(old_state->context);

	if (!new_state->context) {
		kfree(new_state);
		return NULL;
	}

	return &new_state->base;
}

static void dm_atomic_destroy_state(struct drm_private_obj *obj,
				    struct drm_private_state *state)
{
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);

	if (dm_state && dm_state->context)
		dc_release_state(dm_state->context);

	kfree(dm_state);
}

static struct drm_private_state_funcs dm_atomic_state_funcs = {
	.atomic_duplicate_state = dm_atomic_duplicate_state,
	.atomic_destroy_state = dm_atomic_destroy_state,
};
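
/* dm_atomic_state rides on DRM's private object infrastructure: each atomic
 * transaction gets its own copy of the global DC context through
 * .atomic_duplicate_state above, and whichever copy ends up discarded is
 * released through .atomic_destroy_state.
 */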

static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
{
	struct dm_atomic_state *state;
	int r;

	adev->mode_info.mode_config_initialized = true;

	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;

	adev_to_drm(adev)->mode_config.max_width = 16384;
	adev_to_drm(adev)->mode_config.max_height = 16384;

	adev_to_drm(adev)->mode_config.preferred_depth = 24;
	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
	/* indicates support for immediate flip */
	adev_to_drm(adev)->mode_config.async_page_flip = true;

	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	state->context = dc_create_state(adev->dm.dc);
	if (!state->context) {
		kfree(state);
		return -ENOMEM;
	}

	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);

	drm_atomic_private_obj_init(adev_to_drm(adev),
				    &adev->dm.atomic_obj,
				    &state->base,
				    &dm_atomic_state_funcs);

	r = amdgpu_display_modeset_create_props(adev);
	if (r) {
		dc_release_state(state->context);
		kfree(state);
		return r;
	}

	r = amdgpu_dm_audio_init(adev);
	if (r) {
		dc_release_state(state->context);
		kfree(state);
		return r;
	}

	return 0;
}

#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
{
#if defined(CONFIG_ACPI)
	struct amdgpu_dm_backlight_caps caps;

	memset(&caps, 0, sizeof(caps));

	if (dm->backlight_caps.caps_valid)
		return;

	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
	if (caps.caps_valid) {
		dm->backlight_caps.caps_valid = true;
		if (caps.aux_support)
			return;
		dm->backlight_caps.min_input_signal = caps.min_input_signal;
		dm->backlight_caps.max_input_signal = caps.max_input_signal;
	} else {
		dm->backlight_caps.min_input_signal =
			AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
		dm->backlight_caps.max_input_signal =
			AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
	}
#else
	if (dm->backlight_caps.aux_support)
		return;

	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
#endif
}

static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
{
	bool rc;

	if (!link)
		return 1;

	rc = dc_link_set_backlight_level_nits(link, true, brightness,
					      AUX_BL_DEFAULT_TRANSITION_TIME_MS);

	return rc ? 0 : 1;
}

static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
				unsigned *min, unsigned *max)
{
	if (!caps)
		return 0;

	if (caps->aux_support) {
		// Firmware limits are in nits, DC API wants millinits.
		*max = 1000 * caps->aux_max_input_signal;
		*min = 1000 * caps->aux_min_input_signal;
	} else {
		// Firmware limits are 8-bit, PWM control is 16-bit.
		*max = 0x101 * caps->max_input_signal;
		*min = 0x101 * caps->min_input_signal;
	}
	return 1;
}

static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
					uint32_t brightness)
{
	unsigned min, max;

	if (!get_brightness_range(caps, &min, &max))
		return brightness;

	// Rescale 0..255 to min..max
	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
				       AMDGPU_MAX_BL_LEVEL);
}
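
/* Worked example, assuming the PWM path with the default caps above
 * (min_input_signal = 12, max_input_signal = 255): the 0x101 (257) factor
 * maps 0..255 exactly onto 0..65535, so min = 0x101 * 12 = 3084 and
 * max = 0x101 * 255 = 65535, and a user brightness of 128 becomes
 * 3084 + DIV_ROUND_CLOSEST((65535 - 3084) * 128, 255) = 34432.
 */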

static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
				      uint32_t brightness)
{
	unsigned min, max;

	if (!get_brightness_range(caps, &min, &max))
		return brightness;

	if (brightness < min)
		return 0;
	// Rescale min..max to 0..255
	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
				 max - min);
}

static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);
	struct amdgpu_dm_backlight_caps caps;
	struct dc_link *link = NULL;
	u32 brightness;
	bool rc;

	amdgpu_dm_update_backlight_caps(dm);
	caps = dm->backlight_caps;

	link = (struct dc_link *)dm->backlight_link;

	brightness = convert_brightness_from_user(&caps, bd->props.brightness);
	// Change brightness based on AUX property
	if (caps.aux_support)
		return set_backlight_via_aux(link, brightness);

	rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);

	return rc ? 0 : 1;
}

static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);
	int ret = dc_link_get_backlight_level(dm->backlight_link);

	if (ret == DC_ERROR_UNEXPECTED)
		return bd->props.brightness;
	return convert_brightness_to_user(&dm->backlight_caps, ret);
}

static const struct backlight_ops amdgpu_dm_backlight_ops = {
	.options = BL_CORE_SUSPENDRESUME,
	.get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status = amdgpu_dm_backlight_update_status,
};

static void
amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
{
	char bl_name[16];
	struct backlight_properties props = { 0 };

	amdgpu_dm_update_backlight_caps(dm);

	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
	props.brightness = AMDGPU_MAX_BL_LEVEL;
	props.type = BACKLIGHT_RAW;

	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
		 adev_to_drm(dm->adev)->primary->index);

	dm->backlight_dev = backlight_device_register(bl_name,
						      adev_to_drm(dm->adev)->dev,
						      dm,
						      &amdgpu_dm_backlight_ops,
						      &props);

	if (IS_ERR(dm->backlight_dev))
		DRM_ERROR("DM: Backlight registration failed!\n");
	else
		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
}

#endif

static int initialize_plane(struct amdgpu_display_manager *dm,
			    struct amdgpu_mode_info *mode_info, int plane_id,
			    enum drm_plane_type plane_type,
			    const struct dc_plane_cap *plane_cap)
{
	struct drm_plane *plane;
	unsigned long possible_crtcs;
	int ret = 0;

	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
	if (!plane) {
		DRM_ERROR("KMS: Failed to allocate plane\n");
		return -ENOMEM;
	}
	plane->type = plane_type;

	/*
	 * HACK: IGT tests expect that the primary plane for a CRTC
	 * can only have one possible CRTC. Only expose support for
	 * any CRTC if they're not going to be used as a primary plane
	 * for a CRTC - like overlay or underlay planes.
	 */
	possible_crtcs = 1 << plane_id;
	if (plane_id >= dm->dc->caps.max_streams)
		possible_crtcs = 0xff;
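	/* e.g. primary plane 2 gets possible_crtcs = 1 << 2 = 0x4 (CRTC 2
	 * only); plane ids at or above max_streams, used as overlays, may be
	 * placed on any CRTC (0xff).
	 */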

	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);

	if (ret) {
		DRM_ERROR("KMS: Failed to initialize plane\n");
		kfree(plane);
		return ret;
	}

	if (mode_info)
		mode_info->planes[plane_id] = plane;

	return ret;
}

static void register_backlight_device(struct amdgpu_display_manager *dm,
				      struct dc_link *link)
{
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none) {
		/*
		 * Even if registration failed, we should continue with
		 * DM initialization because not having a backlight control
		 * is better than a black screen.
		 */
		amdgpu_dm_register_backlight_device(dm);

		if (dm->backlight_dev)
			dm->backlight_link = link;
	}
#endif
}

/*
 * In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with DAL component
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	int32_t i;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct amdgpu_encoder *aencoder = NULL;
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	uint32_t link_cnt;
	int32_t primary_planes;
	enum dc_connection_type new_connection_type = dc_connection_none;
	const struct dc_plane_cap *plane;

	link_cnt = dm->dc->caps.max_links;
	if (amdgpu_dm_mode_config_init(dm->adev)) {
		DRM_ERROR("DM: Failed to initialize mode config\n");
		return -EINVAL;
	}

	/* There is one primary plane per CRTC */
	primary_planes = dm->dc->caps.max_streams;
	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);

	/*
	 * Initialize primary planes, implicit planes for legacy IOCTLS.
	 * Order is reversed to match iteration order in atomic check.
	 */
	for (i = (primary_planes - 1); i >= 0; i--) {
		plane = &dm->dc->caps.planes[i];

		if (initialize_plane(dm, mode_info, i,
				     DRM_PLANE_TYPE_PRIMARY, plane)) {
			DRM_ERROR("KMS: Failed to initialize primary plane\n");
			goto fail;
		}
	}

	/*
	 * Initialize overlay planes, index starting after primary planes.
	 * These planes have a higher DRM index than the primary planes since
	 * they should be considered as having a higher z-order.
	 * Order is reversed to match iteration order in atomic check.
	 *
	 * Only support DCN for now, and only expose one so we don't encourage
	 * userspace to use up all the pipes.
	 */
	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];

		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
			continue;

		if (!plane->blends_with_above || !plane->blends_with_below)
			continue;

		if (!plane->pixel_format_support.argb8888)
			continue;

		if (initialize_plane(dm, NULL, primary_planes + i,
				     DRM_PLANE_TYPE_OVERLAY, plane)) {
			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
			goto fail;
		}

		/* Only create one overlay plane. */
		break;
	}

	for (i = 0; i < dm->dc->caps.max_streams; i++)
		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
			DRM_ERROR("KMS: Failed to initialize crtc\n");
			goto fail;
		}

	dm->display_indexes_num = dm->dc->caps.max_streams;

	/* loops over all connectors on the board */
	for (i = 0; i < link_cnt; i++) {
		struct dc_link *link = NULL;

		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
			DRM_ERROR(
				"KMS: Cannot support more than %d display indexes\n",
				AMDGPU_DM_MAX_DISPLAY_INDEX);
			continue;
		}

		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
		if (!aconnector)
			goto fail;

		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
		if (!aencoder)
			goto fail;

		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
			DRM_ERROR("KMS: Failed to initialize encoder\n");
			goto fail;
		}

		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
			DRM_ERROR("KMS: Failed to initialize connector\n");
			goto fail;
4562236b
HW
3427 }
3428
89fc8d4e
HW
3429 link = dc_get_link_at_index(dm->dc, i);
3430
fbbdadf2
BL
3431 if (!dc_link_detect_sink(link, &new_connection_type))
3432 DRM_ERROR("KMS: Failed to detect connector\n");
3433
3434 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3435 emulated_link_detect(link);
3436 amdgpu_dm_update_connector_after_detect(aconnector);
3437
3438 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4562236b 3439 amdgpu_dm_update_connector_after_detect(aconnector);
89fc8d4e 3440 register_backlight_device(dm, link);
397a9bc5
RL
3441 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3442 amdgpu_dm_set_psr_caps(link);
89fc8d4e
HW
3443 }
3444
3445
4562236b
HW
3446 }
3447
3448 /* Software is initialized. Now we can register interrupt handlers. */
3449 switch (adev->asic_type) {
55e56389
MR
3450#if defined(CONFIG_DRM_AMD_DC_SI)
3451 case CHIP_TAHITI:
3452 case CHIP_PITCAIRN:
3453 case CHIP_VERDE:
3454 case CHIP_OLAND:
3455 if (dce60_register_irq_handlers(dm->adev)) {
3456 DRM_ERROR("DM: Failed to initialize IRQ\n");
3457 goto fail;
3458 }
3459 break;
3460#endif
4562236b
HW
3461 case CHIP_BONAIRE:
3462 case CHIP_HAWAII:
cd4b356f
AD
3463 case CHIP_KAVERI:
3464 case CHIP_KABINI:
3465 case CHIP_MULLINS:
4562236b
HW
3466 case CHIP_TONGA:
3467 case CHIP_FIJI:
3468 case CHIP_CARRIZO:
3469 case CHIP_STONEY:
3470 case CHIP_POLARIS11:
3471 case CHIP_POLARIS10:
b264d345 3472 case CHIP_POLARIS12:
7737de91 3473 case CHIP_VEGAM:
2c8ad2d5 3474 case CHIP_VEGA10:
2325ff30 3475 case CHIP_VEGA12:
1fe6bf2f 3476 case CHIP_VEGA20:
4562236b
HW
3477 if (dce110_register_irq_handlers(dm->adev)) {
3478 DRM_ERROR("DM: Failed to initialize IRQ\n");
cd8a2ae8 3479 goto fail;
4562236b
HW
3480 }
3481 break;
b86a1aa3 3482#if defined(CONFIG_DRM_AMD_DC_DCN)
ff5ef992 3483 case CHIP_RAVEN:
fbd2afe5 3484 case CHIP_NAVI12:
476e955d 3485 case CHIP_NAVI10:
fce651e3 3486 case CHIP_NAVI14:
30221ad8 3487 case CHIP_RENOIR:
79037324 3488 case CHIP_SIENNA_CICHLID:
a6c5308f 3489 case CHIP_NAVY_FLOUNDER:
2a411205 3490 case CHIP_DIMGREY_CAVEFISH:
469989ca 3491 case CHIP_VANGOGH:
ff5ef992
AD
3492 if (dcn10_register_irq_handlers(dm->adev)) {
3493 DRM_ERROR("DM: Failed to initialize IRQ\n");
cd8a2ae8 3494 goto fail;
ff5ef992
AD
3495 }
3496 break;
3497#endif
4562236b 3498 default:
e63f8673 3499 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
cd8a2ae8 3500 goto fail;
4562236b
HW
3501 }
3502
4562236b 3503 return 0;
cd8a2ae8 3504fail:
4562236b 3505 kfree(aencoder);
4562236b 3506 kfree(aconnector);
54087768 3507
59d0f396 3508 return -EINVAL;
4562236b
HW
3509}
3510
7578ecda 3511static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4562236b
HW
3512{
3513 drm_mode_config_cleanup(dm->ddev);
eb3dc897 3514 drm_atomic_private_obj_fini(&dm->atomic_obj);
4562236b
HW
3515 return;
3516}
3517
3518/******************************************************************************
3519 * amdgpu_display_funcs functions
3520 *****************************************************************************/
3521
1f6010a9 3522/*
4562236b
HW
3523 * dm_bandwidth_update - program display watermarks
3524 *
3525 * @adev: amdgpu_device pointer
3526 *
3527 * Calculate and program the display watermarks and line buffer allocation.
3528 */
3529static void dm_bandwidth_update(struct amdgpu_device *adev)
3530{
49c07a99 3531 /* TODO: implement later */
4562236b
HW
3532}
3533
39cc5be2 3534static const struct amdgpu_display_funcs dm_display_funcs = {
4562236b
HW
3535 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3536 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
7b42573b
HW
3537 .backlight_set_level = NULL, /* never called for DC */
3538 .backlight_get_level = NULL, /* never called for DC */
4562236b
HW
3539 .hpd_sense = NULL,/* called unconditionally */
3540 .hpd_set_polarity = NULL, /* called unconditionally */
3541 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4562236b
HW
3542 .page_flip_get_scanoutpos =
3543 dm_crtc_get_scanoutpos,/* called unconditionally */
3544 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3545 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4562236b
HW
3546};
3547
3548#if defined(CONFIG_DEBUG_KERNEL_DC)
3549
3ee6b26b
AD
3550static ssize_t s3_debug_store(struct device *device,
3551 struct device_attribute *attr,
3552 const char *buf,
3553 size_t count)
4562236b
HW
3554{
3555 int ret;
3556 int s3_state;
ef1de361 3557 struct drm_device *drm_dev = dev_get_drvdata(device);
1348969a 3558 struct amdgpu_device *adev = drm_to_adev(drm_dev);
4562236b
HW
3559
3560 ret = kstrtoint(buf, 0, &s3_state);
3561
3562 if (ret == 0) {
3563 if (s3_state) {
3564 dm_resume(adev);
4a580877 3565 drm_kms_helper_hotplug_event(adev_to_drm(adev));
4562236b
HW
3566 } else
3567 dm_suspend(adev);
3568 }
3569
3570 return ret == 0 ? count : 0;
3571}
3572
3573DEVICE_ATTR_WO(s3_debug);
3574
3575#endif
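/*
 * Editor's note: a usage sketch for the debug attribute above, assuming a
 * hypothetical sysfs path (the real path depends on the PCI topology):
 *
 *   # echo 1 > /sys/devices/.../s3_debug   - calls dm_resume() and fires
 *                                            a hotplug event
 *   # echo 0 > /sys/devices/.../s3_debug   - calls dm_suspend()
 */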
3576
3577static int dm_early_init(void *handle)
3578{
3579 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3580
4562236b 3581 switch (adev->asic_type) {
55e56389
MR
3582#if defined(CONFIG_DRM_AMD_DC_SI)
3583 case CHIP_TAHITI:
3584 case CHIP_PITCAIRN:
3585 case CHIP_VERDE:
3586 adev->mode_info.num_crtc = 6;
3587 adev->mode_info.num_hpd = 6;
3588 adev->mode_info.num_dig = 6;
3589 break;
3590 case CHIP_OLAND:
3591 adev->mode_info.num_crtc = 2;
3592 adev->mode_info.num_hpd = 2;
3593 adev->mode_info.num_dig = 2;
3594 break;
3595#endif
4562236b
HW
3596 case CHIP_BONAIRE:
3597 case CHIP_HAWAII:
3598 adev->mode_info.num_crtc = 6;
3599 adev->mode_info.num_hpd = 6;
3600 adev->mode_info.num_dig = 6;
4562236b 3601 break;
cd4b356f
AD
3602 case CHIP_KAVERI:
3603 adev->mode_info.num_crtc = 4;
3604 adev->mode_info.num_hpd = 6;
3605 adev->mode_info.num_dig = 7;
cd4b356f
AD
3606 break;
3607 case CHIP_KABINI:
3608 case CHIP_MULLINS:
3609 adev->mode_info.num_crtc = 2;
3610 adev->mode_info.num_hpd = 6;
3611 adev->mode_info.num_dig = 6;
cd4b356f 3612 break;
4562236b
HW
3613 case CHIP_FIJI:
3614 case CHIP_TONGA:
3615 adev->mode_info.num_crtc = 6;
3616 adev->mode_info.num_hpd = 6;
3617 adev->mode_info.num_dig = 7;
4562236b
HW
3618 break;
3619 case CHIP_CARRIZO:
3620 adev->mode_info.num_crtc = 3;
3621 adev->mode_info.num_hpd = 6;
3622 adev->mode_info.num_dig = 9;
4562236b
HW
3623 break;
3624 case CHIP_STONEY:
3625 adev->mode_info.num_crtc = 2;
3626 adev->mode_info.num_hpd = 6;
3627 adev->mode_info.num_dig = 9;
4562236b
HW
3628 break;
3629 case CHIP_POLARIS11:
b264d345 3630 case CHIP_POLARIS12:
4562236b
HW
3631 adev->mode_info.num_crtc = 5;
3632 adev->mode_info.num_hpd = 5;
3633 adev->mode_info.num_dig = 5;
4562236b
HW
3634 break;
3635 case CHIP_POLARIS10:
7737de91 3636 case CHIP_VEGAM:
4562236b
HW
3637 adev->mode_info.num_crtc = 6;
3638 adev->mode_info.num_hpd = 6;
3639 adev->mode_info.num_dig = 6;
4562236b 3640 break;
2c8ad2d5 3641 case CHIP_VEGA10:
2325ff30 3642 case CHIP_VEGA12:
1fe6bf2f 3643 case CHIP_VEGA20:
2c8ad2d5
AD
3644 adev->mode_info.num_crtc = 6;
3645 adev->mode_info.num_hpd = 6;
3646 adev->mode_info.num_dig = 6;
3647 break;
b86a1aa3 3648#if defined(CONFIG_DRM_AMD_DC_DCN)
ff5ef992 3649 case CHIP_RAVEN:
20f2ffe5
AD
3650 case CHIP_RENOIR:
3651 case CHIP_VANGOGH:
ff5ef992
AD
3652 adev->mode_info.num_crtc = 4;
3653 adev->mode_info.num_hpd = 4;
3654 adev->mode_info.num_dig = 4;
ff5ef992 3655 break;
476e955d 3656 case CHIP_NAVI10:
fbd2afe5 3657 case CHIP_NAVI12:
79037324 3658 case CHIP_SIENNA_CICHLID:
a6c5308f 3659 case CHIP_NAVY_FLOUNDER:
476e955d
HW
3660 adev->mode_info.num_crtc = 6;
3661 adev->mode_info.num_hpd = 6;
3662 adev->mode_info.num_dig = 6;
3663 break;
fce651e3 3664 case CHIP_NAVI14:
2a411205 3665 case CHIP_DIMGREY_CAVEFISH:
fce651e3
BL
3666 adev->mode_info.num_crtc = 5;
3667 adev->mode_info.num_hpd = 5;
3668 adev->mode_info.num_dig = 5;
3669 break;
20f2ffe5 3670#endif
4562236b 3671 default:
e63f8673 3672 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4562236b
HW
3673 return -EINVAL;
3674 }
3675
c8dd5715
MD
3676 amdgpu_dm_set_irq_funcs(adev);
3677
39cc5be2
AD
3678 if (adev->mode_info.funcs == NULL)
3679 adev->mode_info.funcs = &dm_display_funcs;
3680
1f6010a9
DF
3681 /*
3682 * Note: Do NOT change adev->audio_endpt_rreg and
4562236b 3683 * adev->audio_endpt_wreg because they are initialised in
1f6010a9
DF
3684 * amdgpu_device_init()
3685 */
4562236b
HW
3686#if defined(CONFIG_DEBUG_KERNEL_DC)
3687 device_create_file(
4a580877 3688 adev_to_drm(adev)->dev,
4562236b
HW
3689 &dev_attr_s3_debug);
3690#endif
3691
3692 return 0;
3693}
3694
9b690ef3 3695static bool modeset_required(struct drm_crtc_state *crtc_state,
0971c40e
HW
3696 struct dc_stream_state *new_stream,
3697 struct dc_stream_state *old_stream)
9b690ef3 3698{
2afda735 3699 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
e7b07cee
HW
3700}
3701
3702static bool modereset_required(struct drm_crtc_state *crtc_state)
3703{
2afda735 3704 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
e7b07cee
HW
3705}
3706
7578ecda 3707static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
e7b07cee
HW
3708{
3709 drm_encoder_cleanup(encoder);
3710 kfree(encoder);
3711}
3712
3713static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3714 .destroy = amdgpu_dm_encoder_destroy,
3715};
3716
e7b07cee 3717
695af5f9
NK
3718static int fill_dc_scaling_info(const struct drm_plane_state *state,
3719 struct dc_scaling_info *scaling_info)
e7b07cee 3720{
6491f0c0 3721 int scale_w, scale_h;
e7b07cee 3722
695af5f9 3723 memset(scaling_info, 0, sizeof(*scaling_info));
e7b07cee 3724
695af5f9
NK
3725	/* Source is in 16.16 fixed point; ignore the fractional part for now. */
3726 scaling_info->src_rect.x = state->src_x >> 16;
3727 scaling_info->src_rect.y = state->src_y >> 16;
e7b07cee 3728
695af5f9
NK
3729 scaling_info->src_rect.width = state->src_w >> 16;
3730 if (scaling_info->src_rect.width == 0)
3731 return -EINVAL;
3732
3733 scaling_info->src_rect.height = state->src_h >> 16;
3734 if (scaling_info->src_rect.height == 0)
3735 return -EINVAL;
3736
3737 scaling_info->dst_rect.x = state->crtc_x;
3738 scaling_info->dst_rect.y = state->crtc_y;
e7b07cee
HW
3739
3740 if (state->crtc_w == 0)
695af5f9 3741 return -EINVAL;
e7b07cee 3742
695af5f9 3743 scaling_info->dst_rect.width = state->crtc_w;
e7b07cee
HW
3744
3745 if (state->crtc_h == 0)
695af5f9 3746 return -EINVAL;
e7b07cee 3747
695af5f9 3748 scaling_info->dst_rect.height = state->crtc_h;
e7b07cee 3749
695af5f9
NK
3750 /* DRM doesn't specify clipping on destination output. */
3751 scaling_info->clip_rect = scaling_info->dst_rect;
e7b07cee 3752
6491f0c0
NK
3753 /* TODO: Validate scaling per-format with DC plane caps */
3754 scale_w = scaling_info->dst_rect.width * 1000 /
3755 scaling_info->src_rect.width;
e7b07cee 3756
6491f0c0
NK
3757 if (scale_w < 250 || scale_w > 16000)
3758 return -EINVAL;
3759
3760 scale_h = scaling_info->dst_rect.height * 1000 /
3761 scaling_info->src_rect.height;
3762
3763 if (scale_h < 250 || scale_h > 16000)
3764 return -EINVAL;
3765
695af5f9
NK
3766 /*
3767 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3768 * assume reasonable defaults based on the format.
3769 */
e7b07cee 3770
695af5f9 3771 return 0;
4562236b 3772}
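/*
 * Editor's sketch (illustrative): src_x/src_y/src_w/src_h arrive in 16.16
 * fixed point, so ">> 16" keeps only the integer part, e.g.
 * (1920 << 16) >> 16 == 1920. The scale factors above are in units of
 * 1/1000 and must stay within [250, 16000], i.e. 0.25x to 16x. For a
 * hypothetical 1920-wide source scanned out 960 wide:
 *
 *   scale_w = 960 * 1000 / 1920 = 500  -> 0.5x, accepted
 *   a 100-wide destination would give 52 -> below 250, rejected (-EINVAL)
 */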
695af5f9 3773
a3241991
BN
3774static void
3775fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
3776 uint64_t tiling_flags)
e7b07cee 3777{
a3241991
BN
3778 /* Fill GFX8 params */
3779 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3780 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
707477b0 3781
a3241991
BN
3782 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3783 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3784 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3785 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3786 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
b830ebc9 3787
a3241991
BN
3788 /* XXX fix me for VI */
3789 tiling_info->gfx8.num_banks = num_banks;
3790 tiling_info->gfx8.array_mode =
3791 DC_ARRAY_2D_TILED_THIN1;
3792 tiling_info->gfx8.tile_split = tile_split;
3793 tiling_info->gfx8.bank_width = bankw;
3794 tiling_info->gfx8.bank_height = bankh;
3795 tiling_info->gfx8.tile_aspect = mtaspect;
3796 tiling_info->gfx8.tile_mode =
3797 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3798 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3799 == DC_ARRAY_1D_TILED_THIN1) {
3800 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
e7b07cee
HW
3801 }
3802
a3241991
BN
3803 tiling_info->gfx8.pipe_config =
3804 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
e7b07cee
HW
3805}
3806
a3241991
BN
3807static void
3808fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
3809 union dc_tiling_info *tiling_info)
3810{
3811 tiling_info->gfx9.num_pipes =
3812 adev->gfx.config.gb_addr_config_fields.num_pipes;
3813 tiling_info->gfx9.num_banks =
3814 adev->gfx.config.gb_addr_config_fields.num_banks;
3815 tiling_info->gfx9.pipe_interleave =
3816 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3817 tiling_info->gfx9.num_shader_engines =
3818 adev->gfx.config.gb_addr_config_fields.num_se;
3819 tiling_info->gfx9.max_compressed_frags =
3820 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3821 tiling_info->gfx9.num_rb_per_se =
3822 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3823 tiling_info->gfx9.shaderEnable = 1;
a3241991
BN
3824 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3825 adev->asic_type == CHIP_NAVY_FLOUNDER ||
3826 adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
3827 adev->asic_type == CHIP_VANGOGH)
3828 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
7df7e505
NK
3829}
3830
695af5f9 3831static int
a3241991
BN
3832validate_dcc(struct amdgpu_device *adev,
3833 const enum surface_pixel_format format,
3834 const enum dc_rotation_angle rotation,
3835 const union dc_tiling_info *tiling_info,
3836 const struct dc_plane_dcc_param *dcc,
3837 const struct dc_plane_address *address,
3838 const struct plane_size *plane_size)
7df7e505
NK
3839{
3840 struct dc *dc = adev->dm.dc;
8daa1218
NC
3841 struct dc_dcc_surface_param input;
3842 struct dc_surface_dcc_cap output;
7df7e505 3843
8daa1218
NC
3844 memset(&input, 0, sizeof(input));
3845 memset(&output, 0, sizeof(output));
3846
a3241991 3847 if (!dcc->enable)
87b7ebc2
RS
3848 return 0;
3849
a3241991
BN
3850 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
3851 !dc->cap_funcs.get_dcc_compression_cap)
09e5665a 3852 return -EINVAL;
7df7e505 3853
695af5f9 3854 input.format = format;
12e2b2d4
DL
3855 input.surface_size.width = plane_size->surface_size.width;
3856 input.surface_size.height = plane_size->surface_size.height;
695af5f9 3857 input.swizzle_mode = tiling_info->gfx9.swizzle;
7df7e505 3858
695af5f9 3859 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
7df7e505 3860 input.scan = SCAN_DIRECTION_HORIZONTAL;
695af5f9 3861 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
7df7e505
NK
3862 input.scan = SCAN_DIRECTION_VERTICAL;
3863
3864 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
09e5665a 3865 return -EINVAL;
7df7e505
NK
3866
3867 if (!output.capable)
09e5665a 3868 return -EINVAL;
7df7e505 3869
a3241991
BN
3870 if (dcc->independent_64b_blks == 0 &&
3871 output.grph.rgb.independent_64b_blks != 0)
09e5665a 3872 return -EINVAL;
7df7e505 3873
a3241991
BN
3874 return 0;
3875}
3876
37384b3f
BN
3877static bool
3878modifier_has_dcc(uint64_t modifier)
3879{
3880 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
3881}
3882
3883static unsigned
3884modifier_gfx9_swizzle_mode(uint64_t modifier)
3885{
3886 if (modifier == DRM_FORMAT_MOD_LINEAR)
3887 return 0;
3888
3889 return AMD_FMT_MOD_GET(TILE, modifier);
3890}
3891
dfbbfe3c
BN
3892static const struct drm_format_info *
3893amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
3894{
816853f9 3895 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
dfbbfe3c
BN
3896}
3897
37384b3f
BN
3898static void
3899fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
3900 union dc_tiling_info *tiling_info,
3901 uint64_t modifier)
3902{
3903 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
3904 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
3905 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
3906 unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
3907
3908 fill_gfx9_tiling_info_from_device(adev, tiling_info);
3909
3910 if (!IS_AMD_FMT_MOD(modifier))
3911 return;
3912
3913 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
3914 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
3915
3916 if (adev->family >= AMDGPU_FAMILY_NV) {
3917 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
3918 } else {
3919 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
3920
3921 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
3922 }
3923}
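/*
 * Editor's sketch: AMD format modifiers pack hardware layout parameters
 * into bitfields of a u64. For a hypothetical modifier carrying
 * PIPE_XOR_BITS = 3 and PACKERS = 1, the helper above derives:
 *
 *   pipes_log2 = min(4u, 3) = 3  -> gfx9.num_pipes = 1 << 3 = 8
 *   gfx9.num_shader_engines = 1 << (3 - 3) = 1
 *   gfx9.num_pkrs = 1 << 1 = 2   (AMDGPU_FAMILY_NV and later only)
 */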
3924
faa37f54
BN
3925enum dm_micro_swizzle {
3926 MICRO_SWIZZLE_Z = 0,
3927 MICRO_SWIZZLE_S = 1,
3928 MICRO_SWIZZLE_D = 2,
3929 MICRO_SWIZZLE_R = 3
3930};
3931
3932static bool dm_plane_format_mod_supported(struct drm_plane *plane,
3933 uint32_t format,
3934 uint64_t modifier)
3935{
3936 struct amdgpu_device *adev = drm_to_adev(plane->dev);
3937 const struct drm_format_info *info = drm_format_info(format);
3938
3939 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
3940
3941 if (!info)
3942 return false;
3943
3944 /*
3945 * We always have to allow this modifier, because core DRM still
3946	 * checks LINEAR support if userspace does not provide modifiers.
3947 */
3948 if (modifier == DRM_FORMAT_MOD_LINEAR)
3949 return true;
3950
3951 /*
3952 * The arbitrary tiling support for multiplane formats has not been hooked
3953 * up.
3954 */
3955 if (info->num_planes > 1)
3956 return false;
3957
3958 /*
3959 * For D swizzle the canonical modifier depends on the bpp, so check
3960 * it here.
3961 */
3962 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
3963 adev->family >= AMDGPU_FAMILY_NV) {
3964 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
3965 return false;
3966 }
3967
3968 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
3969 info->cpp[0] < 8)
3970 return false;
3971
3972 if (modifier_has_dcc(modifier)) {
3973		/* Per radeonsi comments, 16/64 bpp are more complicated. */
3974 if (info->cpp[0] != 4)
3975 return false;
3976 }
3977
3978 return true;
3979}
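/*
 * Editor's sketch of the cpp[]/num_planes checks above (illustrative
 * values, LINEAR always accepted before these run):
 *
 *   DRM_FORMAT_XRGB8888 -> cpp[0] == 4     -> DCC modifiers allowed
 *   DRM_FORMAT_RGB565   -> cpp[0] == 2     -> DCC modifiers rejected
 *   DRM_FORMAT_NV12     -> num_planes == 2 -> all non-linear rejected
 */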
3980
3981static void
3982add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
3983{
3984 if (!*mods)
3985 return;
3986
3987 if (*cap - *size < 1) {
3988 uint64_t new_cap = *cap * 2;
3989 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
3990
3991 if (!new_mods) {
3992 kfree(*mods);
3993 *mods = NULL;
3994 return;
3995 }
3996
3997 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
3998 kfree(*mods);
3999 *mods = new_mods;
4000 *cap = new_cap;
4001 }
4002
4003 (*mods)[*size] = mod;
4004 *size += 1;
4005}
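/*
 * Editor's note: add_modifier() is a grow-by-doubling append. On
 * allocation failure it frees the list and NULLs *mods, so callers only
 * need a single "!*mods" check once they are done appending. A minimal
 * usage sketch (hypothetical initial capacity):
 *
 *   uint64_t *mods = kmalloc(8 * sizeof(uint64_t), GFP_KERNEL);
 *   uint64_t size = 0, cap = 8;
 *
 *   add_modifier(&mods, &size, &cap, DRM_FORMAT_MOD_LINEAR);
 *   add_modifier(&mods, &size, &cap, DRM_FORMAT_MOD_INVALID);
 *   if (!mods)
 *           return -ENOMEM;
 */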
4006
4007static void
4008add_gfx9_modifiers(const struct amdgpu_device *adev,
4009 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4010{
4011 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4012 int pipe_xor_bits = min(8, pipes +
4013 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4014 int bank_xor_bits = min(8 - pipe_xor_bits,
4015 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4016 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4017 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4018
4019
4020 if (adev->family == AMDGPU_FAMILY_RV) {
4021 /* Raven2 and later */
4022 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4023
4024 /*
4025 * No _D DCC swizzles yet because we only allow 32bpp, which
4026 * doesn't support _D on DCN
4027 */
4028
4029 if (has_constant_encode) {
4030 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4031 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4032 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4033 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4034 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4035 AMD_FMT_MOD_SET(DCC, 1) |
4036 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4037 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4038 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4039 }
4040
4041 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4042 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4043 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4044 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4045 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4046 AMD_FMT_MOD_SET(DCC, 1) |
4047 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4048 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4049 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4050
4051 if (has_constant_encode) {
4052 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4053 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4054 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4055 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4056 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4057 AMD_FMT_MOD_SET(DCC, 1) |
4058 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4059 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4060 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4061
4062 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4063 AMD_FMT_MOD_SET(RB, rb) |
4064 AMD_FMT_MOD_SET(PIPE, pipes));
4065 }
4066
4067 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4068 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4069 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4070 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4071 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4072 AMD_FMT_MOD_SET(DCC, 1) |
4073 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4074 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4075 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4076 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4077 AMD_FMT_MOD_SET(RB, rb) |
4078 AMD_FMT_MOD_SET(PIPE, pipes));
4079 }
4080
4081 /*
4082 * Only supported for 64bpp on Raven, will be filtered on format in
4083 * dm_plane_format_mod_supported.
4084 */
4085 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4086 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4087 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4088 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4089 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4090
4091 if (adev->family == AMDGPU_FAMILY_RV) {
4092 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4093 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4094 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4095 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4096 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4097 }
4098
4099 /*
4100 * Only supported for 64bpp on Raven, will be filtered on format in
4101 * dm_plane_format_mod_supported.
4102 */
4103 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4104 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4105 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4106
4107 if (adev->family == AMDGPU_FAMILY_RV) {
4108 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4109 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4110 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4111 }
4112}
4113
4114static void
4115add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4116 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4117{
4118 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4119
4120 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4121 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4122 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4123 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4124 AMD_FMT_MOD_SET(DCC, 1) |
4125 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4126 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4127 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4128
4129 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4130 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4131 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4132 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4133 AMD_FMT_MOD_SET(DCC, 1) |
4134 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4135 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4136 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4137 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4138
4139 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4140 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4141 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4142 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4143
4144 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4145 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4146 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4147 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4148
4149
4150 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4151 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4152 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4153 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4154
4155 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4156 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4157 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4158}
4159
4160static void
4161add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4162 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4163{
4164 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4165 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4166
4167 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4168 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4169 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4170 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4171 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4172 AMD_FMT_MOD_SET(DCC, 1) |
4173 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4174 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4175 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4176 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4177
4178 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4179 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4180 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4181 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4182 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4183 AMD_FMT_MOD_SET(DCC, 1) |
4184 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4185 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4186 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4187 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4188 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4189
4190 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4191 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4192 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4193 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4194 AMD_FMT_MOD_SET(PACKERS, pkrs));
4195
4196 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4197 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4198 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4199 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4200 AMD_FMT_MOD_SET(PACKERS, pkrs));
4201
4202 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4203 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4204 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4205 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4206
4207 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4208 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4209 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4210}
4211
4212static int
4213get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4214{
4215 uint64_t size = 0, capacity = 128;
4216 *mods = NULL;
4217
4218 /* We have not hooked up any pre-GFX9 modifiers. */
4219 if (adev->family < AMDGPU_FAMILY_AI)
4220 return 0;
4221
4222 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4223
4224 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4225 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4226 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4227 return *mods ? 0 : -ENOMEM;
4228 }
4229
4230 switch (adev->family) {
4231 case AMDGPU_FAMILY_AI:
4232 case AMDGPU_FAMILY_RV:
4233 add_gfx9_modifiers(adev, mods, &size, &capacity);
4234 break;
4235 case AMDGPU_FAMILY_NV:
4236 case AMDGPU_FAMILY_VGH:
4237 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4238 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4239 else
4240 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4241 break;
4242 }
4243
4244 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4245
4246 /* INVALID marks the end of the list. */
4247 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4248
4249 if (!*mods)
4250 return -ENOMEM;
4251
4252 return 0;
4253}
4254
37384b3f
BN
4255static int
4256fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4257 const struct amdgpu_framebuffer *afb,
4258 const enum surface_pixel_format format,
4259 const enum dc_rotation_angle rotation,
4260 const struct plane_size *plane_size,
4261 union dc_tiling_info *tiling_info,
4262 struct dc_plane_dcc_param *dcc,
4263 struct dc_plane_address *address,
4264 const bool force_disable_dcc)
4265{
4266 const uint64_t modifier = afb->base.modifier;
4267 int ret;
4268
4269 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4270 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4271
4272 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4273 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4274
4275 dcc->enable = 1;
4276 dcc->meta_pitch = afb->base.pitches[1];
4277 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4278
4279 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4280 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4281 }
4282
4283 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4284 if (ret)
4285 return ret;
7df7e505 4286
09e5665a
NK
4287 return 0;
4288}
4289
4290static int
320932bf 4291fill_plane_buffer_attributes(struct amdgpu_device *adev,
09e5665a 4292 const struct amdgpu_framebuffer *afb,
695af5f9
NK
4293 const enum surface_pixel_format format,
4294 const enum dc_rotation_angle rotation,
4295 const uint64_t tiling_flags,
09e5665a 4296 union dc_tiling_info *tiling_info,
12e2b2d4 4297 struct plane_size *plane_size,
09e5665a 4298 struct dc_plane_dcc_param *dcc,
87b7ebc2 4299 struct dc_plane_address *address,
5888f07a 4300 bool tmz_surface,
87b7ebc2 4301 bool force_disable_dcc)
09e5665a 4302{
320932bf 4303 const struct drm_framebuffer *fb = &afb->base;
09e5665a
NK
4304 int ret;
4305
4306 memset(tiling_info, 0, sizeof(*tiling_info));
320932bf 4307 memset(plane_size, 0, sizeof(*plane_size));
09e5665a 4308 memset(dcc, 0, sizeof(*dcc));
e0634e8d
NK
4309 memset(address, 0, sizeof(*address));
4310
5888f07a
HW
4311 address->tmz_surface = tmz_surface;
4312
695af5f9 4313 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
be7b9b32
BN
4314 uint64_t addr = afb->address + fb->offsets[0];
4315
12e2b2d4
DL
4316 plane_size->surface_size.x = 0;
4317 plane_size->surface_size.y = 0;
4318 plane_size->surface_size.width = fb->width;
4319 plane_size->surface_size.height = fb->height;
4320 plane_size->surface_pitch =
320932bf
NK
4321 fb->pitches[0] / fb->format->cpp[0];
4322
e0634e8d 4323 address->type = PLN_ADDR_TYPE_GRAPHICS;
be7b9b32
BN
4324 address->grph.addr.low_part = lower_32_bits(addr);
4325 address->grph.addr.high_part = upper_32_bits(addr);
1894478a 4326 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
be7b9b32 4327 uint64_t luma_addr = afb->address + fb->offsets[0];
1791e54f 4328 uint64_t chroma_addr = afb->address + fb->offsets[1];
e0634e8d 4329
12e2b2d4
DL
4330 plane_size->surface_size.x = 0;
4331 plane_size->surface_size.y = 0;
4332 plane_size->surface_size.width = fb->width;
4333 plane_size->surface_size.height = fb->height;
4334 plane_size->surface_pitch =
320932bf
NK
4335 fb->pitches[0] / fb->format->cpp[0];
4336
12e2b2d4
DL
4337 plane_size->chroma_size.x = 0;
4338 plane_size->chroma_size.y = 0;
320932bf 4339 /* TODO: set these based on surface format */
12e2b2d4
DL
4340 plane_size->chroma_size.width = fb->width / 2;
4341 plane_size->chroma_size.height = fb->height / 2;
320932bf 4342
12e2b2d4 4343 plane_size->chroma_pitch =
320932bf
NK
4344 fb->pitches[1] / fb->format->cpp[1];
4345
e0634e8d
NK
4346 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4347 address->video_progressive.luma_addr.low_part =
be7b9b32 4348 lower_32_bits(luma_addr);
e0634e8d 4349 address->video_progressive.luma_addr.high_part =
be7b9b32 4350 upper_32_bits(luma_addr);
e0634e8d
NK
4351 address->video_progressive.chroma_addr.low_part =
4352 lower_32_bits(chroma_addr);
4353 address->video_progressive.chroma_addr.high_part =
4354 upper_32_bits(chroma_addr);
4355 }
09e5665a 4356
a3241991 4357 if (adev->family >= AMDGPU_FAMILY_AI) {
9a33e881
BN
4358 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4359 rotation, plane_size,
4360 tiling_info, dcc,
4361 address,
4362 force_disable_dcc);
09e5665a
NK
4363 if (ret)
4364 return ret;
a3241991
BN
4365 } else {
4366 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
09e5665a
NK
4367 }
4368
4369 return 0;
7df7e505
NK
4370}
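/*
 * Editor's sketch: for a hypothetical 1920x1080 NV12 framebuffer the
 * function above splits one drm_framebuffer into DC's luma/chroma planes:
 *
 *   luma   (fb plane 0): 1920x1080, pitch = pitches[0] / 1 (cpp 1)
 *   chroma (fb plane 1):  960x540,  pitch = pitches[1] / 2 (cpp 2,
 *                         interleaved CbCr), address = afb->address +
 *                         fb->offsets[1]
 */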
4371
d74004b6 4372static void
695af5f9 4373fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
d74004b6
NK
4374 bool *per_pixel_alpha, bool *global_alpha,
4375 int *global_alpha_value)
4376{
4377 *per_pixel_alpha = false;
4378 *global_alpha = false;
4379 *global_alpha_value = 0xff;
4380
4381 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4382 return;
4383
4384 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4385 static const uint32_t alpha_formats[] = {
4386 DRM_FORMAT_ARGB8888,
4387 DRM_FORMAT_RGBA8888,
4388 DRM_FORMAT_ABGR8888,
4389 };
4390 uint32_t format = plane_state->fb->format->format;
4391 unsigned int i;
4392
4393 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4394 if (format == alpha_formats[i]) {
4395 *per_pixel_alpha = true;
4396 break;
4397 }
4398 }
4399 }
4400
4401 if (plane_state->alpha < 0xffff) {
4402 *global_alpha = true;
4403 *global_alpha_value = plane_state->alpha >> 8;
4404 }
4405}
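/*
 * Editor's sketch: plane_state->alpha is 16 bit (0x0000-0xffff) while DC
 * takes an 8-bit global alpha, hence ">> 8". E.g. a hypothetical 50%
 * blend:
 *
 *   plane_state->alpha = 0x7fff -> global_alpha_value = 0x7f (127)
 *
 * Only 0xffff (fully opaque) leaves global_alpha disabled.
 */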
4406
004fefa3
NK
4407static int
4408fill_plane_color_attributes(const struct drm_plane_state *plane_state,
695af5f9 4409 const enum surface_pixel_format format,
004fefa3
NK
4410 enum dc_color_space *color_space)
4411{
4412 bool full_range;
4413
4414 *color_space = COLOR_SPACE_SRGB;
4415
4416 /* DRM color properties only affect non-RGB formats. */
695af5f9 4417 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
004fefa3
NK
4418 return 0;
4419
4420 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4421
4422 switch (plane_state->color_encoding) {
4423 case DRM_COLOR_YCBCR_BT601:
4424 if (full_range)
4425 *color_space = COLOR_SPACE_YCBCR601;
4426 else
4427 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4428 break;
4429
4430 case DRM_COLOR_YCBCR_BT709:
4431 if (full_range)
4432 *color_space = COLOR_SPACE_YCBCR709;
4433 else
4434 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4435 break;
4436
4437 case DRM_COLOR_YCBCR_BT2020:
4438 if (full_range)
4439 *color_space = COLOR_SPACE_2020_YCBCR;
4440 else
4441 return -EINVAL;
4442 break;
4443
4444 default:
4445 return -EINVAL;
4446 }
4447
4448 return 0;
4449}
4450
695af5f9
NK
4451static int
4452fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4453 const struct drm_plane_state *plane_state,
4454 const uint64_t tiling_flags,
4455 struct dc_plane_info *plane_info,
87b7ebc2 4456 struct dc_plane_address *address,
5888f07a 4457 bool tmz_surface,
87b7ebc2 4458 bool force_disable_dcc)
695af5f9
NK
4459{
4460 const struct drm_framebuffer *fb = plane_state->fb;
4461 const struct amdgpu_framebuffer *afb =
4462 to_amdgpu_framebuffer(plane_state->fb);
4463 struct drm_format_name_buf format_name;
4464 int ret;
4465
4466 memset(plane_info, 0, sizeof(*plane_info));
4467
4468 switch (fb->format->format) {
4469 case DRM_FORMAT_C8:
4470 plane_info->format =
4471 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4472 break;
4473 case DRM_FORMAT_RGB565:
4474 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4475 break;
4476 case DRM_FORMAT_XRGB8888:
4477 case DRM_FORMAT_ARGB8888:
4478 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4479 break;
4480 case DRM_FORMAT_XRGB2101010:
4481 case DRM_FORMAT_ARGB2101010:
4482 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4483 break;
4484 case DRM_FORMAT_XBGR2101010:
4485 case DRM_FORMAT_ABGR2101010:
4486 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4487 break;
4488 case DRM_FORMAT_XBGR8888:
4489 case DRM_FORMAT_ABGR8888:
4490 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4491 break;
4492 case DRM_FORMAT_NV21:
4493 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4494 break;
4495 case DRM_FORMAT_NV12:
4496 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4497 break;
cbec6477
SW
4498 case DRM_FORMAT_P010:
4499 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4500 break;
492548dc
SW
4501 case DRM_FORMAT_XRGB16161616F:
4502 case DRM_FORMAT_ARGB16161616F:
4503 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4504 break;
2a5195dc
MK
4505 case DRM_FORMAT_XBGR16161616F:
4506 case DRM_FORMAT_ABGR16161616F:
4507 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4508 break;
695af5f9
NK
4509 default:
4510 DRM_ERROR(
4511 "Unsupported screen format %s\n",
4512 drm_get_format_name(fb->format->format, &format_name));
4513 return -EINVAL;
4514 }
4515
4516 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4517 case DRM_MODE_ROTATE_0:
4518 plane_info->rotation = ROTATION_ANGLE_0;
4519 break;
4520 case DRM_MODE_ROTATE_90:
4521 plane_info->rotation = ROTATION_ANGLE_90;
4522 break;
4523 case DRM_MODE_ROTATE_180:
4524 plane_info->rotation = ROTATION_ANGLE_180;
4525 break;
4526 case DRM_MODE_ROTATE_270:
4527 plane_info->rotation = ROTATION_ANGLE_270;
4528 break;
4529 default:
4530 plane_info->rotation = ROTATION_ANGLE_0;
4531 break;
4532 }
4533
4534 plane_info->visible = true;
4535 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4536
6d83a32d
MS
4537 plane_info->layer_index = 0;
4538
695af5f9
NK
4539 ret = fill_plane_color_attributes(plane_state, plane_info->format,
4540 &plane_info->color_space);
4541 if (ret)
4542 return ret;
4543
4544 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4545 plane_info->rotation, tiling_flags,
4546 &plane_info->tiling_info,
4547 &plane_info->plane_size,
5888f07a 4548 &plane_info->dcc, address, tmz_surface,
87b7ebc2 4549 force_disable_dcc);
695af5f9
NK
4550 if (ret)
4551 return ret;
4552
4553 fill_blending_from_plane_state(
4554 plane_state, &plane_info->per_pixel_alpha,
4555 &plane_info->global_alpha, &plane_info->global_alpha_value);
4556
4557 return 0;
4558}
4559
4560static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4561 struct dc_plane_state *dc_plane_state,
4562 struct drm_plane_state *plane_state,
4563 struct drm_crtc_state *crtc_state)
e7b07cee 4564{
cf020d49 4565 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6eed95b0 4566 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
695af5f9
NK
4567 struct dc_scaling_info scaling_info;
4568 struct dc_plane_info plane_info;
695af5f9 4569 int ret;
87b7ebc2 4570 bool force_disable_dcc = false;
e7b07cee 4571
695af5f9
NK
4572 ret = fill_dc_scaling_info(plane_state, &scaling_info);
4573 if (ret)
4574 return ret;
e7b07cee 4575
695af5f9
NK
4576 dc_plane_state->src_rect = scaling_info.src_rect;
4577 dc_plane_state->dst_rect = scaling_info.dst_rect;
4578 dc_plane_state->clip_rect = scaling_info.clip_rect;
4579 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
e7b07cee 4580
87b7ebc2 4581 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
707477b0 4582 ret = fill_dc_plane_info_and_addr(adev, plane_state,
6eed95b0 4583 afb->tiling_flags,
695af5f9 4584 &plane_info,
87b7ebc2 4585 &dc_plane_state->address,
6eed95b0 4586 afb->tmz_surface,
87b7ebc2 4587 force_disable_dcc);
004fefa3
NK
4588 if (ret)
4589 return ret;
4590
695af5f9
NK
4591 dc_plane_state->format = plane_info.format;
4592 dc_plane_state->color_space = plane_info.color_space;
4593 dc_plane_state->format = plane_info.format;
4594 dc_plane_state->plane_size = plane_info.plane_size;
4595 dc_plane_state->rotation = plane_info.rotation;
4596 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4597 dc_plane_state->stereo_format = plane_info.stereo_format;
4598 dc_plane_state->tiling_info = plane_info.tiling_info;
4599 dc_plane_state->visible = plane_info.visible;
4600 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4601 dc_plane_state->global_alpha = plane_info.global_alpha;
4602 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4603 dc_plane_state->dcc = plane_info.dcc;
6d83a32d 4604	dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
695af5f9 4605
e277adc5
LSL
4606 /*
4607 * Always set input transfer function, since plane state is refreshed
4608 * every time.
4609 */
cf020d49
NK
4610 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4611 if (ret)
4612 return ret;
e7b07cee 4613
cf020d49 4614 return 0;
e7b07cee
HW
4615}
4616
3ee6b26b
AD
4617static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4618 const struct dm_connector_state *dm_state,
4619 struct dc_stream_state *stream)
e7b07cee
HW
4620{
4621 enum amdgpu_rmx_type rmx_type;
4622
4623	struct rect src = { 0 }; /* viewport in composition space */
4624 struct rect dst = { 0 }; /* stream addressable area */
4625
4626 /* no mode. nothing to be done */
4627 if (!mode)
4628 return;
4629
4630 /* Full screen scaling by default */
4631 src.width = mode->hdisplay;
4632 src.height = mode->vdisplay;
4633 dst.width = stream->timing.h_addressable;
4634 dst.height = stream->timing.v_addressable;
4635
f4791779
HW
4636 if (dm_state) {
4637 rmx_type = dm_state->scaling;
4638 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4639 if (src.width * dst.height <
4640 src.height * dst.width) {
4641 /* height needs less upscaling/more downscaling */
4642 dst.width = src.width *
4643 dst.height / src.height;
4644 } else {
4645 /* width needs less upscaling/more downscaling */
4646 dst.height = src.height *
4647 dst.width / src.width;
4648 }
4649 } else if (rmx_type == RMX_CENTER) {
4650 dst = src;
e7b07cee 4651 }
e7b07cee 4652
f4791779
HW
4653 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4654 dst.y = (stream->timing.v_addressable - dst.height) / 2;
e7b07cee 4655
f4791779
HW
4656 if (dm_state->underscan_enable) {
4657 dst.x += dm_state->underscan_hborder / 2;
4658 dst.y += dm_state->underscan_vborder / 2;
4659 dst.width -= dm_state->underscan_hborder;
4660 dst.height -= dm_state->underscan_vborder;
4661 }
e7b07cee
HW
4662 }
4663
4664 stream->src = src;
4665 stream->dst = dst;
4666
f1ad2f5e 4667 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
e7b07cee
HW
4668 dst.x, dst.y, dst.width, dst.height);
4669
4670}
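/*
 * Editor's worked example of the RMX_ASPECT branch above, assuming a
 * hypothetical 1280x1024 (5:4) mode on a 1920x1080 native timing:
 *
 *   src.width * dst.height = 1280 * 1080 = 1382400
 *   src.height * dst.width = 1024 * 1920 = 1966080  -> height limits
 *   dst.width = 1280 * 1080 / 1024 = 1350
 *   dst.x = (1920 - 1350) / 2 = 285, dst.y = 0      -> centered pillarbox
 */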
4671
3ee6b26b 4672static enum dc_color_depth
42ba01fc 4673convert_color_depth_from_display_info(const struct drm_connector *connector,
cbd14ae7 4674 bool is_y420, int requested_bpc)
e7b07cee 4675{
1bc22f20 4676 uint8_t bpc;
01c22997 4677
1bc22f20
SW
4678 if (is_y420) {
4679 bpc = 8;
4680
4681 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4682 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4683 bpc = 16;
4684 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4685 bpc = 12;
4686 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4687 bpc = 10;
4688 } else {
4689 bpc = (uint8_t)connector->display_info.bpc;
4690 /* Assume 8 bpc by default if no bpc is specified. */
4691 bpc = bpc ? bpc : 8;
4692 }
e7b07cee 4693
cbd14ae7 4694 if (requested_bpc > 0) {
01c22997
NK
4695 /*
4696 * Cap display bpc based on the user requested value.
4697 *
4698		 * The value for state->max_bpc may not be correctly updated
4699 * depending on when the connector gets added to the state
4700 * or if this was called outside of atomic check, so it
4701 * can't be used directly.
4702 */
cbd14ae7 4703 bpc = min_t(u8, bpc, requested_bpc);
01c22997 4704
1825fd34
NK
4705 /* Round down to the nearest even number. */
4706 bpc = bpc - (bpc & 1);
4707 }
07e3a1cf 4708
e7b07cee
HW
4709 switch (bpc) {
4710 case 0:
1f6010a9
DF
4711 /*
4712		 * Temporary workaround: DRM doesn't parse color depth for
e7b07cee
HW
4713 * EDID revision before 1.4
4714 * TODO: Fix edid parsing
4715 */
4716 return COLOR_DEPTH_888;
4717 case 6:
4718 return COLOR_DEPTH_666;
4719 case 8:
4720 return COLOR_DEPTH_888;
4721 case 10:
4722 return COLOR_DEPTH_101010;
4723 case 12:
4724 return COLOR_DEPTH_121212;
4725 case 14:
4726 return COLOR_DEPTH_141414;
4727 case 16:
4728 return COLOR_DEPTH_161616;
4729 default:
4730 return COLOR_DEPTH_UNDEFINED;
4731 }
4732}
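/*
 * Editor's sketch: "bpc - (bpc & 1)" rounds down to the nearest even
 * value. E.g. a hypothetical requested_bpc of 11 against a 12 bpc sink:
 *
 *   bpc = min(12, 11) = 11;  11 & 1 = 1;  11 - 1 = 10 -> COLOR_DEPTH_101010
 */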
4733
3ee6b26b
AD
4734static enum dc_aspect_ratio
4735get_aspect_ratio(const struct drm_display_mode *mode_in)
e7b07cee 4736{
e11d4147
LSL
4737 /* 1-1 mapping, since both enums follow the HDMI spec. */
4738 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
e7b07cee
HW
4739}
4740
3ee6b26b
AD
4741static enum dc_color_space
4742get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
e7b07cee
HW
4743{
4744 enum dc_color_space color_space = COLOR_SPACE_SRGB;
4745
4746 switch (dc_crtc_timing->pixel_encoding) {
4747 case PIXEL_ENCODING_YCBCR422:
4748 case PIXEL_ENCODING_YCBCR444:
4749 case PIXEL_ENCODING_YCBCR420:
4750 {
4751 /*
4752		 * 27030 kHz is the separation point between HDTV and SDTV
4753 * according to HDMI spec, we use YCbCr709 and YCbCr601
4754 * respectively
4755 */
380604e2 4756 if (dc_crtc_timing->pix_clk_100hz > 270300) {
e7b07cee
HW
4757 if (dc_crtc_timing->flags.Y_ONLY)
4758 color_space =
4759 COLOR_SPACE_YCBCR709_LIMITED;
4760 else
4761 color_space = COLOR_SPACE_YCBCR709;
4762 } else {
4763 if (dc_crtc_timing->flags.Y_ONLY)
4764 color_space =
4765 COLOR_SPACE_YCBCR601_LIMITED;
4766 else
4767 color_space = COLOR_SPACE_YCBCR601;
4768 }
4769
4770 }
4771 break;
4772 case PIXEL_ENCODING_RGB:
4773 color_space = COLOR_SPACE_SRGB;
4774 break;
4775
4776 default:
4777 WARN_ON(1);
4778 break;
4779 }
4780
4781 return color_space;
4782}
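/*
 * Editor's note: pix_clk_100hz is in units of 100 Hz, so the 270300
 * threshold above is 27.03 MHz. E.g. a hypothetical 480p timing
 * (27.0 MHz -> 270000) selects YCbCr601, while 720p (74.25 MHz -> 742500)
 * selects YCbCr709.
 */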
4783
ea117312
TA
4784static bool adjust_colour_depth_from_display_info(
4785 struct dc_crtc_timing *timing_out,
4786 const struct drm_display_info *info)
400443e8 4787{
ea117312 4788 enum dc_color_depth depth = timing_out->display_color_depth;
400443e8 4789 int normalized_clk;
400443e8 4790 do {
380604e2 4791 normalized_clk = timing_out->pix_clk_100hz / 10;
400443e8
ML
4792 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4793 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4794 normalized_clk /= 2;
4795		/* Adjust the pixel clock per the HDMI spec, based on colour depth */
ea117312
TA
4796 switch (depth) {
4797 case COLOR_DEPTH_888:
4798 break;
400443e8
ML
4799 case COLOR_DEPTH_101010:
4800 normalized_clk = (normalized_clk * 30) / 24;
4801 break;
4802 case COLOR_DEPTH_121212:
4803 normalized_clk = (normalized_clk * 36) / 24;
4804 break;
4805 case COLOR_DEPTH_161616:
4806 normalized_clk = (normalized_clk * 48) / 24;
4807 break;
4808 default:
ea117312
TA
4809 /* The above depths are the only ones valid for HDMI. */
4810 return false;
400443e8 4811 }
ea117312
TA
4812 if (normalized_clk <= info->max_tmds_clock) {
4813 timing_out->display_color_depth = depth;
4814 return true;
4815 }
4816 } while (--depth > COLOR_DEPTH_666);
4817 return false;
400443e8 4818}
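/*
 * Editor's worked example of the TMDS clock loop above, assuming a
 * hypothetical 594 MHz 4k60 HDMI timing (pix_clk_100hz = 5940000) at
 * 10 bpc RGB against a sink with info->max_tmds_clock = 600000 (kHz):
 *
 *   normalized_clk = 594000 * 30 / 24 = 742500 kHz  -> too high, retry
 *   at COLOR_DEPTH_888: 594000 kHz                  -> fits, depth = 8 bpc
 */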
e7b07cee 4819
42ba01fc
NK
4820static void fill_stream_properties_from_drm_display_mode(
4821 struct dc_stream_state *stream,
4822 const struct drm_display_mode *mode_in,
4823 const struct drm_connector *connector,
4824 const struct drm_connector_state *connector_state,
cbd14ae7
SW
4825 const struct dc_stream_state *old_stream,
4826 int requested_bpc)
e7b07cee
HW
4827{
4828 struct dc_crtc_timing *timing_out = &stream->timing;
fe61a2f1 4829 const struct drm_display_info *info = &connector->display_info;
d4252eee 4830 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
1cb1d477
WL
4831 struct hdmi_vendor_infoframe hv_frame;
4832 struct hdmi_avi_infoframe avi_frame;
e7b07cee 4833
acf83f86
WL
4834 memset(&hv_frame, 0, sizeof(hv_frame));
4835 memset(&avi_frame, 0, sizeof(avi_frame));
4836
e7b07cee
HW
4837 timing_out->h_border_left = 0;
4838 timing_out->h_border_right = 0;
4839 timing_out->v_border_top = 0;
4840 timing_out->v_border_bottom = 0;
4841 /* TODO: un-hardcode */
fe61a2f1 4842 if (drm_mode_is_420_only(info, mode_in)
ceb3dbb4 4843 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
fe61a2f1 4844 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
d4252eee
SW
4845 else if (drm_mode_is_420_also(info, mode_in)
4846 && aconnector->force_yuv420_output)
4847 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
fe61a2f1 4848 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
ceb3dbb4 4849 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
e7b07cee
HW
4850 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4851 else
4852 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4853
4854 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4855 timing_out->display_color_depth = convert_color_depth_from_display_info(
cbd14ae7
SW
4856 connector,
4857 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4858 requested_bpc);
e7b07cee
HW
4859 timing_out->scan_type = SCANNING_TYPE_NODATA;
4860 timing_out->hdmi_vic = 0;
b333730d
BL
4861
4862	if (old_stream) {
4863 timing_out->vic = old_stream->timing.vic;
4864 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4865 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4866 } else {
4867 timing_out->vic = drm_match_cea_mode(mode_in);
4868 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4869 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4870 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4871 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4872 }
e7b07cee 4873
1cb1d477
WL
4874 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4875 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4876 timing_out->vic = avi_frame.video_code;
4877 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4878 timing_out->hdmi_vic = hv_frame.vic;
4879 }
4880
e7b07cee
HW
4881 timing_out->h_addressable = mode_in->crtc_hdisplay;
4882 timing_out->h_total = mode_in->crtc_htotal;
4883 timing_out->h_sync_width =
4884 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4885 timing_out->h_front_porch =
4886 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4887 timing_out->v_total = mode_in->crtc_vtotal;
4888 timing_out->v_addressable = mode_in->crtc_vdisplay;
4889 timing_out->v_front_porch =
4890 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4891 timing_out->v_sync_width =
4892 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
380604e2 4893 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
e7b07cee 4894 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
e7b07cee
HW
4895
4896 stream->output_color_space = get_output_color_space(timing_out);
4897
e43a432c
AK
4898 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4899 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
ea117312
TA
4900 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4901 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4902 drm_mode_is_420_also(info, mode_in) &&
4903 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4904 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4905 adjust_colour_depth_from_display_info(timing_out, info);
4906 }
4907 }
e7b07cee
HW
4908}
4909
static void fill_audio_info(struct audio_info *audio_info,
			    const struct drm_connector *drm_connector,
			    const struct dc_sink *dc_sink)
{
	int i = 0;
	int cea_revision = 0;
	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;

	audio_info->manufacture_id = edid_caps->manufacturer_id;
	audio_info->product_id = edid_caps->product_id;

	cea_revision = drm_connector->display_info.cea_rev;

	strscpy(audio_info->display_name,
		edid_caps->display_name,
		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);

	if (cea_revision >= 3) {
		audio_info->mode_count = edid_caps->audio_mode_count;

		for (i = 0; i < audio_info->mode_count; ++i) {
			audio_info->modes[i].format_code =
					(enum audio_format_code)
					(edid_caps->audio_modes[i].format_code);
			audio_info->modes[i].channel_count =
					edid_caps->audio_modes[i].channel_count;
			audio_info->modes[i].sample_rates.all =
					edid_caps->audio_modes[i].sample_rate;
			audio_info->modes[i].sample_size =
					edid_caps->audio_modes[i].sample_size;
		}
	}

	audio_info->flags.all = edid_caps->speaker_flags;

	/* TODO: We only check for the progressive mode, check for interlace mode too */
	if (drm_connector->latency_present[0]) {
		audio_info->video_latency = drm_connector->video_latency[0];
		audio_info->audio_latency = drm_connector->audio_latency[0];
	}

	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
}

static void
copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
				      struct drm_display_mode *dst_mode)
{
	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
	dst_mode->crtc_clock = src_mode->crtc_clock;
	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
	dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
	dst_mode->crtc_htotal = src_mode->crtc_htotal;
	dst_mode->crtc_hskew = src_mode->crtc_hskew;
	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
}

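/*
 * Pick the CRTC timing to program: take the native mode's timing when
 * scaling is enabled, or when the requested mode already shares the native
 * mode's pixel clock and totals; otherwise leave the requested timing
 * untouched.
 */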
static void
decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
					const struct drm_display_mode *native_mode,
					bool scale_enabled)
{
	if (scale_enabled) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else if (native_mode->clock == drm_mode->clock &&
		   native_mode->htotal == drm_mode->htotal &&
		   native_mode->vtotal == drm_mode->vtotal) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else {
		/* no scaling nor amdgpu inserted, no need to patch */
	}
}

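/*
 * Create a virtual sink so a stream can still be constructed when the
 * connector has no physical dc_sink attached (e.g. headless or forced-on
 * connectors).
 */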
static struct dc_sink *
create_fake_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct dc_sink *sink = NULL;

	sink_init_data.link = aconnector->dc_link;
	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DRM_ERROR("Failed to create sink!\n");
		return NULL;
	}
	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;

	return sink;
}

static void set_multisync_trigger_params(
		struct dc_stream_state *stream)
{
	if (stream->triggered_crtc_reset.enabled) {
		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
	}
}

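/*
 * Elect the stream with the highest refresh rate as the multisync master and
 * point every synchronized stream's CRTC-reset event source at it.
 */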
static void set_master_stream(struct dc_stream_state *stream_set[],
			      int stream_count)
{
	int j, highest_rfr = 0, master_stream = 0;

	for (j = 0; j < stream_count; j++) {
		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
			int refresh_rate = 0;

			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
			if (refresh_rate > highest_rfr) {
				highest_rfr = refresh_rate;
				master_stream = j;
			}
		}
	}
	for (j = 0; j < stream_count; j++) {
		if (stream_set[j])
			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
	}
}

static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
{
	int i = 0;

	if (context->stream_count < 2)
		return;
	for (i = 0; i < context->stream_count; i++) {
		if (!context->streams[i])
			continue;
		/*
		 * TODO: add a function to read AMD VSDB bits and set
		 * crtc_sync_master.multi_sync_enabled flag
		 * For now it's set to false
		 */
		set_multisync_trigger_params(context->streams[i]);
	}
	set_master_stream(context->streams, context->stream_count);
}

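/*
 * Build a dc_stream_state for the given connector and mode: pick the sink
 * (real or fake), fill in timing, audio, DSC and infopacket state, and leave
 * validation of the resulting stream to the caller.
 */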
static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
		       const struct drm_display_mode *drm_mode,
		       const struct dm_connector_state *dm_state,
		       const struct dc_stream_state *old_stream,
		       int requested_bpc)
{
	struct drm_display_mode *preferred_mode = NULL;
	struct drm_connector *drm_connector;
	const struct drm_connector_state *con_state =
		dm_state ? &dm_state->base : NULL;
	struct dc_stream_state *stream = NULL;
	struct drm_display_mode mode = *drm_mode;
	bool native_mode_found = false;
	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
	int mode_refresh;
	int preferred_refresh = 0;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	struct dsc_dec_dpcd_caps dsc_caps;
#endif
	uint32_t link_bandwidth_kbps;

	struct dc_sink *sink = NULL;

	if (aconnector == NULL) {
		DRM_ERROR("aconnector is NULL!\n");
		return stream;
	}

	drm_connector = &aconnector->base;

	if (!aconnector->dc_sink) {
		sink = create_fake_sink(aconnector);
		if (!sink)
			return stream;
	} else {
		sink = aconnector->dc_sink;
		dc_sink_retain(sink);
	}

	stream = dc_create_stream_for_sink(sink);

	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto finish;
	}

	stream->dm_stream_context = aconnector;

	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;

	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
		/* Search for preferred mode */
		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
			native_mode_found = true;
			break;
		}
	}
	if (!native_mode_found)
		preferred_mode = list_first_entry_or_null(
				&aconnector->base.modes,
				struct drm_display_mode,
				head);

	mode_refresh = drm_mode_vrefresh(&mode);

	if (preferred_mode == NULL) {
		/*
		 * This may not be an error, the use case is when we have no
		 * usermode calls to reset and set mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous mode
		 * and the modelist may not be filled in yet.
		 */
		DRM_DEBUG_DRIVER("No preferred mode found\n");
	} else {
		decide_crtc_timing_for_drm_display_mode(
				&mode, preferred_mode,
				dm_state ? (dm_state->scaling != RMX_OFF) : false);
		preferred_refresh = drm_mode_vrefresh(preferred_mode);
	}

	if (!dm_state)
		drm_mode_set_crtcinfo(&mode, 0);

	/*
	 * If scaling is enabled and the refresh rate didn't change,
	 * copy the vic and polarities of the old timings.
	 */
	if (!scale || mode_refresh != preferred_refresh)
		fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base, con_state, NULL, requested_bpc);
	else
		fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base, con_state, old_stream, requested_bpc);

	stream->timing.flags.DSC = 0;

	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
				      &dsc_caps);
#endif
		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
							     dc_link_get_link_cap(aconnector->dc_link));

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
			/* Set DSC policy according to dsc_clock_en */
			dc_dsc_policy_set_enable_dsc_when_not_needed(
				aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);

			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
						  &dsc_caps,
						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
						  0,
						  link_bandwidth_kbps,
						  &stream->timing,
						  &stream->timing.dsc_cfg))
				stream->timing.flags.DSC = 1;
			/* Overwrite the stream flag if DSC is enabled through debugfs */
			if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
				stream->timing.flags.DSC = 1;

			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
				stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;

			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
				stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;

			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
				stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
		}
#endif
	}

	update_stream_scaling_settings(&mode, dm_state, stream);

	fill_audio_info(
		&stream->audio_info,
		drm_connector,
		sink);

	update_stream_signal(stream, sink);

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);

	if (stream->link->psr_settings.psr_feature_enabled) {
		/*
		 * Decide whether the stream supports VSC SDP colorimetry
		 * before building the VSC info packet.
		 */
		stream->use_vsc_sdp_for_colorimetry = false;
		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
			stream->use_vsc_sdp_for_colorimetry =
				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
		} else {
			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
				stream->use_vsc_sdp_for_colorimetry = true;
		}
		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
	}
finish:
	dc_sink_release(sink);

	return stream;
}

static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}

static void dm_crtc_destroy_state(struct drm_crtc *crtc,
				  struct drm_crtc_state *state)
{
	struct dm_crtc_state *cur = to_dm_crtc_state(state);

	/* TODO: Destroy dc_stream objects here once the stream object is flattened */
	if (cur->stream)
		dc_stream_release(cur->stream);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(state);
}

static void dm_crtc_reset_state(struct drm_crtc *crtc)
{
	struct dm_crtc_state *state;

	if (crtc->state)
		dm_crtc_destroy_state(crtc, crtc->state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (WARN_ON(!state))
		return;

	__drm_atomic_helper_crtc_reset(crtc, &state->base);
}

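/*
 * Duplicate the CRTC state for an atomic commit, taking an extra reference
 * on the backing dc_stream so the old and new state can both safely hold it.
 */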
static struct drm_crtc_state *
dm_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct dm_crtc_state *state, *cur;

	cur = to_dm_crtc_state(crtc->state);

	if (WARN_ON(!crtc->state))
		return NULL;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);

	if (cur->stream) {
		state->stream = cur->stream;
		dc_stream_retain(state->stream);
	}

	state->active_planes = cur->active_planes;
	state->vrr_infopacket = cur->vrr_infopacket;
	state->abm_level = cur->abm_level;
	state->vrr_supported = cur->vrr_supported;
	state->freesync_config = cur->freesync_config;
	state->crc_src = cur->crc_src;
	state->cm_has_degamma = cur->cm_has_degamma;
	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
#ifdef CONFIG_DEBUG_FS
	state->crc_window = cur->crc_window;
#endif
	/* TODO: Duplicate dc_stream here once the stream object is flattened */

	return &state->base;
}

#ifdef CONFIG_DEBUG_FS
int amdgpu_dm_crtc_atomic_set_property(struct drm_crtc *crtc,
				       struct drm_crtc_state *crtc_state,
				       struct drm_property *property,
				       uint64_t val)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_crtc_state *dm_new_state =
		to_dm_crtc_state(crtc_state);

	if (property == adev->dm.crc_win_x_start_property)
		dm_new_state->crc_window.x_start = val;
	else if (property == adev->dm.crc_win_y_start_property)
		dm_new_state->crc_window.y_start = val;
	else if (property == adev->dm.crc_win_x_end_property)
		dm_new_state->crc_window.x_end = val;
	else if (property == adev->dm.crc_win_y_end_property)
		dm_new_state->crc_window.y_end = val;
	else
		return -EINVAL;

	return 0;
}

int amdgpu_dm_crtc_atomic_get_property(struct drm_crtc *crtc,
				       const struct drm_crtc_state *state,
				       struct drm_property *property,
				       uint64_t *val)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_crtc_state *dm_state =
		to_dm_crtc_state(state);

	if (property == adev->dm.crc_win_x_start_property)
		*val = dm_state->crc_window.x_start;
	else if (property == adev->dm.crc_win_y_start_property)
		*val = dm_state->crc_window.y_start;
	else if (property == adev->dm.crc_win_x_end_property)
		*val = dm_state->crc_window.x_end;
	else if (property == adev->dm.crc_win_y_end_property)
		*val = dm_state->crc_window.y_end;
	else
		return -EINVAL;

	return 0;
}
#endif

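/*
 * Enable or disable the VUPDATE interrupt for this CRTC. It is only needed
 * while variable refresh rate is active; see dm_set_vblank() below.
 */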
static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
	int rc;

	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;

	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;

	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
			 acrtc->crtc_id, enable ? "en" : "dis", rc);
	return rc;
}

static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
	int rc = 0;

	if (enable) {
		/* vblank irq on -> Only need vupdate irq in vrr mode */
		if (amdgpu_dm_vrr_active(acrtc_state))
			rc = dm_set_vupdate_irq(crtc, true);
	} else {
		/* vblank irq off -> vupdate irq off */
		rc = dm_set_vupdate_irq(crtc, false);
	}

	if (rc)
		return rc;

	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
}

static int dm_enable_vblank(struct drm_crtc *crtc)
{
	return dm_set_vblank(crtc, true);
}

static void dm_disable_vblank(struct drm_crtc *crtc)
{
	dm_set_vblank(crtc, false);
}

/* Implements only the options currently available for the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
	.reset = dm_crtc_reset_state,
	.destroy = amdgpu_dm_crtc_destroy,
	.gamma_set = drm_atomic_helper_legacy_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = dm_crtc_duplicate_state,
	.atomic_destroy_state = dm_crtc_destroy_state,
	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
	.enable_vblank = dm_enable_vblank,
	.disable_vblank = dm_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
#ifdef CONFIG_DEBUG_FS
	.atomic_set_property = amdgpu_dm_crtc_atomic_set_property,
	.atomic_get_property = amdgpu_dm_crtc_atomic_get_property,
#endif
};

static enum drm_connector_status
amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
{
	bool connected;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	/*
	 * Notes:
	 * 1. This interface is NOT called in context of HPD irq.
	 * 2. This interface *is called* in context of user-mode ioctl, which
	 *    makes it a bad place for *any* MST-related activity.
	 */

	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
	    !aconnector->fake_enable)
		connected = (aconnector->dc_sink != NULL);
	else
		connected = (aconnector->base.force == DRM_FORCE_ON);

	update_subconnector_property(aconnector);

	return (connected ? connector_status_connected :
			connector_status_disconnected);
}

int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
					    struct drm_connector_state *connector_state,
					    struct drm_property *property,
					    uint64_t val)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_connector_state *dm_old_state =
		to_dm_connector_state(connector->state);
	struct dm_connector_state *dm_new_state =
		to_dm_connector_state(connector_state);

	int ret = -EINVAL;

	if (property == dev->mode_config.scaling_mode_property) {
		enum amdgpu_rmx_type rmx_type;

		switch (val) {
		case DRM_MODE_SCALE_CENTER:
			rmx_type = RMX_CENTER;
			break;
		case DRM_MODE_SCALE_ASPECT:
			rmx_type = RMX_ASPECT;
			break;
		case DRM_MODE_SCALE_FULLSCREEN:
			rmx_type = RMX_FULL;
			break;
		case DRM_MODE_SCALE_NONE:
		default:
			rmx_type = RMX_OFF;
			break;
		}

		if (dm_old_state->scaling == rmx_type)
			return 0;

		dm_new_state->scaling = rmx_type;
		ret = 0;
	} else if (property == adev->mode_info.underscan_hborder_property) {
		dm_new_state->underscan_hborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_vborder_property) {
		dm_new_state->underscan_vborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_property) {
		dm_new_state->underscan_enable = val;
		ret = 0;
	} else if (property == adev->mode_info.abm_level_property) {
		dm_new_state->abm_level = val;
		ret = 0;
	}

	return ret;
}

int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
					    const struct drm_connector_state *state,
					    struct drm_property *property,
					    uint64_t *val)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_connector_state *dm_state =
		to_dm_connector_state(state);
	int ret = -EINVAL;

	if (property == dev->mode_config.scaling_mode_property) {
		switch (dm_state->scaling) {
		case RMX_CENTER:
			*val = DRM_MODE_SCALE_CENTER;
			break;
		case RMX_ASPECT:
			*val = DRM_MODE_SCALE_ASPECT;
			break;
		case RMX_FULL:
			*val = DRM_MODE_SCALE_FULLSCREEN;
			break;
		case RMX_OFF:
		default:
			*val = DRM_MODE_SCALE_NONE;
			break;
		}
		ret = 0;
	} else if (property == adev->mode_info.underscan_hborder_property) {
		*val = dm_state->underscan_hborder;
		ret = 0;
	} else if (property == adev->mode_info.underscan_vborder_property) {
		*val = dm_state->underscan_vborder;
		ret = 0;
	} else if (property == adev->mode_info.underscan_property) {
		*val = dm_state->underscan_enable;
		ret = 0;
	} else if (property == adev->mode_info.abm_level_property) {
		*val = dm_state->abm_level;
		ret = 0;
	}

	return ret;
}

static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);

	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
}

static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	const struct dc_link *link = aconnector->dc_link;
	struct amdgpu_device *adev = drm_to_adev(connector->dev);
	struct amdgpu_display_manager *dm = &adev->dm;

	/*
	 * Call only if mst_mgr was initialized before, since it's not done
	 * for all connector types.
	 */
	if (aconnector->mst_mgr.dev)
		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none &&
	    dm->backlight_dev) {
		backlight_device_unregister(dm->backlight_dev);
		dm->backlight_dev = NULL;
	}
#endif

	if (aconnector->dc_em_sink)
		dc_sink_release(aconnector->dc_em_sink);
	aconnector->dc_em_sink = NULL;
	if (aconnector->dc_sink)
		dc_sink_release(aconnector->dc_sink);
	aconnector->dc_sink = NULL;

	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	if (aconnector->i2c) {
		i2c_del_adapter(&aconnector->i2c->base);
		kfree(aconnector->i2c);
	}
	kfree(aconnector->dm_dp_aux.aux.name);

	kfree(connector);
}

void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	if (connector->state)
		__drm_atomic_helper_connector_destroy_state(connector->state);

	kfree(state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (state) {
		state->scaling = RMX_OFF;
		state->underscan_enable = false;
		state->underscan_hborder = 0;
		state->underscan_vborder = 0;
		state->base.max_requested_bpc = 8;
		state->vcpi_slots = 0;
		state->pbn = 0;
		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
			state->abm_level = amdgpu_dm_abm_level;

		__drm_atomic_helper_connector_reset(connector, &state->base);
	}
}

struct drm_connector_state *
amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	struct dm_connector_state *new_state =
		kmemdup(state, sizeof(*state), GFP_KERNEL);

	if (!new_state)
		return NULL;

	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);

	new_state->freesync_capable = state->freesync_capable;
	new_state->abm_level = state->abm_level;
	new_state->scaling = state->scaling;
	new_state->underscan_enable = state->underscan_enable;
	new_state->underscan_hborder = state->underscan_hborder;
	new_state->underscan_vborder = state->underscan_vborder;
	new_state->vcpi_slots = state->vcpi_slots;
	new_state->pbn = state->pbn;
	return &new_state->base;
}

static int
amdgpu_dm_connector_late_register(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	int r;

	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
		if (r)
			return r;
	}

#if defined(CONFIG_DEBUG_FS)
	connector_debugfs_init(amdgpu_dm_connector);
#endif

	return 0;
}

static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
	.reset = amdgpu_dm_connector_funcs_reset,
	.detect = amdgpu_dm_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = amdgpu_dm_connector_destroy,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
	.late_register = amdgpu_dm_connector_late_register,
	.early_unregister = amdgpu_dm_connector_unregister
};

static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}

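/*
 * Build an emulated sink from the connector's forced EDID blob so a forced-on
 * connector can be driven without a physically attached display.
 */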
static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data init_params = {
			.link = aconnector->dc_link,
			.sink_signal = SIGNAL_TYPE_VIRTUAL
	};
	struct edid *edid;

	if (!aconnector->base.edid_blob_ptr) {
		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
			  aconnector->base.name);

		aconnector->base.force = DRM_FORCE_OFF;
		aconnector->base.override_edid = false;
		return;
	}

	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;

	aconnector->edid = edid;

	aconnector->dc_em_sink = dc_link_add_remote_sink(
		aconnector->dc_link,
		(uint8_t *)edid,
		(edid->extensions + 1) * EDID_LENGTH,
		&init_params);

	if (aconnector->base.force == DRM_FORCE_ON) {
		aconnector->dc_sink = aconnector->dc_link->local_sink ?
		aconnector->dc_link->local_sink :
		aconnector->dc_em_sink;
		dc_sink_retain(aconnector->dc_sink);
	}
}

static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = (struct dc_link *)aconnector->dc_link;

	/*
	 * In case of headless boot with force on for a DP managed connector,
	 * these settings have to be != 0 to get an initial modeset.
	 */
	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
	}

	aconnector->base.override_edid = true;
	create_eml_sink(aconnector);
}

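/*
 * Create a stream and validate it with DC; on validation failure, retry at
 * progressively lower color depth (requested bpc minus 2 each pass, down to
 * 6 bpc) before giving up.
 */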
static struct dc_stream_state *
create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
				const struct drm_display_mode *drm_mode,
				const struct dm_connector_state *dm_state,
				const struct dc_stream_state *old_stream)
{
	struct drm_connector *connector = &aconnector->base;
	struct amdgpu_device *adev = drm_to_adev(connector->dev);
	struct dc_stream_state *stream;
	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
	enum dc_status dc_result = DC_OK;

	do {
		stream = create_stream_for_sink(aconnector, drm_mode,
						dm_state, old_stream,
						requested_bpc);
		if (stream == NULL) {
			DRM_ERROR("Failed to create stream for sink!\n");
			break;
		}

		dc_result = dc_validate_stream(adev->dm.dc, stream);

		if (dc_result != DC_OK) {
			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
				      drm_mode->hdisplay,
				      drm_mode->vdisplay,
				      drm_mode->clock,
				      dc_result,
				      dc_status_to_str(dc_result));

			dc_stream_release(stream);
			stream = NULL;
			requested_bpc -= 2; /* lower bpc to retry validation */
		}

	} while (stream == NULL && requested_bpc >= 6);

	return stream;
}

enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
						    struct drm_display_mode *mode)
{
	int result = MODE_ERROR;
	struct dc_sink *dc_sink;
	/* TODO: Unhardcode stream count */
	struct dc_stream_state *stream;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
	    (mode->flags & DRM_MODE_FLAG_DBLSCAN))
		return result;

	/*
	 * Only run this the first time mode_valid is called to initialize
	 * EDID mgmt
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
	    !aconnector->dc_em_sink)
		handle_edid_mgmt(aconnector);

	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;

	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
	    aconnector->base.force != DRM_FORCE_ON) {
		DRM_ERROR("dc_sink is NULL!\n");
		goto fail;
	}

	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
	if (stream) {
		dc_stream_release(stream);
		result = MODE_OK;
	}

fail:
	/* TODO: error handling */
	return result;
}

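/*
 * Pack the connector's HDR static metadata into the infopacket layout DC
 * expects: an HDMI Dynamic Range and Mastering (DRM) infoframe for HDMI, or
 * the equivalent SDP for DP/eDP.
 */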
static int fill_hdr_info_packet(const struct drm_connector_state *state,
				struct dc_info_packet *out)
{
	struct hdmi_drm_infoframe frame;
	unsigned char buf[30]; /* 26 + 4 */
	ssize_t len;
	int ret, i;

	memset(out, 0, sizeof(*out));

	if (!state->hdr_output_metadata)
		return 0;

	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
	if (ret)
		return ret;

	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
	if (len < 0)
		return (int)len;

	/* Static metadata is a fixed 26 bytes + 4 byte header. */
	if (len != 30)
		return -EINVAL;

	/* Prepare the infopacket for DC. */
	switch (state->connector->connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		out->hb0 = 0x87; /* type */
		out->hb1 = 0x01; /* version */
		out->hb2 = 0x1A; /* length */
		out->sb[0] = buf[3]; /* checksum */
		i = 1;
		break;

	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		out->hb0 = 0x00; /* sdp id, zero */
		out->hb1 = 0x87; /* type */
		out->hb2 = 0x1D; /* payload len - 1 */
		out->hb3 = (0x13 << 2); /* sdp version */
		out->sb[0] = 0x01; /* version */
		out->sb[1] = 0x1A; /* length */
		i = 2;
		break;

	default:
		return -EINVAL;
	}

	memcpy(&out->sb[i], &buf[4], 26);
	out->valid = true;

	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
		       sizeof(out->sb), false);

	return 0;
}

static bool
is_hdr_metadata_different(const struct drm_connector_state *old_state,
			  const struct drm_connector_state *new_state)
{
	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;

	if (old_blob != new_blob) {
		if (old_blob && new_blob &&
		    old_blob->length == new_blob->length)
			return memcmp(old_blob->data, new_blob->data,
				      old_blob->length);

		return true;
	}

	return false;
}

static int
amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
				 struct drm_atomic_state *state)
{
	struct drm_connector_state *new_con_state =
		drm_atomic_get_new_connector_state(state, conn);
	struct drm_connector_state *old_con_state =
		drm_atomic_get_old_connector_state(state, conn);
	struct drm_crtc *crtc = new_con_state->crtc;
	struct drm_crtc_state *new_crtc_state;
	int ret;

	trace_amdgpu_dm_connector_atomic_check(new_con_state);

	if (!crtc)
		return 0;

	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
		struct dc_info_packet hdr_infopacket;

		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
		if (ret)
			return ret;

		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(new_crtc_state))
			return PTR_ERR(new_crtc_state);

		/*
		 * DC considers the stream backends changed if the
		 * static metadata changes. Forcing the modeset also
		 * gives a simple way for userspace to switch from
		 * 8bpc to 10bpc when setting the metadata to enter
		 * or exit HDR.
		 *
		 * Changing the static metadata after it's been
		 * set is permissible, however. So only force a
		 * modeset if we're entering or exiting HDR.
		 */
		new_crtc_state->mode_changed =
			!old_con_state->hdr_output_metadata ||
			!new_con_state->hdr_output_metadata;
	}

	return 0;
}

static const struct drm_connector_helper_funcs
amdgpu_dm_connector_helper_funcs = {
	/*
	 * If hotplugging a second bigger display in FB Con mode, bigger
	 * resolution modes will be filtered by drm_mode_validate_size(),
	 * and those modes will be missing after the user starts lightdm.
	 * So we need to renew the modes list in the get_modes callback,
	 * not just return the modes count.
	 */
	.get_modes = get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
	.atomic_check = amdgpu_dm_connector_atomic_check,
};

static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}

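/*
 * Count the non-cursor planes that will be enabled on the CRTC after this
 * commit; cursor planes are skipped since the display manager treats them
 * as "fake" planes.
 */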
static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
{
	struct drm_atomic_state *state = new_crtc_state->state;
	struct drm_plane *plane;
	int num_active = 0;

	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
		struct drm_plane_state *new_plane_state;

		/* Cursor planes are "fake". */
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		new_plane_state = drm_atomic_get_new_plane_state(state, plane);

		if (!new_plane_state) {
			/*
			 * The plane is enabled on the CRTC and hasn't changed
			 * state. This means that it previously passed
			 * validation and is therefore enabled.
			 */
			num_active += 1;
			continue;
		}

		/* We need a framebuffer to be considered enabled. */
		num_active += (new_plane_state->fb != NULL);
	}

	return num_active;
}

static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
					 struct drm_crtc_state *new_crtc_state)
{
	struct dm_crtc_state *dm_new_crtc_state =
		to_dm_crtc_state(new_crtc_state);

	dm_new_crtc_state->active_planes = 0;

	if (!dm_new_crtc_state->stream)
		return;

	dm_new_crtc_state->active_planes =
		count_crtc_active_planes(new_crtc_state);
}

static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
				       struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
	struct dc *dc = adev->dm.dc;
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
	int ret = -EINVAL;

	trace_amdgpu_dm_crtc_atomic_check(crtc_state);

	dm_update_crtc_active_planes(crtc, crtc_state);

	if (unlikely(!dm_crtc_state->stream &&
		     modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
		WARN_ON(1);
		return ret;
	}

	/*
	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
	 * planes are disabled, which is not supported by the hardware. And there is legacy
	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
	 */
	if (crtc_state->enable &&
	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary)))
		return -EINVAL;

	/* In some use cases, like reset, no stream is attached */
	if (!dm_crtc_state->stream)
		return 0;

	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
		return 0;

	return ret;
}

static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
				      const struct drm_display_mode *mode,
				      struct drm_display_mode *adjusted_mode)
{
	return true;
}

static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
	.disable = dm_crtc_helper_disable,
	.atomic_check = dm_crtc_helper_atomic_check,
	.mode_fixup = dm_crtc_helper_mode_fixup,
	.get_scanout_position = amdgpu_crtc_get_scanout_position,
};

static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{

}

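/* Map a DC color depth to bits per component; returns 0 for unknown values. */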
static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
{
	switch (display_color_depth) {
	case COLOR_DEPTH_666:
		return 6;
	case COLOR_DEPTH_888:
		return 8;
	case COLOR_DEPTH_101010:
		return 10;
	case COLOR_DEPTH_121212:
		return 12;
	case COLOR_DEPTH_141414:
		return 14;
	case COLOR_DEPTH_161616:
		return 16;
	default:
		break;
	}
	return 0;
}

static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
					  struct drm_crtc_state *crtc_state,
					  struct drm_connector_state *conn_state)
{
	struct drm_atomic_state *state = crtc_state->state;
	struct drm_connector *connector = conn_state->connector;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;
	enum dc_color_depth color_depth;
	int clock, bpp = 0;
	bool is_y420 = false;

	if (!aconnector->port || !aconnector->dc_sink)
		return 0;

	mst_port = aconnector->port;
	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
		return 0;

	if (!state->duplicated) {
		int max_bpc = conn_state->max_requested_bpc;

		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
			  aconnector->force_yuv420_output;
		color_depth = convert_color_depth_from_display_info(connector,
								    is_y420,
								    max_bpc);
		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
		clock = adjusted_mode->clock;
		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
	}
	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
									   mst_mgr,
									   mst_port,
									   dm_new_connector_state->pbn,
									   dm_mst_get_pbn_divider(aconnector->dc_link));
	if (dm_new_connector_state->vcpi_slots < 0) {
		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
		return dm_new_connector_state->vcpi_slots;
	}
	return 0;
}

const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
	.disable = dm_encoder_helper_disable,
	.atomic_check = dm_encoder_helper_atomic_check
};

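/*
 * Recompute the PBN and VCPI slot allocation for every MST stream in the
 * atomic state: DSC changes the effective bits per pixel, so the bandwidth
 * reserved on the MST link has to be recalculated from the compressed rate.
 */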
#if defined(CONFIG_DRM_AMD_DC_DCN)
static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
					    struct dc_state *dc_state)
{
	struct dc_stream_state *stream = NULL;
	struct drm_connector *connector;
	struct drm_connector_state *new_con_state, *old_con_state;
	struct amdgpu_dm_connector *aconnector;
	struct dm_connector_state *dm_conn_state;
	int i, j, clock, bpp;
	int vcpi, pbn_div, pbn = 0;

	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {

		aconnector = to_amdgpu_dm_connector(connector);

		if (!aconnector->port)
			continue;

		if (!new_con_state || !new_con_state->crtc)
			continue;

		dm_conn_state = to_dm_connector_state(new_con_state);

		for (j = 0; j < dc_state->stream_count; j++) {
			stream = dc_state->streams[j];
			if (!stream)
				continue;

			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
				break;

			stream = NULL;
		}

		if (!stream)
			continue;

		if (stream->timing.flags.DSC != 1) {
			drm_dp_mst_atomic_enable_dsc(state,
						     aconnector->port,
						     dm_conn_state->pbn,
						     0,
						     false);
			continue;
		}

		pbn_div = dm_mst_get_pbn_divider(stream->link);
		bpp = stream->timing.dsc_cfg.bits_per_pixel;
		clock = stream->timing.pix_clk_100hz / 10;
		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
		vcpi = drm_dp_mst_atomic_enable_dsc(state,
						    aconnector->port,
						    pbn, pbn_div,
						    true);
		if (vcpi < 0)
			return vcpi;

		dm_conn_state->pbn = pbn;
		dm_conn_state->vcpi_slots = vcpi;
	}
	return 0;
}
#endif

static void dm_drm_plane_reset(struct drm_plane *plane)
{
	struct dm_plane_state *amdgpu_state = NULL;

	if (plane->state)
		plane->funcs->atomic_destroy_state(plane, plane->state);

	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
	WARN_ON(amdgpu_state == NULL);

	if (amdgpu_state)
		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
}

static struct drm_plane_state *
dm_drm_plane_duplicate_state(struct drm_plane *plane)
{
	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;

	old_dm_plane_state = to_dm_plane_state(plane->state);
	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
	if (!dm_plane_state)
		return NULL;

	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);

	if (old_dm_plane_state->dc_state) {
		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
		dc_plane_state_retain(dm_plane_state->dc_state);
	}

	return &dm_plane_state->base;
}

static void dm_drm_plane_destroy_state(struct drm_plane *plane,
				       struct drm_plane_state *state)
{
	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);

	if (dm_plane_state->dc_state)
		dc_plane_state_release(dm_plane_state->dc_state);

	drm_atomic_helper_plane_destroy_state(plane, state);
}

static const struct drm_plane_funcs dm_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = drm_primary_helper_destroy,
	.reset = dm_drm_plane_reset,
	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
	.atomic_destroy_state = dm_drm_plane_destroy_state,
	.format_mod_supported = dm_plane_format_mod_supported,
};

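/*
 * Prepare the plane's framebuffer for scanout: reserve the backing BO, pin it
 * (VRAM for cursors, otherwise any domain the display supports), map it into
 * GART, record the resulting GPU address, and fill the DC buffer attributes
 * for newly created planes.
 */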
static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
				      struct drm_plane_state *new_state)
{
	struct amdgpu_framebuffer *afb;
	struct drm_gem_object *obj;
	struct amdgpu_device *adev;
	struct amdgpu_bo *rbo;
	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
	struct list_head list;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	uint32_t domain;
	int r;

	if (!new_state->fb) {
		DRM_DEBUG_DRIVER("No FB bound\n");
		return 0;
	}

	afb = to_amdgpu_framebuffer(new_state->fb);
	obj = new_state->fb->obj[0];
	rbo = gem_to_amdgpu_bo(obj);
	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
	INIT_LIST_HEAD(&list);

	tv.bo = &rbo->tbo;
	tv.num_shared = 1;
	list_add(&tv.head, &list);

	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
	if (r) {
		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
		return r;
	}

	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		domain = amdgpu_display_supported_domains(adev, rbo->flags);
	else
		domain = AMDGPU_GEM_DOMAIN_VRAM;

	r = amdgpu_bo_pin(rbo, domain);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
	if (unlikely(r != 0)) {
		amdgpu_bo_unpin(rbo);
		ttm_eu_backoff_reservation(&ticket, &list);
		DRM_ERROR("%p bind failed\n", rbo);
		return r;
	}

	ttm_eu_backoff_reservation(&ticket, &list);

	afb->address = amdgpu_bo_gpu_offset(rbo);

	amdgpu_bo_ref(rbo);

	/*
	 * We don't do surface updates on planes that have been newly created,
	 * but we also don't have the afb->address during atomic check.
	 *
	 * Fill in buffer attributes depending on the address here, but only on
	 * newly created planes since they're not being used by DC yet and this
	 * won't modify global state.
	 */
	dm_plane_state_old = to_dm_plane_state(plane->state);
	dm_plane_state_new = to_dm_plane_state(new_state);

	if (dm_plane_state_new->dc_state &&
	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
		struct dc_plane_state *plane_state =
			dm_plane_state_new->dc_state;
		bool force_disable_dcc = !plane_state->dcc.enable;

		fill_plane_buffer_attributes(
			adev, afb, plane_state->format, plane_state->rotation,
			afb->tiling_flags,
			&plane_state->tiling_info, &plane_state->plane_size,
			&plane_state->dcc, &plane_state->address,
			afb->tmz_surface, force_disable_dcc);
	}

	return 0;
}

static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
				       struct drm_plane_state *old_state)
{
	struct amdgpu_bo *rbo;
	int r;

	if (!old_state->fb)
		return;

	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
	r = amdgpu_bo_reserve(rbo, false);
	if (unlikely(r)) {
		DRM_ERROR("failed to reserve rbo before unpin\n");
		return;
	}

	amdgpu_bo_unpin(rbo);
	amdgpu_bo_unreserve(rbo);
	amdgpu_bo_unref(&rbo);
}

static int dm_plane_helper_check_state(struct drm_plane_state *state,
				       struct drm_crtc_state *new_crtc_state)
{
	int max_downscale = 0;
	int max_upscale = INT_MAX;

	/* TODO: These should be checked against DC plane caps */
	return drm_atomic_helper_check_plane_state(
		state, new_crtc_state, max_downscale, max_upscale, true, true);
}

static int dm_plane_atomic_check(struct drm_plane *plane,
				 struct drm_plane_state *state)
{
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	struct dc *dc = adev->dm.dc;
	struct dm_plane_state *dm_plane_state;
	struct dc_scaling_info scaling_info;
	struct drm_crtc_state *new_crtc_state;
	int ret;

	trace_amdgpu_dm_plane_atomic_check(state);

	dm_plane_state = to_dm_plane_state(state);

	if (!dm_plane_state->dc_state)
		return 0;

	new_crtc_state =
		drm_atomic_get_new_crtc_state(state->state, state->crtc);
	if (!new_crtc_state)
		return -EINVAL;

	ret = dm_plane_helper_check_state(state, new_crtc_state);
	if (ret)
		return ret;

	ret = fill_dc_scaling_info(state, &scaling_info);
	if (ret)
		return ret;

	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
		return 0;

	return -EINVAL;
}

static int dm_plane_atomic_async_check(struct drm_plane *plane,
				       struct drm_plane_state *new_plane_state)
{
	/* Only support async updates on cursor planes. */
	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		return -EINVAL;

	return 0;
}

static void dm_plane_atomic_async_update(struct drm_plane *plane,
					 struct drm_plane_state *new_state)
{
	struct drm_plane_state *old_state =
		drm_atomic_get_old_plane_state(new_state->state, plane);

	trace_amdgpu_dm_atomic_update_cursor(new_state);

	swap(plane->state->fb, new_state->fb);

	plane->state->src_x = new_state->src_x;
	plane->state->src_y = new_state->src_y;
	plane->state->src_w = new_state->src_w;
	plane->state->src_h = new_state->src_h;
	plane->state->crtc_x = new_state->crtc_x;
	plane->state->crtc_y = new_state->crtc_y;
	plane->state->crtc_w = new_state->crtc_w;
	plane->state->crtc_h = new_state->crtc_h;

	handle_cursor_update(plane, old_state);
}

static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
	.prepare_fb = dm_plane_helper_prepare_fb,
	.cleanup_fb = dm_plane_helper_cleanup_fb,
	.atomic_check = dm_plane_atomic_check,
	.atomic_async_check = dm_plane_atomic_async_check,
	.atomic_async_update = dm_plane_atomic_async_update
};

/*
 * TODO: these are currently initialized to rgb formats only.
 * For future use cases we should either initialize them dynamically based on
 * plane capabilities, or initialize this array to all formats, so internal drm
 * check will succeed, and let DC implement proper check
 */
static const uint32_t rgb_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_RGB565,
};

static const uint32_t overlay_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_RGB565
};

static const u32 cursor_formats[] = {
	DRM_FORMAT_ARGB8888
};

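/*
 * Fill formats[] with the format table matching the plane type, appending
 * NV12, P010 and FP16 formats to the primary plane list when the DC plane
 * caps advertise support for them.
 */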
static int get_plane_formats(const struct drm_plane *plane,
			     const struct dc_plane_cap *plane_cap,
			     uint32_t *formats, int max_formats)
{
	int i, num_formats = 0;

	/*
	 * TODO: Query support for each group of formats directly from
	 * DC plane caps. This will require adding more formats to the
	 * caps list.
	 */

	switch (plane->type) {
	case DRM_PLANE_TYPE_PRIMARY:
		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = rgb_formats[i];
		}

		if (plane_cap && plane_cap->pixel_format_support.nv12)
			formats[num_formats++] = DRM_FORMAT_NV12;
		if (plane_cap && plane_cap->pixel_format_support.p010)
			formats[num_formats++] = DRM_FORMAT_P010;
		if (plane_cap && plane_cap->pixel_format_support.fp16) {
			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
		}
		break;

	case DRM_PLANE_TYPE_OVERLAY:
		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = overlay_formats[i];
		}
		break;

	case DRM_PLANE_TYPE_CURSOR:
		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = cursor_formats[i];
		}
		break;
	}

	return num_formats;
}

6554static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6555 struct drm_plane *plane,
6556 unsigned long possible_crtcs,
6557 const struct dc_plane_cap *plane_cap)
6558{
6559 uint32_t formats[32];
6560 int num_formats;
6561 int res = -EPERM;
ecc874a6 6562 unsigned int supported_rotations;
faa37f54 6563 uint64_t *modifiers = NULL;
37c6a93b
NK
6564
6565 num_formats = get_plane_formats(plane, plane_cap, formats,
6566 ARRAY_SIZE(formats));
6567
faa37f54
BN
6568 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
6569 if (res)
6570 return res;
6571
4a580877 6572 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
37c6a93b 6573 &dm_plane_funcs, formats, num_formats,
faa37f54
BN
6574 modifiers, plane->type, NULL);
6575 kfree(modifiers);
37c6a93b
NK
6576 if (res)
6577 return res;
6578
cc1fec57
NK
6579 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6580 plane_cap && plane_cap->per_pixel_alpha) {
d74004b6
NK
6581 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6582 BIT(DRM_MODE_BLEND_PREMULTI);
6583
6584 drm_plane_create_alpha_property(plane);
6585 drm_plane_create_blend_mode_property(plane, blend_caps);
6586 }
6587
fc8e5230 6588 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
00755bb7
SW
6589 plane_cap &&
6590 (plane_cap->pixel_format_support.nv12 ||
6591 plane_cap->pixel_format_support.p010)) {
fc8e5230
NK
6592 /* This only affects YUV formats. */
6593 drm_plane_create_color_properties(
6594 plane,
6595 BIT(DRM_COLOR_YCBCR_BT601) |
00755bb7
SW
6596 BIT(DRM_COLOR_YCBCR_BT709) |
6597 BIT(DRM_COLOR_YCBCR_BT2020),
fc8e5230
NK
6598 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6599 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6600 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6601 }
6602
ecc874a6
PLG
6603 supported_rotations =
6604 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6605 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6606
f784112f
MR
6607 if (dm->adev->asic_type >= CHIP_BONAIRE)
6608 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6609 supported_rotations);
ecc874a6 6610
f180b4bc 6611 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
e7b07cee 6612
96719c54 6613 /* Create (reset) the plane state */
f180b4bc
HW
6614 if (plane->funcs->reset)
6615 plane->funcs->reset(plane);
96719c54 6616
37c6a93b 6617 return 0;
e7b07cee
HW
6618}

#ifdef CONFIG_DEBUG_FS
static void attach_crtc_crc_properties(struct amdgpu_display_manager *dm,
				       struct amdgpu_crtc *acrtc)
{
	drm_object_attach_property(&acrtc->base.base,
				   dm->crc_win_x_start_property,
				   0);
	drm_object_attach_property(&acrtc->base.base,
				   dm->crc_win_y_start_property,
				   0);
	drm_object_attach_property(&acrtc->base.base,
				   dm->crc_win_x_end_property,
				   0);
	drm_object_attach_property(&acrtc->base.base,
				   dm->crc_win_y_end_property,
				   0);
}
#endif

static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t crtc_index)
{
	struct amdgpu_crtc *acrtc = NULL;
	struct drm_plane *cursor_plane;

	int res = -ENOMEM;

	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
	if (!cursor_plane)
		goto fail;

	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
	if (res)
		goto fail;

	res = -ENOMEM;
	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
	if (!acrtc)
		goto fail;

	res = drm_crtc_init_with_planes(
			dm->ddev,
			&acrtc->base,
			plane,
			cursor_plane,
			&amdgpu_dm_crtc_funcs, NULL);

	if (res)
		goto fail;

	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);

	/* Create (reset) the crtc state */
	if (acrtc->base.funcs->reset)
		acrtc->base.funcs->reset(&acrtc->base);

	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;

	acrtc->crtc_id = crtc_index;
	acrtc->base.enabled = false;
	acrtc->otg_inst = -1;

	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
				   true, MAX_COLOR_LUT_ENTRIES);
	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
#ifdef CONFIG_DEBUG_FS
	attach_crtc_crc_properties(dm, acrtc);
#endif
	return 0;

fail:
	kfree(acrtc);
	kfree(cursor_plane);
	return res;
}


static int to_drm_connector_type(enum signal_type st)
{
	switch (st) {
	case SIGNAL_TYPE_HDMI_TYPE_A:
		return DRM_MODE_CONNECTOR_HDMIA;
	case SIGNAL_TYPE_EDP:
		return DRM_MODE_CONNECTOR_eDP;
	case SIGNAL_TYPE_LVDS:
		return DRM_MODE_CONNECTOR_LVDS;
	case SIGNAL_TYPE_RGB:
		return DRM_MODE_CONNECTOR_VGA;
	case SIGNAL_TYPE_DISPLAY_PORT:
	case SIGNAL_TYPE_DISPLAY_PORT_MST:
		return DRM_MODE_CONNECTOR_DisplayPort;
	case SIGNAL_TYPE_DVI_DUAL_LINK:
	case SIGNAL_TYPE_DVI_SINGLE_LINK:
		return DRM_MODE_CONNECTOR_DVID;
	case SIGNAL_TYPE_VIRTUAL:
		return DRM_MODE_CONNECTOR_VIRTUAL;

	default:
		return DRM_MODE_CONNECTOR_Unknown;
	}
}

static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
{
	struct drm_encoder *encoder;

	/* There is only one encoder per connector */
	drm_connector_for_each_possible_encoder(connector, encoder)
		return encoder;

	return NULL;
}

static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
{
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (encoder == NULL)
		return;

	amdgpu_encoder = to_amdgpu_encoder(encoder);

	amdgpu_encoder->native_mode.clock = 0;

	if (!list_empty(&connector->probed_modes)) {
		struct drm_display_mode *preferred_mode = NULL;

		list_for_each_entry(preferred_mode,
				    &connector->probed_modes,
				    head) {
			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
				amdgpu_encoder->native_mode = *preferred_mode;

			break;
		}
	}
}

static struct drm_display_mode *
amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
			     char *name,
			     int hdisplay, int vdisplay)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;

	mode = drm_mode_duplicate(dev, native_mode);

	if (mode == NULL)
		return NULL;

	mode->hdisplay = hdisplay;
	mode->vdisplay = vdisplay;
	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);

	return mode;
}

static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
						 struct drm_connector *connector)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	int i;
	int n;
	struct mode_size {
		char name[DRM_DISPLAY_MODE_LEN];
		int w;
		int h;
	} common_modes[] = {
		{  "640x480",  640,  480},
		{  "800x600",  800,  600},
		{ "1024x768", 1024,  768},
		{ "1280x720", 1280,  720},
		{ "1280x800", 1280,  800},
		{"1280x1024", 1280, 1024},
		{ "1440x900", 1440,  900},
		{"1680x1050", 1680, 1050},
		{"1600x1200", 1600, 1200},
		{"1920x1080", 1920, 1080},
		{"1920x1200", 1920, 1200}
	};

	n = ARRAY_SIZE(common_modes);

	for (i = 0; i < n; i++) {
		struct drm_display_mode *curmode = NULL;
		bool mode_existed = false;

		if (common_modes[i].w > native_mode->hdisplay ||
		    common_modes[i].h > native_mode->vdisplay ||
		    (common_modes[i].w == native_mode->hdisplay &&
		     common_modes[i].h == native_mode->vdisplay))
			continue;

		list_for_each_entry(curmode, &connector->probed_modes, head) {
			if (common_modes[i].w == curmode->hdisplay &&
			    common_modes[i].h == curmode->vdisplay) {
				mode_existed = true;
				break;
			}
		}

		if (mode_existed)
			continue;

		mode = amdgpu_dm_create_common_mode(encoder,
				common_modes[i].name, common_modes[i].w,
				common_modes[i].h);
		drm_mode_probed_add(connector, mode);
		amdgpu_dm_connector->num_modes++;
	}
}
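
/*
 * For example, on a panel whose native (preferred) mode is 1920x1080, the
 * loop above adds every smaller entry of common_modes[] that EDID probing
 * did not already report, while 1920x1200 is skipped (taller than native)
 * and 1920x1080 is skipped as the native mode itself.
 */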

static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
					      struct edid *edid)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);

	if (edid) {
		/* empty probed_modes */
		INIT_LIST_HEAD(&connector->probed_modes);
		amdgpu_dm_connector->num_modes =
				drm_add_edid_modes(connector, edid);

		/*
		 * Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(), since an EDID can have more
		 * than one preferred mode, and modes later in the probed-mode
		 * list could be of higher and preferred resolution. For
		 * example: a 3840x2160 preferred timing in the base EDID and
		 * a 4096x2160 preferred resolution in a DID extension block
		 * later.
		 */
		drm_mode_sort(&connector->probed_modes);
		amdgpu_dm_get_native_mode(connector);
	} else {
		amdgpu_dm_connector->num_modes = 0;
	}
}

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct drm_encoder *encoder;
	struct edid *edid = amdgpu_dm_connector->edid;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (!drm_edid_is_valid(edid)) {
		amdgpu_dm_connector->num_modes =
				drm_add_modes_noedid(connector, 640, 480);
	} else {
		amdgpu_dm_connector_ddc_get_modes(connector, edid);
		amdgpu_dm_connector_add_common_modes(encoder, connector);
	}
	amdgpu_dm_fbc_init(connector);

	return amdgpu_dm_connector->num_modes;
}

void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
				     struct amdgpu_dm_connector *aconnector,
				     int connector_type,
				     struct dc_link *link,
				     int link_index)
{
	struct amdgpu_device *adev = drm_to_adev(dm->ddev);

	/*
	 * Some of the properties below require access to state, like bpc.
	 * Allocate some default initial connector state with our reset helper.
	 */
	if (aconnector->base.funcs->reset)
		aconnector->base.funcs->reset(&aconnector->base);

	aconnector->connector_id = link_index;
	aconnector->dc_link = link;
	aconnector->base.interlace_allowed = false;
	aconnector->base.doublescan_allowed = false;
	aconnector->base.stereo_allowed = false;
	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
	aconnector->audio_inst = -1;
	mutex_init(&aconnector->hpd_lock);

	/*
	 * Configure HPD hot-plug support: connector->polled defaults to 0,
	 * which means HPD hot plug is not supported.
	 */
	switch (connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported;
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.dp_ycbcr420_supported;
		break;
	case DRM_MODE_CONNECTOR_DVID:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	default:
		break;
	}

	drm_object_attach_property(&aconnector->base.base,
				dm->ddev->mode_config.scaling_mode_property,
				DRM_MODE_SCALE_NONE);

	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_property,
				UNDERSCAN_OFF);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_hborder_property,
				0);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_vborder_property,
				0);

	if (!aconnector->mst_port)
		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);

	/* This defaults to the max in the range, but we want 8bpc for non-eDP. */
	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;

	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
		drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.abm_level_property, 0);
	}

	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector_type == DRM_MODE_CONNECTOR_eDP) {
		drm_object_attach_property(
			&aconnector->base.base,
			dm->ddev->mode_config.hdr_output_metadata_property, 0);

		if (!aconnector->mst_port)
			drm_connector_attach_vrr_capable_property(&aconnector->base);

#ifdef CONFIG_DRM_AMD_DC_HDCP
		if (adev->dm.hdcp_workqueue)
			drm_connector_attach_content_protection_property(&aconnector->base, true);
#endif
	}
}

static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
			      struct i2c_msg *msgs, int num)
{
	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
	struct ddc_service *ddc_service = i2c->ddc_service;
	struct i2c_command cmd;
	int i;
	int result = -EIO;

	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);

	if (!cmd.payloads)
		return result;

	cmd.number_of_payloads = num;
	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
	cmd.speed = 100;

	for (i = 0; i < num; i++) {
		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
		cmd.payloads[i].address = msgs[i].addr;
		cmd.payloads[i].length = msgs[i].len;
		cmd.payloads[i].data = msgs[i].buf;
	}

	if (dc_submit_i2c(
			ddc_service->ctx->dc,
			ddc_service->ddc_pin->hw_info.ddc_channel,
			&cmd))
		result = num;

	kfree(cmd.payloads);
	return result;
}
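
/*
 * Each i2c_msg above maps 1:1 onto a DC i2c_payload, and the whole array is
 * submitted to DC as one i2c_command, presumably so multi-message
 * (repeated-start) transactions survive the translation. Per the
 * i2c_algorithm convention, the transfer returns the number of messages on
 * success and -EIO on any failure.
 */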

static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
	.master_xfer = amdgpu_dm_i2c_xfer,
	.functionality = amdgpu_dm_i2c_func,
};

static struct amdgpu_i2c_adapter *
create_i2c(struct ddc_service *ddc_service,
	   int link_index,
	   int *res)
{
	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
	struct amdgpu_i2c_adapter *i2c;

	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
	if (!i2c)
		return NULL;
	i2c->base.owner = THIS_MODULE;
	i2c->base.class = I2C_CLASS_DDC;
	i2c->base.dev.parent = &adev->pdev->dev;
	i2c->base.algo = &amdgpu_dm_i2c_algo;
	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
	i2c_set_adapdata(&i2c->base, i2c);
	i2c->ddc_service = ddc_service;
	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;

	return i2c;
}


/*
 * Note: this function assumes that dc_link_detect() was called for the
 * dc_link which will be represented by this aconnector.
 */
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *aconnector,
				    uint32_t link_index,
				    struct amdgpu_encoder *aencoder)
{
	int res = 0;
	int connector_type;
	struct dc *dc = dm->dc;
	struct dc_link *link = dc_get_link_at_index(dc, link_index);
	struct amdgpu_i2c_adapter *i2c;

	link->priv = aconnector;

	DRM_DEBUG_DRIVER("%s()\n", __func__);

	i2c = create_i2c(link->ddc, link->link_index, &res);
	if (!i2c) {
		DRM_ERROR("Failed to create i2c adapter data\n");
		return -ENOMEM;
	}

	aconnector->i2c = i2c;
	res = i2c_add_adapter(&i2c->base);

	if (res) {
		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
		goto out_free;
	}

	connector_type = to_drm_connector_type(link->connector_signal);

	res = drm_connector_init_with_ddc(
			dm->ddev,
			&aconnector->base,
			&amdgpu_dm_connector_funcs,
			connector_type,
			&i2c->base);

	if (res) {
		DRM_ERROR("connector_init failed\n");
		aconnector->connector_id = -1;
		goto out_free;
	}

	drm_connector_helper_add(
			&aconnector->base,
			&amdgpu_dm_connector_helper_funcs);

	amdgpu_dm_connector_init_helper(
		dm,
		aconnector,
		connector_type,
		link,
		link_index);

	drm_connector_attach_encoder(
		&aconnector->base, &aencoder->base);

	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
		|| connector_type == DRM_MODE_CONNECTOR_eDP)
		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);

out_free:
	if (res) {
		kfree(i2c);
		aconnector->i2c = NULL;
	}
	return res;
}

int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
{
	switch (adev->mode_info.num_crtc) {
	case 1:
		return 0x1;
	case 2:
		return 0x3;
	case 3:
		return 0x7;
	case 4:
		return 0xf;
	case 5:
		return 0x1f;
	case 6:
	default:
		return 0x3f;
	}
}
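
/*
 * Equivalently, for num_crtc >= 1 the switch above returns the low
 * min(num_crtc, 6) bits set:
 *
 *	possible_crtcs = (1u << min(adev->mode_info.num_crtc, 6)) - 1;
 *
 * e.g. 4 CRTCs -> 0xf, so every encoder below may be routed to any CRTC.
 */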

static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	int res = drm_encoder_init(dev,
				   &aencoder->base,
				   &amdgpu_dm_encoder_funcs,
				   DRM_MODE_ENCODER_TMDS,
				   NULL);

	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);

	if (!res)
		aencoder->encoder_id = link_index;
	else
		aencoder->encoder_id = -1;

	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);

	return res;
}

static void manage_dm_interrupts(struct amdgpu_device *adev,
				 struct amdgpu_crtc *acrtc,
				 bool enable)
{
	/*
	 * We have no guarantee that the frontend index maps to the same
	 * backend index - some even map to more than one.
	 *
	 * TODO: Use a different interrupt or check DC itself for the mapping.
	 */
	int irq_type =
		amdgpu_display_crtc_idx_to_irq_type(
			adev,
			acrtc->crtc_id);

	if (enable) {
		drm_crtc_vblank_on(&acrtc->base);
		amdgpu_irq_get(
			adev,
			&adev->pageflip_irq,
			irq_type);
	} else {
		amdgpu_irq_put(
			adev,
			&adev->pageflip_irq,
			irq_type);
		drm_crtc_vblank_off(&acrtc->base);
	}
}
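
/*
 * The ordering above is deliberately symmetric: on enable, vblank support is
 * turned on before the pageflip interrupt is requested; on disable, the
 * interrupt is dropped first, so no pageflip IRQ should arrive for a CRTC
 * whose vblank machinery is already off.
 */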

static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
				      struct amdgpu_crtc *acrtc)
{
	int irq_type =
		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);

	/*
	 * This reads the current state for the IRQ and force-reapplies
	 * the setting to hardware.
	 */
	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
}

static bool
is_scaling_state_different(const struct dm_connector_state *dm_state,
			   const struct dm_connector_state *old_dm_state)
{
	if (dm_state->scaling != old_dm_state->scaling)
		return true;
	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
			return true;
	} else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
			return true;
	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
		return true;
	return false;
}

#ifdef CONFIG_DRM_AMD_DC_HDCP
static bool is_content_protection_different(struct drm_connector_state *state,
					    const struct drm_connector_state *old_state,
					    const struct drm_connector *connector,
					    struct hdcp_workqueue *hdcp_w)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);

	/* Handles: Type0/1 change */
	if (old_state->hdcp_content_type != state->hdcp_content_type &&
	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return true;
	}

	/* CP is being re-enabled, ignore this.
	 *
	 * Handles: ENABLED -> DESIRED
	 */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
		return false;
	}

	/* S3 resume case: the old state will always be 0 (UNDESIRED) and the
	 * restored state will be ENABLED.
	 *
	 * Handles: UNDESIRED -> ENABLED
	 */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;

	/* Check if something is connected/enabled; otherwise we would start
	 * hdcp while nothing is connected/enabled (hot-plug, headless S3,
	 * dpms).
	 *
	 * Handles: DESIRED -> DESIRED (Special case)
	 */
	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
		dm_con_state->update_hdcp = false;
		return true;
	}

	/*
	 * Handles: UNDESIRED -> UNDESIRED
	 *          DESIRED   -> DESIRED
	 *          ENABLED   -> ENABLED
	 */
	if (old_state->content_protection == state->content_protection)
		return false;

	/*
	 * Handles: UNDESIRED -> DESIRED
	 *          DESIRED   -> UNDESIRED
	 *          ENABLED   -> UNDESIRED
	 */
	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
		return true;

	/*
	 * Handles: DESIRED -> ENABLED
	 */
	return false;
}

#endif
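
/*
 * Summary of the HDCP content-protection transitions handled above, in order
 * of evaluation:
 *
 *	Type0/1 content-type change        -> force DESIRED, update
 *	ENABLED   -> DESIRED               -> keep ENABLED, no update
 *	UNDESIRED -> ENABLED (S3 resume)   -> demote to DESIRED, fall through
 *	DESIRED   -> DESIRED + update_hdcp -> update (hotplug/dpms case)
 *	same state otherwise               -> no update
 *	any state -> non-ENABLED           -> update
 *	DESIRED   -> ENABLED               -> no update
 */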

static void remove_stream(struct amdgpu_device *adev,
			  struct amdgpu_crtc *acrtc,
			  struct dc_stream_state *stream)
{
	/* this is the update mode case */

	acrtc->otg_inst = -1;
	acrtc->enabled = false;
}

static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
			       struct dc_cursor_position *position)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	int x, y;
	int xorigin = 0, yorigin = 0;

	position->enable = false;
	position->x = 0;
	position->y = 0;

	if (!crtc || !plane->state->fb)
		return 0;

	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
			  __func__,
			  plane->state->crtc_w,
			  plane->state->crtc_h);
		return -EINVAL;
	}

	x = plane->state->crtc_x;
	y = plane->state->crtc_y;

	if (x <= -amdgpu_crtc->max_cursor_width ||
	    y <= -amdgpu_crtc->max_cursor_height)
		return 0;

	if (x < 0) {
		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
		x = 0;
	}
	if (y < 0) {
		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
		y = 0;
	}
	position->enable = true;
	position->translate_by_source = true;
	position->x = x;
	position->y = y;
	position->x_hotspot = xorigin;
	position->y_hotspot = yorigin;

	return 0;
}
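
/*
 * Worked example of the clamping above: a 64x64 cursor at crtc_x = -10
 * yields xorigin = 10 and x = 0, so DC scans the cursor image out starting
 * 10 texels in, at screen position 0. Cursors entirely past the top-left
 * edge (x <= -max_cursor_width) are simply left disabled.
 */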

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state)
{
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	uint64_t address = afb ? afb->address : 0;
	struct dc_cursor_position position;
	struct dc_cursor_attributes attributes;
	int ret;

	if (!plane->state->fb && !old_plane_state->fb)
		return;

	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
			 __func__,
			 amdgpu_crtc->crtc_id,
			 plane->state->crtc_w,
			 plane->state->crtc_h);

	ret = get_cursor_position(plane, crtc, &position);
	if (ret)
		return;

	if (!position.enable) {
		/* turn off cursor */
		if (crtc_state && crtc_state->stream) {
			mutex_lock(&adev->dm.dc_lock);
			dc_stream_set_cursor_position(crtc_state->stream,
						      &position);
			mutex_unlock(&adev->dm.dc_lock);
		}
		return;
	}

	amdgpu_crtc->cursor_width = plane->state->crtc_w;
	amdgpu_crtc->cursor_height = plane->state->crtc_h;

	memset(&attributes, 0, sizeof(attributes));
	attributes.address.high_part = upper_32_bits(address);
	attributes.address.low_part  = lower_32_bits(address);
	attributes.width             = plane->state->crtc_w;
	attributes.height            = plane->state->crtc_h;
	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
	attributes.rotation_angle    = 0;
	attributes.attribute_flags.value = 0;

	attributes.pitch = attributes.width;

	if (crtc_state->stream) {
		mutex_lock(&adev->dm.dc_lock);
		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
						     &attributes))
			DRM_ERROR("DC failed to set cursor attributes\n");

		if (!dc_stream_set_cursor_position(crtc_state->stream,
						   &position))
			DRM_ERROR("DC failed to set cursor position\n");
		mutex_unlock(&adev->dm.dc_lock);
	}
}

static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
{
	assert_spin_locked(&acrtc->base.dev->event_lock);
	WARN_ON(acrtc->event);

	acrtc->event = acrtc->base.state->event;

	/* Set the flip status */
	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

	/* Mark this event as consumed */
	acrtc->base.state->event = NULL;

	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
			 acrtc->crtc_id);
}

static void update_freesync_state_on_stream(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state,
	struct dc_stream_state *new_stream,
	struct dc_plane_state *surface,
	u32 flip_timestamp_in_us)
{
	struct mod_vrr_params vrr_params;
	struct dc_info_packet vrr_infopacket = {0};
	struct amdgpu_device *adev = dm->adev;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
	unsigned long flags;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */

	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;

	if (surface) {
		mod_freesync_handle_preflip(
			dm->freesync_module,
			surface,
			new_stream,
			flip_timestamp_in_us,
			&vrr_params);

		if (adev->family < AMDGPU_FAMILY_AI &&
		    amdgpu_dm_vrr_active(new_crtc_state)) {
			mod_freesync_handle_v_update(dm->freesync_module,
						     new_stream, &vrr_params);

			/* Need to call this before the frame ends. */
			dc_stream_adjust_vmin_vmax(dm->dc,
						   new_crtc_state->stream,
						   &vrr_params.adjust);
		}
	}

	mod_freesync_build_vrr_infopacket(
		dm->freesync_module,
		new_stream,
		&vrr_params,
		PACKET_TYPE_VRR,
		TRANSFER_FUNC_UNKNOWN,
		&vrr_infopacket);

	new_crtc_state->freesync_timing_changed |=
		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
			&vrr_params.adjust,
			sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->freesync_vrr_info_changed |=
		(memcmp(&new_crtc_state->vrr_infopacket,
			&vrr_infopacket,
			sizeof(vrr_infopacket)) != 0);

	acrtc->dm_irq_params.vrr_params = vrr_params;
	new_crtc_state->vrr_infopacket = vrr_infopacket;

	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
	new_stream->vrr_infopacket = vrr_infopacket;

	if (new_crtc_state->freesync_vrr_info_changed)
		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
			      new_crtc_state->base.crtc->base.id,
			      (int)new_crtc_state->base.vrr_enabled,
			      (int)vrr_params.state);

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

static void update_stream_irq_parameters(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state)
{
	struct dc_stream_state *new_stream = new_crtc_state->stream;
	struct mod_vrr_params vrr_params;
	struct mod_freesync_config config = new_crtc_state->freesync_config;
	struct amdgpu_device *adev = dm->adev;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
	unsigned long flags;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */
	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;

	if (new_crtc_state->vrr_supported &&
	    config.min_refresh_in_uhz &&
	    config.max_refresh_in_uhz) {
		config.state = new_crtc_state->base.vrr_enabled ?
			VRR_STATE_ACTIVE_VARIABLE :
			VRR_STATE_INACTIVE;
	} else {
		config.state = VRR_STATE_UNSUPPORTED;
	}

	mod_freesync_build_vrr_params(dm->freesync_module,
				      new_stream,
				      &config, &vrr_params);

	new_crtc_state->freesync_timing_changed |=
		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->freesync_config = config;
	/* Copy state for access from DM IRQ handler */
	acrtc->dm_irq_params.freesync_config = config;
	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
	acrtc->dm_irq_params.vrr_params = vrr_params;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
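
/*
 * Of the two helpers above, update_freesync_state_on_stream() runs on the
 * flip path and rebuilds the VRR infopacket per frame, while
 * update_stream_irq_parameters() only snapshots the freesync config and VRR
 * parameters into dm_irq_params, so the vblank/vupdate IRQ handlers can read
 * a consistent copy under the event_lock.
 */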

static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
					    struct dm_crtc_state *new_state)
{
	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);

	if (!old_vrr_active && new_vrr_active) {
		/* Transition VRR inactive -> active:
		 * While VRR is active, we must not disable vblank irq, as a
		 * reenable after disable would compute bogus vblank/pflip
		 * timestamps if it likely happened inside display front-porch.
		 *
		 * We also need vupdate irq for the actual core vblank handling
		 * at end of vblank.
		 */
		dm_set_vupdate_irq(new_state->base.crtc, true);
		drm_crtc_vblank_get(new_state->base.crtc);
		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
				 __func__, new_state->base.crtc->base.id);
	} else if (old_vrr_active && !new_vrr_active) {
		/* Transition VRR active -> inactive:
		 * Allow vblank irq disable again for fixed refresh rate.
		 */
		dm_set_vupdate_irq(new_state->base.crtc, false);
		drm_crtc_vblank_put(new_state->base.crtc);
		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
				 __func__, new_state->base.crtc->base.id);
	}
}
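
/*
 * Concretely: when userspace enables VRR, the CRTC gains a vblank reference
 * and vupdate IRQs, keeping timestamps sane inside the variable front porch;
 * when VRR is disabled again, the reference is dropped so the vblank IRQ can
 * be disabled for fixed-refresh operation. The get/put pair is balanced
 * across the two transitions.
 */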

static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
{
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	int i;

	/*
	 * TODO: Make this per-stream so we don't issue redundant updates for
	 * commits with multiple streams.
	 */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
				       new_plane_state, i)
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			handle_cursor_update(plane, old_plane_state);
}

static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
				    struct dc_state *dc_state,
				    struct drm_device *dev,
				    struct amdgpu_display_manager *dm,
				    struct drm_crtc *pcrtc,
				    bool wait_for_vblank)
{
	uint32_t i;
	uint64_t timestamp_ns;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
	struct drm_crtc_state *new_pcrtc_state =
			drm_atomic_get_new_crtc_state(state, pcrtc);
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
	struct dm_crtc_state *dm_old_crtc_state =
			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
	int planes_count = 0, vpos, hpos;
	long r;
	unsigned long flags;
	struct amdgpu_bo *abo;
	uint32_t target_vblank, last_flip_vblank;
	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
	bool pflip_present = false;
	struct {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} *bundle;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		dm_error("Failed to allocate update bundle\n");
		goto cleanup;
	}

	/*
	 * Disable the cursor first if we're disabling all the planes.
	 * It'll remain on the screen after the planes are re-enabled
	 * if we don't.
	 */
	if (acrtc_state->active_planes == 0)
		amdgpu_dm_commit_cursors(state);

	/* update planes when needed */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
		struct drm_crtc *crtc = new_plane_state->crtc;
		struct drm_crtc_state *new_crtc_state;
		struct drm_framebuffer *fb = new_plane_state->fb;
		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
		bool plane_needs_flip;
		struct dc_plane_state *dc_plane;
		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);

		/* Cursor plane is handled after stream updates */
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (!fb || !crtc || pcrtc != crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
		if (!new_crtc_state->active)
			continue;

		dc_plane = dm_new_plane_state->dc_state;

		bundle->surface_updates[planes_count].surface = dc_plane;
		if (new_pcrtc_state->color_mgmt_changed) {
			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
		}

		fill_dc_scaling_info(new_plane_state,
				     &bundle->scaling_infos[planes_count]);

		bundle->surface_updates[planes_count].scaling_info =
			&bundle->scaling_infos[planes_count];

		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;

		pflip_present = pflip_present || plane_needs_flip;

		if (!plane_needs_flip) {
			planes_count += 1;
			continue;
		}

		abo = gem_to_amdgpu_bo(fb->obj[0]);

		/*
		 * Wait for all fences on this FB. Do limited wait to avoid
		 * deadlock during GPU reset when this fence will not signal
		 * but we hold reservation lock for the BO.
		 */
		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
					      false,
					      msecs_to_jiffies(5000));
		if (unlikely(r <= 0))
			DRM_ERROR("Waiting for fences timed out!");

		fill_dc_plane_info_and_addr(
			dm->adev, new_plane_state,
			afb->tiling_flags,
			&bundle->plane_infos[planes_count],
			&bundle->flip_addrs[planes_count].address,
			afb->tmz_surface, false);

		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
				 new_plane_state->plane->index,
				 bundle->plane_infos[planes_count].dcc.enable);

		bundle->surface_updates[planes_count].plane_info =
			&bundle->plane_infos[planes_count];

		/*
		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroring.
		 */
		bundle->flip_addrs[planes_count].flip_immediate =
			crtc->state->async_flip &&
			acrtc_state->update_type == UPDATE_TYPE_FAST;

		timestamp_ns = ktime_get_ns();
		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
		bundle->surface_updates[planes_count].surface = dc_plane;

		if (!bundle->surface_updates[planes_count].surface) {
			DRM_ERROR("No surface for CRTC: id=%d\n",
				  acrtc_attach->crtc_id);
			continue;
		}

		if (plane == pcrtc->primary)
			update_freesync_state_on_stream(
				dm,
				acrtc_state,
				acrtc_state->stream,
				dc_plane,
				bundle->flip_addrs[planes_count].flip_timestamp_in_us);

		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
				 __func__,
				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);

		planes_count += 1;
	}

	if (pflip_present) {
		if (!vrr_active) {
			/* Use old throttling in non-vrr fixed refresh rate mode
			 * to keep flip scheduling based on target vblank counts
			 * working in a backwards compatible way, e.g., for
			 * clients using the GLX_OML_sync_control extension or
			 * DRI3/Present extension with defined target_msc.
			 */
			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
		} else {
			/* For variable refresh rate mode only:
			 * Get vblank of last completed flip to avoid > 1 vrr
			 * flips per video frame by use of throttling, but allow
			 * flip programming anywhere in the possibly large
			 * variable vrr vblank interval for fine-grained flip
			 * timing control and more opportunity to avoid stutter
			 * on late submission of flips.
			 */
			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		target_vblank = last_flip_vblank + wait_for_vblank;

		/*
		 * Wait until we're out of the vertical blank period before the one
		 * targeted by the flip
		 */
		while ((acrtc_attach->enabled &&
			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
							    0, &vpos, &hpos, NULL,
							    NULL, &pcrtc->hwmode)
			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
			(int)(target_vblank -
			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
			usleep_range(1000, 1100);
		}
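
		/*
		 * The busy-wait above throttles to at most one flip per frame:
		 * target_vblank is the vblank count of the last flip plus one
		 * when wait_for_vblank is set, and the signed (int) difference
		 * keeps the comparison correct across 32-bit counter
		 * wraparound.
		 */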

		/**
		 * Prepare the flip event for the pageflip interrupt to handle.
		 *
		 * This only works in the case where we've already turned on the
		 * appropriate hardware blocks (eg. HUBP) so in the transition case
		 * from 0 -> n planes we have to skip a hardware generated event
		 * and rely on sending it from software.
		 */
		if (acrtc_attach->base.state->event &&
		    acrtc_state->active_planes > 0) {
			drm_crtc_vblank_get(pcrtc);

			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);

			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
			prepare_flip_isr(acrtc_attach);

			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		if (acrtc_state->stream) {
			if (acrtc_state->freesync_vrr_info_changed)
				bundle->stream_update.vrr_infopacket =
					&acrtc_state->stream->vrr_infopacket;
		}
	}

	/* Update the planes if changed or disable if we don't have any. */
	if ((planes_count || acrtc_state->active_planes == 0) &&
	    acrtc_state->stream) {
		bundle->stream_update.stream = acrtc_state->stream;
		if (new_pcrtc_state->mode_changed) {
			bundle->stream_update.src = acrtc_state->stream->src;
			bundle->stream_update.dst = acrtc_state->stream->dst;
		}

		if (new_pcrtc_state->color_mgmt_changed) {
			/*
			 * TODO: This isn't fully correct since we've actually
			 * already modified the stream in place.
			 */
			bundle->stream_update.gamut_remap =
				&acrtc_state->stream->gamut_remap_matrix;
			bundle->stream_update.output_csc_transform =
				&acrtc_state->stream->csc_color_matrix;
			bundle->stream_update.out_transfer_func =
				acrtc_state->stream->out_transfer_func;
		}

		acrtc_state->stream->abm_level = acrtc_state->abm_level;
		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
			bundle->stream_update.abm_level = &acrtc_state->abm_level;

		/*
		 * If FreeSync state on the stream has changed then we need to
		 * re-adjust the min/max bounds now that DC doesn't handle this
		 * as part of commit.
		 */
		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
		    amdgpu_dm_vrr_active(acrtc_state)) {
			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			dc_stream_adjust_vmin_vmax(
				dm->dc, acrtc_state->stream,
				&acrtc_attach->dm_irq_params.vrr_params.adjust);
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}
		mutex_lock(&dm->dc_lock);
		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
		    acrtc_state->stream->link->psr_settings.psr_allow_active)
			amdgpu_dm_psr_disable(acrtc_state->stream);

		dc_commit_updates_for_stream(dm->dc,
					     bundle->surface_updates,
					     planes_count,
					     acrtc_state->stream,
					     &bundle->stream_update,
					     dc_state);

		/**
		 * Enable or disable the interrupts on the backend.
		 *
		 * Most pipes are put into power gating when unused.
		 *
		 * When power gating is enabled on a pipe we lose the
		 * interrupt enablement state when power gating is disabled.
		 *
		 * So we need to update the IRQ control state in hardware
		 * whenever the pipe turns on (since it could be previously
		 * power gated) or off (since some pipes can't be power gated
		 * on some ASICs).
		 */
		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
			dm_update_pflip_irq_state(drm_to_adev(dev),
						  acrtc_attach);

		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
		    acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
		    !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
			amdgpu_dm_link_setup_psr(acrtc_state->stream);
		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
			 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
			 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
			amdgpu_dm_psr_enable(acrtc_state->stream);
		}

		mutex_unlock(&dm->dc_lock);
	}

	/*
	 * Update cursor state *after* programming all the planes.
	 * This avoids redundant programming in the case where we're going
	 * to be disabling a single plane - those pipes are being disabled.
	 */
	if (acrtc_state->active_planes)
		amdgpu_dm_commit_cursors(state);

cleanup:
	kfree(bundle);
}

static void amdgpu_dm_commit_audio(struct drm_device *dev,
				   struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *new_dm_crtc_state;
	const struct dc_stream_status *status;
	int i, inst;

	/* Notify device removals. */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		if (old_con_state->crtc != new_con_state->crtc) {
			/* CRTC changes require notification. */
			goto notify;
		}

		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(
			state, new_con_state->crtc);

		if (!new_crtc_state)
			continue;

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

	notify:
		aconnector = to_amdgpu_dm_connector(connector);

		mutex_lock(&adev->dm.audio_lock);
		inst = aconnector->audio_inst;
		aconnector->audio_inst = -1;
		mutex_unlock(&adev->dm.audio_lock);

		amdgpu_dm_audio_eld_notify(adev, inst);
	}

	/* Notify audio device additions. */
	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(
			state, new_con_state->crtc);

		if (!new_crtc_state)
			continue;

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (!new_dm_crtc_state->stream)
			continue;

		status = dc_stream_get_status(new_dm_crtc_state->stream);
		if (!status)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);

		mutex_lock(&adev->dm.audio_lock);
		inst = status->audio_inst;
		aconnector->audio_inst = inst;
		mutex_unlock(&adev->dm.audio_lock);

		amdgpu_dm_audio_eld_notify(adev, inst);
	}
}
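
/*
 * Removal notifications are deliberately sent before addition notifications:
 * a connector that moves between CRTCs in one commit first reports its old
 * instance as gone (audio_inst = -1) and only then reports its new instance
 * to the audio component.
 */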

/*
 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
 * @crtc_state: the DRM CRTC state
 * @stream_state: the DC stream state.
 *
 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
 */
static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
						struct dc_stream_state *stream_state)
{
	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
}

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock)
{
	/*
	 * TODO: Add a check here for SoCs that support a hardware cursor
	 * plane, to unset legacy_cursor_update.
	 *
	 * TODO: Handle EINTR, reenable IRQ.
	 */
	return drm_atomic_helper_commit(dev, state, nonblock);
}
8034
b8592b48
LL
8035/**
8036 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8037 * @state: The atomic state to commit
8038 *
8039 * This will tell DC to commit the constructed DC state from atomic_check,
8040 * programming the hardware. Any failures here implies a hardware failure, since
8041 * atomic check should have filtered anything non-kosher.
8042 */
7578ecda 8043static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
e7b07cee
HW
8044{
8045 struct drm_device *dev = state->dev;
1348969a 8046 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
8047 struct amdgpu_display_manager *dm = &adev->dm;
8048 struct dm_atomic_state *dm_state;
eb3dc897 8049 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
e7b07cee 8050 uint32_t i, j;
5cc6dcbd 8051 struct drm_crtc *crtc;
0bc9706d 8052 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
e7b07cee
HW
8053 unsigned long flags;
8054 bool wait_for_vblank = true;
8055 struct drm_connector *connector;
c2cea706 8056 struct drm_connector_state *old_con_state, *new_con_state;
54d76575 8057 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
fe2a1965 8058 int crtc_disable_count = 0;
6ee90e88 8059 bool mode_set_reset_required = false;
e7b07cee 8060
e8a98235
RS
8061 trace_amdgpu_dm_atomic_commit_tail_begin(state);
8062
e7b07cee
HW
8063 drm_atomic_helper_update_legacy_modeset_state(dev, state);
8064
eb3dc897
NK
8065 dm_state = dm_atomic_get_new_state(state);
8066 if (dm_state && dm_state->context) {
8067 dc_state = dm_state->context;
8068 } else {
8069 /* No state changes, retain current state. */
813d20dc 8070 dc_state_temp = dc_create_state(dm->dc);
eb3dc897
NK
8071 ASSERT(dc_state_temp);
8072 dc_state = dc_state_temp;
8073 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8074 }
e7b07cee 8075
6d90a208
AP
8076 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8077 new_crtc_state, i) {
8078 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8079
8080 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8081
8082 if (old_crtc_state->active &&
8083 (!new_crtc_state->active ||
8084 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8085 manage_dm_interrupts(adev, acrtc, false);
8086 dc_stream_release(dm_old_crtc_state->stream);
8087 }
8088 }
8089
8976f73b
RS
8090 drm_atomic_helper_calc_timestamping_constants(state);
8091
e7b07cee 8092 /* update changed items */
0bc9706d 8093 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
e7b07cee 8094 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 8095
54d76575
LSL
8096 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8097 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
e7b07cee 8098
f1ad2f5e 8099 DRM_DEBUG_DRIVER(
e7b07cee
HW
8100 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8101 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8102 "connectors_changed:%d\n",
8103 acrtc->crtc_id,
0bc9706d
LSL
8104 new_crtc_state->enable,
8105 new_crtc_state->active,
8106 new_crtc_state->planes_changed,
8107 new_crtc_state->mode_changed,
8108 new_crtc_state->active_changed,
8109 new_crtc_state->connectors_changed);
e7b07cee 8110
5c68c652
VL
8111 /* Disable cursor if disabling crtc */
8112 if (old_crtc_state->active && !new_crtc_state->active) {
8113 struct dc_cursor_position position;
8114
8115 memset(&position, 0, sizeof(position));
8116 mutex_lock(&dm->dc_lock);
8117 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8118 mutex_unlock(&dm->dc_lock);
8119 }
8120
27b3f4fc
LSL
8121 /* Copy all transient state flags into dc state */
8122 if (dm_new_crtc_state->stream) {
8123 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8124 dm_new_crtc_state->stream);
8125 }
8126
e7b07cee
HW
8127 /* handles headless hotplug case, updating new_state and
8128 * aconnector as needed
8129 */
8130
54d76575 8131 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
e7b07cee 8132
f1ad2f5e 8133 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 8134
54d76575 8135 if (!dm_new_crtc_state->stream) {
e7b07cee 8136 /*
b830ebc9
HW
8137 * this could happen because of issues with
8138 * userspace notifications delivery.
8139 * In this case userspace tries to set mode on
1f6010a9
DF
8140 * display which is disconnected in fact.
8141 * dc_sink is NULL in this case on aconnector.
b830ebc9
HW
8142 * We expect reset mode will come soon.
8143 *
8144 * This can also happen when unplug is done
8145 * during resume sequence ended
8146 *
8147 * In this case, we want to pretend we still
8148 * have a sink to keep the pipe running so that
8149 * hw state is consistent with the sw state
8150 */
f1ad2f5e 8151 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
e7b07cee
HW
8152 __func__, acrtc->base.base.id);
8153 continue;
8154 }
8155
54d76575
LSL
8156 if (dm_old_crtc_state->stream)
8157 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
e7b07cee 8158
97028037
LP
8159 pm_runtime_get_noresume(dev->dev);
8160
e7b07cee 8161 acrtc->enabled = true;
0bc9706d
LSL
8162 acrtc->hw_mode = new_crtc_state->mode;
8163 crtc->hwmode = new_crtc_state->mode;
6ee90e88 8164 mode_set_reset_required = true;
0bc9706d 8165 } else if (modereset_required(new_crtc_state)) {
f1ad2f5e 8166 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 8167 /* i.e. reset mode */
6ee90e88 8168 if (dm_old_crtc_state->stream)
54d76575 8169 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
6ee90e88 8170 mode_set_reset_required = true;
e7b07cee
HW
8171 }
8172 } /* for_each_crtc_in_state() */
8173
eb3dc897 8174 if (dc_state) {
6ee90e88 8175 /* if there is a mode set or reset, disable eDP PSR */
8176 if (mode_set_reset_required)
8177 amdgpu_dm_psr_disable_all(dm);
8178
eb3dc897 8179 dm_enable_per_frame_crtc_master_sync(dc_state);
674e78ac 8180 mutex_lock(&dm->dc_lock);
eb3dc897 8181 WARN_ON(!dc_commit_state(dm->dc, dc_state));
674e78ac 8182 mutex_unlock(&dm->dc_lock);
fa2123db 8183 }
e7b07cee 8184
0bc9706d 8185 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 8186 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 8187
54d76575 8188 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 8189
54d76575 8190 if (dm_new_crtc_state->stream != NULL) {
e7b07cee 8191 const struct dc_stream_status *status =
54d76575 8192 dc_stream_get_status(dm_new_crtc_state->stream);
e7b07cee 8193
eb3dc897 8194 if (!status)
09f609c3
LL
8195 status = dc_stream_get_status_from_state(dc_state,
8196 dm_new_crtc_state->stream);
e7b07cee 8197 if (!status)
54d76575 8198 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
e7b07cee
HW
8199 else
8200 acrtc->otg_inst = status->primary_otg_inst;
8201 }
8202 }
0c8620d6
BL
8203#ifdef CONFIG_DRM_AMD_DC_HDCP
8204 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8205 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8206 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8207 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8208
8209 new_crtc_state = NULL;
8210
8211 if (acrtc)
8212 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8213
8214 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8215
8216 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8217 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8218 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8219 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
97f6c917 8220 dm_new_con_state->update_hdcp = true;
0c8620d6
BL
8221 continue;
8222 }
8223
8224 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
b1abe558
BL
8225 hdcp_update_display(
8226 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
23eb4191 8227 new_con_state->hdcp_content_type,
b1abe558
BL
8228 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
8229 : false);
0c8620d6
BL
8230 }
8231#endif
e7b07cee 8232
02d6a6fc 8233 /* Handle connector state changes */
c2cea706 8234 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
54d76575
LSL
8235 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8236 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8237 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
19afd799
NC
8238 struct dc_surface_update dummy_updates[MAX_SURFACES];
8239 struct dc_stream_update stream_update;
b232d4ed 8240 struct dc_info_packet hdr_packet;
e7b07cee 8241 struct dc_stream_status *status = NULL;
b232d4ed 8242 bool abm_changed, hdr_changed, scaling_changed;
e7b07cee 8243
19afd799
NC
8244 memset(&dummy_updates, 0, sizeof(dummy_updates));
8245 memset(&stream_update, 0, sizeof(stream_update));
8246
44d09c6a 8247 if (acrtc) {
0bc9706d 8248 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
44d09c6a
HW
8249 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8250 }
0bc9706d 8251
e7b07cee 8252 /* Skip any modesets/resets */
0bc9706d 8253 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
e7b07cee
HW
8254 continue;
8255
54d76575 8256 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
c1ee92f9
DF
8257 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8258
b232d4ed
NK
8259 scaling_changed = is_scaling_state_different(dm_new_con_state,
8260 dm_old_con_state);
8261
8262 abm_changed = dm_new_crtc_state->abm_level !=
8263 dm_old_crtc_state->abm_level;
8264
8265 hdr_changed =
8266 is_hdr_metadata_different(old_con_state, new_con_state);
8267
8268 if (!scaling_changed && !abm_changed && !hdr_changed)
c1ee92f9 8269 continue;
e7b07cee 8270
b6e881c9 8271 stream_update.stream = dm_new_crtc_state->stream;
b232d4ed 8272 if (scaling_changed) {
02d6a6fc 8273 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
b6e881c9 8274 dm_new_con_state, dm_new_crtc_state->stream);
e7b07cee 8275
02d6a6fc
DF
8276 stream_update.src = dm_new_crtc_state->stream->src;
8277 stream_update.dst = dm_new_crtc_state->stream->dst;
8278 }
8279
b232d4ed 8280 if (abm_changed) {
02d6a6fc
DF
8281 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8282
8283 stream_update.abm_level = &dm_new_crtc_state->abm_level;
8284 }
70e8ffc5 8285
b232d4ed
NK
8286 if (hdr_changed) {
8287 fill_hdr_info_packet(new_con_state, &hdr_packet);
8288 stream_update.hdr_static_metadata = &hdr_packet;
8289 }
8290
54d76575 8291 status = dc_stream_get_status(dm_new_crtc_state->stream);
e7b07cee 8292 WARN_ON(!status);
3be5262e 8293 WARN_ON(!status->plane_count);
e7b07cee 8294
02d6a6fc
DF
8295 /*
8296 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8297 * Here we create an empty update on each plane.
8298 * To fix this, DC should permit updating only stream properties.
8299 */
8300 for (j = 0; j < status->plane_count; j++)
8301 dummy_updates[j].surface = status->plane_states[0];
8302
8303
8304 mutex_lock(&dm->dc_lock);
8305 dc_commit_updates_for_stream(dm->dc,
8306 dummy_updates,
8307 status->plane_count,
8308 dm_new_crtc_state->stream,
8309 &stream_update,
8310 dc_state);
8311 mutex_unlock(&dm->dc_lock);
e7b07cee
HW
8312 }
8313
b5e83f6f 8314 /* Count number of newly disabled CRTCs for dropping PM refs later. */
e1fc2dca 8315 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
057be086 8316 new_crtc_state, i) {
fe2a1965
LP
8317 if (old_crtc_state->active && !new_crtc_state->active)
8318 crtc_disable_count++;
8319
54d76575 8320 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e1fc2dca 8321 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
66b0c973 8322
585d450c
AP
8323 /* For freesync config update on crtc state and params for irq */
8324 update_stream_irq_parameters(dm, dm_new_crtc_state);
057be086 8325
66b0c973
MK
8326 /* Handle vrr on->off / off->on transitions */
8327 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8328 dm_new_crtc_state);
e7b07cee
HW
8329 }
8330
8fe684e9
NK
8331 /*
8332 * Enable interrupts for CRTCs that are newly enabled or went through
8333 * a modeset. It was intentionally deferred until after the front end
8334 * state was modified to wait until the OTG was on and so the IRQ
8335 * handlers didn't access stale or invalid state.
8336 */
8337 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8338 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
c920888c 8339 bool configure_crc = false;
8fe684e9 8340
585d450c
AP
8341 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8342
8fe684e9
NK
8343 if (new_crtc_state->active &&
8344 (!old_crtc_state->active ||
8345 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
585d450c
AP
8346 dc_stream_retain(dm_new_crtc_state->stream);
8347 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8fe684e9 8348 manage_dm_interrupts(adev, acrtc, true);
c920888c 8349 }
8fe684e9 8350#ifdef CONFIG_DEBUG_FS
c920888c
WL
8351 if (new_crtc_state->active &&
8352 amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
8fe684e9
NK
8353 /*
8354 * Frontend may have changed so reapply the CRC capture
8355 * settings for the stream.
8356 */
8357 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
c920888c 8358 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8fe684e9 8359
c920888c
WL
8360 if (amdgpu_dm_crc_window_is_default(dm_new_crtc_state)) {
8361 if (!old_crtc_state->active || drm_atomic_crtc_needs_modeset(new_crtc_state))
8362 configure_crc = true;
8363 } else {
8364 if (amdgpu_dm_crc_window_changed(dm_new_crtc_state, dm_old_crtc_state))
8365 configure_crc = true;
8fe684e9 8366 }
c920888c
WL
8367
8368 if (configure_crc)
8369 amdgpu_dm_crtc_configure_crc_source(
8370 crtc, dm_new_crtc_state, dm_new_crtc_state->crc_src);
8fe684e9 8371 }
c920888c 8372#endif
8fe684e9 8373 }
e7b07cee 8374
420cd472 8375 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
4d85f45c 8376 if (new_crtc_state->async_flip)
420cd472
DF
8377 wait_for_vblank = false;
8378
e7b07cee 8379 /* update planes when needed per crtc*/
5cc6dcbd 8380 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
54d76575 8381 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 8382
54d76575 8383 if (dm_new_crtc_state->stream)
eb3dc897 8384 amdgpu_dm_commit_planes(state, dc_state, dev,
420cd472 8385 dm, crtc, wait_for_vblank);
e7b07cee
HW
8386 }
8387
6ce8f316
NK
8388 /* Update audio instances for each connector. */
8389 amdgpu_dm_commit_audio(dev, state);
8390
e7b07cee
HW
8391 /*
8392 * Send a vblank event for all events not handled in flip, and
8393 * mark the event as consumed for drm_atomic_helper_commit_hw_done()
8394 */
4a580877 8395 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
0bc9706d 8396 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 8397
0bc9706d
LSL
8398 if (new_crtc_state->event)
8399 drm_send_event_locked(dev, &new_crtc_state->event->base);
e7b07cee 8400
0bc9706d 8401 new_crtc_state->event = NULL;
e7b07cee 8402 }
4a580877 8403 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
e7b07cee 8404
29c8f234
LL
8405 /* Signal HW programming completion */
8406 drm_atomic_helper_commit_hw_done(state);
e7b07cee
HW
8407
8408 if (wait_for_vblank)
320a1274 8409 drm_atomic_helper_wait_for_flip_done(dev, state);
e7b07cee
HW
8410
8411 drm_atomic_helper_cleanup_planes(dev, state);
97028037 8412
1f6010a9
DF
8413 /*
8414 * Finally, drop a runtime PM reference for each newly disabled CRTC,
97028037
LP
8415 * so we can put the GPU into runtime suspend if we're not driving any
8416 * displays anymore
8417 */
fe2a1965
LP
8418 for (i = 0; i < crtc_disable_count; i++)
8419 pm_runtime_put_autosuspend(dev->dev);
97028037 8420 pm_runtime_mark_last_busy(dev->dev);
eb3dc897
NK
8421
8422 if (dc_state_temp)
8423 dc_release_state(dc_state_temp);
e7b07cee
HW
8424}
8425
8426
8427static int dm_force_atomic_commit(struct drm_connector *connector)
8428{
8429 int ret = 0;
8430 struct drm_device *ddev = connector->dev;
8431 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8432 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8433 struct drm_plane *plane = disconnected_acrtc->base.primary;
8434 struct drm_connector_state *conn_state;
8435 struct drm_crtc_state *crtc_state;
8436 struct drm_plane_state *plane_state;
8437
8438 if (!state)
8439 return -ENOMEM;
8440
8441 state->acquire_ctx = ddev->mode_config.acquire_ctx;
8442
8443 /* Construct an atomic state to restore the previous display settings */
8444
8445 /*
8446 * Attach connectors to drm_atomic_state
8447 */
8448 conn_state = drm_atomic_get_connector_state(state, connector);
8449
8450 ret = PTR_ERR_OR_ZERO(conn_state);
8451 if (ret)
8452 goto err;
8453
8454 /* Attach CRTC to drm_atomic_state */
8455 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8456
8457 ret = PTR_ERR_OR_ZERO(crtc_state);
8458 if (ret)
8459 goto err;
8460
8461 /* force a restore */
8462 crtc_state->mode_changed = true;
8463
8464 /* Attach plane to drm_atomic_state */
8465 plane_state = drm_atomic_get_plane_state(state, plane);
8466
8467 ret = PTR_ERR_OR_ZERO(plane_state);
8468 if (ret)
8469 goto err;
8470
8471
8472 /* Call commit internally with the state we just constructed */
8473 ret = drm_atomic_commit(state);
8474 if (!ret)
8475 return 0;
8476
8477err:
8478 DRM_ERROR("Restoring old state failed with %i\n", ret);
8479 drm_atomic_state_put(state);
8480
8481 return ret;
8482}
8483
8484/*
1f6010a9
DF
8485 * This function handles all cases when a set mode does not come upon hotplug.
8486 * This includes when a display is unplugged and then plugged back into the
8487 * same port, and when running without usermode desktop manager support
e7b07cee 8488 */
3ee6b26b
AD
8489void dm_restore_drm_connector_state(struct drm_device *dev,
8490 struct drm_connector *connector)
e7b07cee 8491{
c84dec2f 8492 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
8493 struct amdgpu_crtc *disconnected_acrtc;
8494 struct dm_crtc_state *acrtc_state;
8495
8496 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8497 return;
8498
8499 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
70e8ffc5
HW
8500 if (!disconnected_acrtc)
8501 return;
e7b07cee 8502
70e8ffc5
HW
8503 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8504 if (!acrtc_state->stream)
e7b07cee
HW
8505 return;
8506
8507 /*
8508 * If the previous sink is not released and is different from the current
8509 * one, we deduce we are in a state where we cannot rely on a usermode call
8510 * to turn on the display, so we do it here.
8511 */
8512 if (acrtc_state->stream->sink != aconnector->dc_sink)
8513 dm_force_atomic_commit(&aconnector->base);
8514}
8515
1f6010a9 8516/*
e7b07cee
HW
8517 * Grabs all modesetting locks to serialize against any blocking commits,
8518 * and waits for completion of all non-blocking commits.
8519 */
3ee6b26b
AD
8520static int do_aquire_global_lock(struct drm_device *dev,
8521 struct drm_atomic_state *state)
e7b07cee
HW
8522{
8523 struct drm_crtc *crtc;
8524 struct drm_crtc_commit *commit;
8525 long ret;
8526
1f6010a9
DF
8527 /*
8528 * Adding all modeset locks to acquire_ctx will
e7b07cee
HW
8529 * ensure that when the framework releases it, the
8530 * extra locks we are taking here will get released too.
8531 */
8532 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8533 if (ret)
8534 return ret;
8535
8536 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8537 spin_lock(&crtc->commit_lock);
8538 commit = list_first_entry_or_null(&crtc->commit_list,
8539 struct drm_crtc_commit, commit_entry);
8540 if (commit)
8541 drm_crtc_commit_get(commit);
8542 spin_unlock(&crtc->commit_lock);
8543
8544 if (!commit)
8545 continue;
8546
1f6010a9
DF
8547 /*
8548 * Make sure all pending HW programming has completed and
e7b07cee
HW
8549 * all page flips are done
8550 */
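/*
 * For reference: wait_for_completion_interruptible_timeout() returns
 * 0 on timeout, -ERESTARTSYS if interrupted, and the remaining jiffies
 * (> 0) on completion, which is why the code below only chains the
 * second wait when ret > 0 and treats ret == 0 as a timeout.
 */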
8551 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8552
8553 if (ret > 0)
8554 ret = wait_for_completion_interruptible_timeout(
8555 &commit->flip_done, 10*HZ);
8556
8557 if (ret == 0)
8558 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
b830ebc9 8559 "timed out\n", crtc->base.id, crtc->name);
e7b07cee
HW
8560
8561 drm_crtc_commit_put(commit);
8562 }
8563
8564 return ret < 0 ? ret : 0;
8565}
8566
bb47de73
NK
8567static void get_freesync_config_for_crtc(
8568 struct dm_crtc_state *new_crtc_state,
8569 struct dm_connector_state *new_con_state)
98e6436d
AK
8570{
8571 struct mod_freesync_config config = {0};
98e6436d
AK
8572 struct amdgpu_dm_connector *aconnector =
8573 to_amdgpu_dm_connector(new_con_state->base.connector);
a057ec46 8574 struct drm_display_mode *mode = &new_crtc_state->base.mode;
0ab925d3 8575 int vrefresh = drm_mode_vrefresh(mode);
98e6436d 8576
a057ec46 8577 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
0ab925d3
NK
8578 vrefresh >= aconnector->min_vfreq &&
8579 vrefresh <= aconnector->max_vfreq;
bb47de73 8580
a057ec46
IB
8581 if (new_crtc_state->vrr_supported) {
8582 new_crtc_state->stream->ignore_msa_timing_param = true;
bb47de73 8583 config.state = new_crtc_state->base.vrr_enabled ?
98e6436d
AK
8584 VRR_STATE_ACTIVE_VARIABLE :
8585 VRR_STATE_INACTIVE;
8586 config.min_refresh_in_uhz =
8587 aconnector->min_vfreq * 1000000;
8588 config.max_refresh_in_uhz =
8589 aconnector->max_vfreq * 1000000;
69ff8845 8590 config.vsif_supported = true;
180db303 8591 config.btr = true;
98e6436d
AK
8592 }
8593
bb47de73
NK
8594 new_crtc_state->freesync_config = config;
8595}
98e6436d 8596
bb47de73
NK
8597static void reset_freesync_config_for_crtc(
8598 struct dm_crtc_state *new_crtc_state)
8599{
8600 new_crtc_state->vrr_supported = false;
98e6436d 8601
bb47de73
NK
8602 memset(&new_crtc_state->vrr_infopacket, 0,
8603 sizeof(new_crtc_state->vrr_infopacket));
98e6436d
AK
8604}
8605
4b9674e5
LL
8606static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8607 struct drm_atomic_state *state,
8608 struct drm_crtc *crtc,
8609 struct drm_crtc_state *old_crtc_state,
8610 struct drm_crtc_state *new_crtc_state,
8611 bool enable,
8612 bool *lock_and_validation_needed)
e7b07cee 8613{
eb3dc897 8614 struct dm_atomic_state *dm_state = NULL;
54d76575 8615 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9635b754 8616 struct dc_stream_state *new_stream;
62f55537 8617 int ret = 0;
d4d4a645 8618
1f6010a9
DF
8619 /*
8620 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
8621 * update changed items
8622 */
4b9674e5
LL
8623 struct amdgpu_crtc *acrtc = NULL;
8624 struct amdgpu_dm_connector *aconnector = NULL;
8625 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8626 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
e7b07cee 8627
4b9674e5 8628 new_stream = NULL;
9635b754 8629
4b9674e5
LL
8630 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8631 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8632 acrtc = to_amdgpu_crtc(crtc);
4b9674e5 8633 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
19f89e23 8634
4b9674e5
LL
8635 /* TODO This hack should go away */
8636 if (aconnector && enable) {
8637 /* Make sure fake sink is created in plug-in scenario */
8638 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8639 &aconnector->base);
8640 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8641 &aconnector->base);
19f89e23 8642
4b9674e5
LL
8643 if (IS_ERR(drm_new_conn_state)) {
8644 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8645 goto fail;
8646 }
19f89e23 8647
4b9674e5
LL
8648 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8649 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
19f89e23 8650
02d35a67
JFZ
8651 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8652 goto skip_modeset;
8653
cbd14ae7
SW
8654 new_stream = create_validate_stream_for_sink(aconnector,
8655 &new_crtc_state->mode,
8656 dm_new_conn_state,
8657 dm_old_crtc_state->stream);
19f89e23 8658
4b9674e5
LL
8659 /*
8660 * We can have no stream on ACTION_SET if a display
8661 * was disconnected during S3; in this case it is not an
8662 * error, the OS will be updated after detection and
8663 * will do the right thing on the next atomic commit.
8664 */
19f89e23 8665
4b9674e5
LL
8666 if (!new_stream) {
8667 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8668 __func__, acrtc->base.base.id);
8669 ret = -ENOMEM;
8670 goto fail;
8671 }
e7b07cee 8672
3d4e52d0
VL
8673 /*
8674 * TODO: Check VSDB bits to decide whether this should
8675 * be enabled or not.
8676 */
8677 new_stream->triggered_crtc_reset.enabled =
8678 dm->force_timing_sync;
8679
4b9674e5 8680 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
98e6436d 8681
88694af9
NK
8682 ret = fill_hdr_info_packet(drm_new_conn_state,
8683 &new_stream->hdr_static_metadata);
8684 if (ret)
8685 goto fail;
8686
7e930949
NK
8687 /*
8688 * If we already removed the old stream from the context
8689 * (and set the new stream to NULL) then we can't reuse
8690 * the old stream even if the stream and scaling are unchanged.
8691 * We'll hit the BUG_ON and get a black screen.
8692 *
8693 * TODO: Refactor this function to allow this check to work
8694 * in all conditions.
8695 */
8696 if (dm_new_crtc_state->stream &&
8697 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
4b9674e5
LL
8698 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8699 new_crtc_state->mode_changed = false;
8700 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
8701 new_crtc_state->mode_changed);
62f55537 8702 }
4b9674e5 8703 }
b830ebc9 8704
02d35a67 8705 /* mode_changed flag may get updated above, need to check again */
4b9674e5
LL
8706 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8707 goto skip_modeset;
e7b07cee 8708
4b9674e5
LL
8709 DRM_DEBUG_DRIVER(
8710 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8711 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8712 "connectors_changed:%d\n",
8713 acrtc->crtc_id,
8714 new_crtc_state->enable,
8715 new_crtc_state->active,
8716 new_crtc_state->planes_changed,
8717 new_crtc_state->mode_changed,
8718 new_crtc_state->active_changed,
8719 new_crtc_state->connectors_changed);
62f55537 8720
4b9674e5
LL
8721 /* Remove stream for any changed/disabled CRTC */
8722 if (!enable) {
62f55537 8723
4b9674e5
LL
8724 if (!dm_old_crtc_state->stream)
8725 goto skip_modeset;
eb3dc897 8726
4b9674e5
LL
8727 ret = dm_atomic_get_state(state, &dm_state);
8728 if (ret)
8729 goto fail;
e7b07cee 8730
4b9674e5
LL
8731 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8732 crtc->base.id);
62f55537 8733
4b9674e5
LL
8734 /* i.e. reset mode */
8735 if (dc_remove_stream_from_ctx(
8736 dm->dc,
8737 dm_state->context,
8738 dm_old_crtc_state->stream) != DC_OK) {
8739 ret = -EINVAL;
8740 goto fail;
8741 }
62f55537 8742
4b9674e5
LL
8743 dc_stream_release(dm_old_crtc_state->stream);
8744 dm_new_crtc_state->stream = NULL;
bb47de73 8745
4b9674e5 8746 reset_freesync_config_for_crtc(dm_new_crtc_state);
62f55537 8747
4b9674e5 8748 *lock_and_validation_needed = true;
62f55537 8749
4b9674e5
LL
8750 } else {/* Add stream for any updated/enabled CRTC */
8751 /*
8752 * Quick fix to prevent a NULL pointer dereference on new_stream when
8753 * newly added MST connectors are not found in the existing crtc_state in chained mode.
8754 * TODO: need to dig out the root cause of that
8755 */
8756 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8757 goto skip_modeset;
62f55537 8758
4b9674e5
LL
8759 if (modereset_required(new_crtc_state))
8760 goto skip_modeset;
62f55537 8761
4b9674e5
LL
8762 if (modeset_required(new_crtc_state, new_stream,
8763 dm_old_crtc_state->stream)) {
62f55537 8764
4b9674e5 8765 WARN_ON(dm_new_crtc_state->stream);
eb3dc897 8766
4b9674e5
LL
8767 ret = dm_atomic_get_state(state, &dm_state);
8768 if (ret)
8769 goto fail;
27b3f4fc 8770
4b9674e5 8771 dm_new_crtc_state->stream = new_stream;
62f55537 8772
4b9674e5 8773 dc_stream_retain(new_stream);
1dc90497 8774
4b9674e5
LL
8775 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8776 crtc->base.id);
1dc90497 8777
4b9674e5
LL
8778 if (dc_add_stream_to_ctx(
8779 dm->dc,
8780 dm_state->context,
8781 dm_new_crtc_state->stream) != DC_OK) {
8782 ret = -EINVAL;
8783 goto fail;
9b690ef3
BL
8784 }
8785
4b9674e5
LL
8786 *lock_and_validation_needed = true;
8787 }
8788 }
e277adc5 8789
4b9674e5
LL
8790skip_modeset:
8791 /* Release extra reference */
8792 if (new_stream)
8793 dc_stream_release(new_stream);
e277adc5 8794
4b9674e5
LL
8795 /*
8796 * We want to do dc stream updates that do not require a
8797 * full modeset below.
8798 */
2afda735 8799 if (!(enable && aconnector && new_crtc_state->active))
4b9674e5
LL
8800 return 0;
8801 /*
8802 * Given above conditions, the dc state cannot be NULL because:
8803 * 1. We're in the process of enabling CRTCs (the CRTC has just been
8804 * added to the dc context, or is already on the context),
8805 * 2. Has a valid connector attached, and
8806 * 3. Is currently active and enabled.
8807 * => The dc stream state currently exists.
8808 */
8809 BUG_ON(dm_new_crtc_state->stream == NULL);
a9e8d275 8810
4b9674e5
LL
8811 /* Scaling or underscan settings */
8812 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8813 update_stream_scaling_settings(
8814 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
98e6436d 8815
b05e2c5e
DF
8816 /* ABM settings */
8817 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8818
4b9674e5
LL
8819 /*
8820 * Color management settings. We also update color properties
8821 * when a modeset is needed, to ensure it gets reprogrammed.
8822 */
8823 if (dm_new_crtc_state->base.color_mgmt_changed ||
8824 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
cf020d49 8825 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
4b9674e5
LL
8826 if (ret)
8827 goto fail;
62f55537 8828 }
e7b07cee 8829
4b9674e5
LL
8830 /* Update Freesync settings. */
8831 get_freesync_config_for_crtc(dm_new_crtc_state,
8832 dm_new_conn_state);
8833
62f55537 8834 return ret;
9635b754
DS
8835
8836fail:
8837 if (new_stream)
8838 dc_stream_release(new_stream);
8839 return ret;
62f55537 8840}
9b690ef3 8841
f6ff2a08
NK
8842static bool should_reset_plane(struct drm_atomic_state *state,
8843 struct drm_plane *plane,
8844 struct drm_plane_state *old_plane_state,
8845 struct drm_plane_state *new_plane_state)
8846{
8847 struct drm_plane *other;
8848 struct drm_plane_state *old_other_state, *new_other_state;
8849 struct drm_crtc_state *new_crtc_state;
8850 int i;
8851
70a1efac
NK
8852 /*
8853 * TODO: Remove this hack once the checks below are sufficient
8854 * to determine when we need to reset all the planes on
8855 * the stream.
8856 */
8857 if (state->allow_modeset)
8858 return true;
8859
f6ff2a08
NK
8860 /* Exit early if we know that we're adding or removing the plane. */
8861 if (old_plane_state->crtc != new_plane_state->crtc)
8862 return true;
8863
8864 /* old crtc == new_crtc == NULL, plane not in context. */
8865 if (!new_plane_state->crtc)
8866 return false;
8867
8868 new_crtc_state =
8869 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8870
8871 if (!new_crtc_state)
8872 return true;
8873
7316c4ad
NK
8874 /* CRTC Degamma changes currently require us to recreate planes. */
8875 if (new_crtc_state->color_mgmt_changed)
8876 return true;
8877
f6ff2a08
NK
8878 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8879 return true;
8880
8881 /*
8882 * If there are any new primary or overlay planes being added or
8883 * removed then the z-order can potentially change. To ensure
8884 * correct z-order and pipe acquisition the current DC architecture
8885 * requires us to remove and recreate all existing planes.
8886 *
8887 * TODO: Come up with a more elegant solution for this.
8888 */
8889 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
6eed95b0 8890 struct amdgpu_framebuffer *old_afb, *new_afb;
f6ff2a08
NK
8891 if (other->type == DRM_PLANE_TYPE_CURSOR)
8892 continue;
8893
8894 if (old_other_state->crtc != new_plane_state->crtc &&
8895 new_other_state->crtc != new_plane_state->crtc)
8896 continue;
8897
8898 if (old_other_state->crtc != new_other_state->crtc)
8899 return true;
8900
dc4cb30d
NK
8901 /* Src/dst size and scaling updates. */
8902 if (old_other_state->src_w != new_other_state->src_w ||
8903 old_other_state->src_h != new_other_state->src_h ||
8904 old_other_state->crtc_w != new_other_state->crtc_w ||
8905 old_other_state->crtc_h != new_other_state->crtc_h)
8906 return true;
8907
8908 /* Rotation / mirroring updates. */
8909 if (old_other_state->rotation != new_other_state->rotation)
8910 return true;
8911
8912 /* Blending updates. */
8913 if (old_other_state->pixel_blend_mode !=
8914 new_other_state->pixel_blend_mode)
8915 return true;
8916
8917 /* Alpha updates. */
8918 if (old_other_state->alpha != new_other_state->alpha)
8919 return true;
8920
8921 /* Colorspace changes. */
8922 if (old_other_state->color_range != new_other_state->color_range ||
8923 old_other_state->color_encoding != new_other_state->color_encoding)
8924 return true;
8925
9a81cc60
NK
8926 /* Framebuffer checks fall at the end. */
8927 if (!old_other_state->fb || !new_other_state->fb)
8928 continue;
8929
8930 /* Pixel format changes can require bandwidth updates. */
8931 if (old_other_state->fb->format != new_other_state->fb->format)
8932 return true;
8933
6eed95b0
BN
8934 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
8935 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9a81cc60
NK
8936
8937 /* Tiling and DCC changes also require bandwidth updates. */
37384b3f
BN
8938 if (old_afb->tiling_flags != new_afb->tiling_flags ||
8939 old_afb->base.modifier != new_afb->base.modifier)
f6ff2a08
NK
8940 return true;
8941 }
8942
8943 return false;
8944}
8945
9e869063
LL
8946static int dm_update_plane_state(struct dc *dc,
8947 struct drm_atomic_state *state,
8948 struct drm_plane *plane,
8949 struct drm_plane_state *old_plane_state,
8950 struct drm_plane_state *new_plane_state,
8951 bool enable,
8952 bool *lock_and_validation_needed)
62f55537 8953{
eb3dc897
NK
8954
8955 struct dm_atomic_state *dm_state = NULL;
62f55537 8956 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
0bc9706d 8957 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
54d76575 8958 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
54d76575 8959 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
626bf90f 8960 struct amdgpu_crtc *new_acrtc;
f6ff2a08 8961 bool needs_reset;
62f55537 8962 int ret = 0;
e7b07cee 8963
9b690ef3 8964
9e869063
LL
8965 new_plane_crtc = new_plane_state->crtc;
8966 old_plane_crtc = old_plane_state->crtc;
8967 dm_new_plane_state = to_dm_plane_state(new_plane_state);
8968 dm_old_plane_state = to_dm_plane_state(old_plane_state);
62f55537 8969
626bf90f
SS
8970 /* TODO: Implement a better atomic check for the cursor plane */
8971 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8972 if (!enable || !new_plane_crtc ||
8973 drm_atomic_plane_disabling(plane->state, new_plane_state))
8974 return 0;
8975
8976 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
8977
5f581248
SS
8978 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
8979 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
8980 return -EINVAL;
8981 }
8982
24f99d2b
SS
8983 if (new_plane_state->fb) {
8984 if (new_plane_state->fb->width > new_acrtc->max_cursor_width ||
8985 new_plane_state->fb->height > new_acrtc->max_cursor_height) {
8986 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
8987 new_plane_state->fb->width,
8988 new_plane_state->fb->height);
8989 return -EINVAL;
8990 }
5f581248
SS
8991 if (new_plane_state->src_w != new_plane_state->fb->width << 16 ||
8992 new_plane_state->src_h != new_plane_state->fb->height << 16) {
8993 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
8994 return -EINVAL;
8995 }
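/*
 * Note (editorial): plane source coordinates are 16.16 fixed point,
 * so "fb->width << 16" converts whole pixels into the same format.
 * E.g. a 64-pixel-wide cursor FB must have src_w == 64 << 16
 * (0x400000); any other value implies cropping or scaling, which the
 * cursor pipe cannot do.
 */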
24f99d2b
SS
8996
8997 switch (new_plane_state->fb->width) {
8998 case 64:
8999 case 128:
9000 case 256:
9001 /* FB width is supported by cursor plane */
9002 break;
9003 default:
9004 DRM_DEBUG_ATOMIC("Bad cursor FB width %d\n",
9005 new_plane_state->fb->width);
9006 return -EINVAL;
9007 }
9008 }
9009
9e869063 9010 return 0;
626bf90f 9011 }
9b690ef3 9012
f6ff2a08
NK
9013 needs_reset = should_reset_plane(state, plane, old_plane_state,
9014 new_plane_state);
9015
9e869063
LL
9016 /* Remove any changed/removed planes */
9017 if (!enable) {
f6ff2a08 9018 if (!needs_reset)
9e869063 9019 return 0;
a7b06724 9020
9e869063
LL
9021 if (!old_plane_crtc)
9022 return 0;
62f55537 9023
9e869063
LL
9024 old_crtc_state = drm_atomic_get_old_crtc_state(
9025 state, old_plane_crtc);
9026 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9b690ef3 9027
9e869063
LL
9028 if (!dm_old_crtc_state->stream)
9029 return 0;
62f55537 9030
9e869063
LL
9031 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9032 plane->base.id, old_plane_crtc->base.id);
9b690ef3 9033
9e869063
LL
9034 ret = dm_atomic_get_state(state, &dm_state);
9035 if (ret)
9036 return ret;
eb3dc897 9037
9e869063
LL
9038 if (!dc_remove_plane_from_context(
9039 dc,
9040 dm_old_crtc_state->stream,
9041 dm_old_plane_state->dc_state,
9042 dm_state->context)) {
62f55537 9043
c3537613 9044 return -EINVAL;
9e869063 9045 }
e7b07cee 9046
9b690ef3 9047
9e869063
LL
9048 dc_plane_state_release(dm_old_plane_state->dc_state);
9049 dm_new_plane_state->dc_state = NULL;
1dc90497 9050
9e869063 9051 *lock_and_validation_needed = true;
1dc90497 9052
9e869063
LL
9053 } else { /* Add new planes */
9054 struct dc_plane_state *dc_new_plane_state;
1dc90497 9055
9e869063
LL
9056 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9057 return 0;
e7b07cee 9058
9e869063
LL
9059 if (!new_plane_crtc)
9060 return 0;
e7b07cee 9061
9e869063
LL
9062 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9063 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1dc90497 9064
9e869063
LL
9065 if (!dm_new_crtc_state->stream)
9066 return 0;
62f55537 9067
f6ff2a08 9068 if (!needs_reset)
9e869063 9069 return 0;
62f55537 9070
8c44515b
AP
9071 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9072 if (ret)
9073 return ret;
9074
9e869063 9075 WARN_ON(dm_new_plane_state->dc_state);
9b690ef3 9076
9e869063
LL
9077 dc_new_plane_state = dc_create_plane_state(dc);
9078 if (!dc_new_plane_state)
9079 return -ENOMEM;
62f55537 9080
9e869063
LL
9081 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
9082 plane->base.id, new_plane_crtc->base.id);
8c45c5db 9083
695af5f9 9084 ret = fill_dc_plane_attributes(
1348969a 9085 drm_to_adev(new_plane_crtc->dev),
9e869063
LL
9086 dc_new_plane_state,
9087 new_plane_state,
9088 new_crtc_state);
9089 if (ret) {
9090 dc_plane_state_release(dc_new_plane_state);
9091 return ret;
9092 }
62f55537 9093
9e869063
LL
9094 ret = dm_atomic_get_state(state, &dm_state);
9095 if (ret) {
9096 dc_plane_state_release(dc_new_plane_state);
9097 return ret;
9098 }
eb3dc897 9099
9e869063
LL
9100 /*
9101 * Any atomic check errors that occur after this will
9102 * not need a release. The plane state will be attached
9103 * to the stream, and therefore part of the atomic
9104 * state. It'll be released when the atomic state is
9105 * cleaned.
9106 */
9107 if (!dc_add_plane_to_context(
9108 dc,
9109 dm_new_crtc_state->stream,
9110 dc_new_plane_state,
9111 dm_state->context)) {
62f55537 9112
9e869063
LL
9113 dc_plane_state_release(dc_new_plane_state);
9114 return -EINVAL;
9115 }
8c45c5db 9116
9e869063 9117 dm_new_plane_state->dc_state = dc_new_plane_state;
000b59ea 9118
9e869063
LL
9119 /* Tell DC to do a full surface update every time there
9120 * is a plane change. Inefficient, but works for now.
9121 */
9122 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9123
9124 *lock_and_validation_needed = true;
62f55537 9125 }
e7b07cee
HW
9126
9127
62f55537
AG
9128 return ret;
9129}
a87fa993 9130
12f4849a
SS
9131static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9132 struct drm_crtc *crtc,
9133 struct drm_crtc_state *new_crtc_state)
9134{
9135 struct drm_plane_state *new_cursor_state, *new_primary_state;
9136 int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9137
9138 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9139 * cursor per pipe, but it is going to inherit the scaling and
9140 * positioning from the underlying pipe. Check that the cursor plane's
9141 * scaling matches the primary plane's. */
9142
9143 new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9144 new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9145 if (!new_cursor_state || !new_primary_state || !new_cursor_state->fb) {
9146 return 0;
9147 }
9148
9149 cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9150 (new_cursor_state->src_w >> 16);
9151 cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9152 (new_cursor_state->src_h >> 16);
9153
9154 primary_scale_w = new_primary_state->crtc_w * 1000 /
9155 (new_primary_state->src_w >> 16);
9156 primary_scale_h = new_primary_state->crtc_h * 1000 /
9157 (new_primary_state->src_h >> 16);
9158
9159 if (cursor_scale_w != primary_scale_w ||
9160 cursor_scale_h != primary_scale_h) {
9161 DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9162 return -EINVAL;
9163 }
9164
9165 return 0;
9166}
9167
e10517b3 9168#if defined(CONFIG_DRM_AMD_DC_DCN)
44be939f
ML
9169static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9170{
9171 struct drm_connector *connector;
9172 struct drm_connector_state *conn_state;
9173 struct amdgpu_dm_connector *aconnector = NULL;
9174 int i;
9175 for_each_new_connector_in_state(state, connector, conn_state, i) {
9176 if (conn_state->crtc != crtc)
9177 continue;
9178
9179 aconnector = to_amdgpu_dm_connector(connector);
9180 if (!aconnector->port || !aconnector->mst_port)
9181 aconnector = NULL;
9182 else
9183 break;
9184 }
9185
9186 if (!aconnector)
9187 return 0;
9188
9189 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9190}
e10517b3 9191#endif
44be939f 9192
b8592b48
LL
9193/**
9194 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9195 * @dev: The DRM device
9196 * @state: The atomic state to commit
9197 *
9198 * Validate that the given atomic state is programmable by DC into hardware.
9199 * This involves constructing a &struct dc_state reflecting the new hardware
9200 * state we wish to commit, then querying DC to see if it is programmable. It's
9201 * important not to modify the existing DC state. Otherwise, atomic_check
9202 * may unexpectedly commit hardware changes.
9203 *
9204 * When validating the DC state, it's important that the right locks are
9205 * acquired. For full updates case which removes/adds/updates streams on one
9206 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
9207 * that any such full update commit will wait for completion of any outstanding
f6d7c7fa 9208 * flip using DRM's synchronization events.
b8592b48
LL
9209 *
9210 * Note that DM adds the affected connectors for all CRTCs in state, when that
9211 * might not seem necessary. This is because DC stream creation requires the
9212 * DC sink, which is tied to the DRM connector state. Cleaning this up should
9213 * be possible but non-trivial - a possible TODO item.
9214 *
9215 * Return: 0 on success, or a negative error code if validation failed.
9216 */
7578ecda
AD
9217static int amdgpu_dm_atomic_check(struct drm_device *dev,
9218 struct drm_atomic_state *state)
62f55537 9219{
1348969a 9220 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897 9221 struct dm_atomic_state *dm_state = NULL;
62f55537 9222 struct dc *dc = adev->dm.dc;
62f55537 9223 struct drm_connector *connector;
c2cea706 9224 struct drm_connector_state *old_con_state, *new_con_state;
62f55537 9225 struct drm_crtc *crtc;
fc9e9920 9226 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9e869063
LL
9227 struct drm_plane *plane;
9228 struct drm_plane_state *old_plane_state, *new_plane_state;
74a16675 9229 enum dc_status status;
1e88ad0a 9230 int ret, i;
62f55537 9231 bool lock_and_validation_needed = false;
886876ec 9232 struct dm_crtc_state *dm_old_crtc_state;
62f55537 9233
e8a98235 9234 trace_amdgpu_dm_atomic_check_begin(state);
c44a22b3 9235
62f55537 9236 ret = drm_atomic_helper_check_modeset(dev, state);
01e28f9c
MD
9237 if (ret)
9238 goto fail;
62f55537 9239
c5892a10
SW
9240 /* Check connector changes */
9241 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9242 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9243 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9244
9245 /* Skip connectors that are disabled or part of modeset already. */
9246 if (!old_con_state->crtc && !new_con_state->crtc)
9247 continue;
9248
9249 if (!new_con_state->crtc)
9250 continue;
9251
9252 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9253 if (IS_ERR(new_crtc_state)) {
9254 ret = PTR_ERR(new_crtc_state);
9255 goto fail;
9256 }
9257
9258 if (dm_old_con_state->abm_level !=
9259 dm_new_con_state->abm_level)
9260 new_crtc_state->connectors_changed = true;
9261 }
9262
e10517b3 9263#if defined(CONFIG_DRM_AMD_DC_DCN)
44be939f
ML
9264 if (adev->asic_type >= CHIP_NAVI10) {
9265 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9266 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9267 ret = add_affected_mst_dsc_crtcs(state, crtc);
9268 if (ret)
9269 goto fail;
9270 }
9271 }
9272 }
e10517b3 9273#endif
1e88ad0a 9274 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
886876ec
EB
9275 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9276
1e88ad0a 9277 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
98e6436d 9278 !new_crtc_state->color_mgmt_changed &&
886876ec
EB
9279 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
9280 dm_old_crtc_state->dsc_force_changed == false)
1e88ad0a 9281 continue;
7bef1af3 9282
1e88ad0a
S
9283 if (!new_crtc_state->enable)
9284 continue;
fc9e9920 9285
1e88ad0a
S
9286 ret = drm_atomic_add_affected_connectors(state, crtc);
9287 if (ret)
9288 return ret;
fc9e9920 9289
1e88ad0a
S
9290 ret = drm_atomic_add_affected_planes(state, crtc);
9291 if (ret)
9292 goto fail;
e7b07cee
HW
9293 }
9294
2d9e6431
NK
9295 /*
9296 * Add all primary and overlay planes on the CRTC to the state
9297 * whenever a plane is enabled to maintain correct z-ordering
9298 * and to enable fast surface updates.
9299 */
9300 drm_for_each_crtc(crtc, dev) {
9301 bool modified = false;
9302
9303 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9304 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9305 continue;
9306
9307 if (new_plane_state->crtc == crtc ||
9308 old_plane_state->crtc == crtc) {
9309 modified = true;
9310 break;
9311 }
9312 }
9313
9314 if (!modified)
9315 continue;
9316
9317 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9318 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9319 continue;
9320
9321 new_plane_state =
9322 drm_atomic_get_plane_state(state, plane);
9323
9324 if (IS_ERR(new_plane_state)) {
9325 ret = PTR_ERR(new_plane_state);
9326 goto fail;
9327 }
9328 }
9329 }
9330
62f55537 9331 /* Remove exiting planes if they are modified */
9e869063
LL
9332 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9333 ret = dm_update_plane_state(dc, state, plane,
9334 old_plane_state,
9335 new_plane_state,
9336 false,
9337 &lock_and_validation_needed);
9338 if (ret)
9339 goto fail;
62f55537
AG
9340 }
9341
9342 /* Disable all crtcs which require disable */
4b9674e5
LL
9343 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9344 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9345 old_crtc_state,
9346 new_crtc_state,
9347 false,
9348 &lock_and_validation_needed);
9349 if (ret)
9350 goto fail;
62f55537
AG
9351 }
9352
9353 /* Enable all crtcs which require enable */
4b9674e5
LL
9354 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9355 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9356 old_crtc_state,
9357 new_crtc_state,
9358 true,
9359 &lock_and_validation_needed);
9360 if (ret)
9361 goto fail;
62f55537
AG
9362 }
9363
9364 /* Add new/modified planes */
9e869063
LL
9365 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9366 ret = dm_update_plane_state(dc, state, plane,
9367 old_plane_state,
9368 new_plane_state,
9369 true,
9370 &lock_and_validation_needed);
9371 if (ret)
9372 goto fail;
62f55537
AG
9373 }
9374
b349f76e
ES
9375 /* Run this here since we want to validate the streams we created */
9376 ret = drm_atomic_helper_check_planes(dev, state);
9377 if (ret)
9378 goto fail;
62f55537 9379
12f4849a
SS
9380 /* Check cursor planes scaling */
9381 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9382 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
9383 if (ret)
9384 goto fail;
9385 }
9386
43d10d30
NK
9387 if (state->legacy_cursor_update) {
9388 /*
9389 * This is a fast cursor update coming from the plane update
9390 * helper, check if it can be done asynchronously for better
9391 * performance.
9392 */
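/*
 * For reference: drm_atomic_helper_async_check() returns 0 when the
 * commit can be applied asynchronously, hence the negation below.
 */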
9393 state->async_update =
9394 !drm_atomic_helper_async_check(dev, state);
9395
9396 /*
9397 * Skip the remaining global validation if this is an async
9398 * update. Cursor updates can be done without affecting
9399 * state or bandwidth calcs and this avoids the performance
9400 * penalty of locking the private state object and
9401 * allocating a new dc_state.
9402 */
9403 if (state->async_update)
9404 return 0;
9405 }
9406
ebdd27e1 9407 /* Check scaling and underscan changes */
1f6010a9 9408 /* TODO: Removed scaling changes validation due to inability to commit
e7b07cee
HW
9409 * a new stream into the context w/o causing a full reset. Need to
9410 * decide how to handle.
9411 */
c2cea706 9412 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
54d76575
LSL
9413 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9414 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9415 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
e7b07cee
HW
9416
9417 /* Skip any modesets/resets */
0bc9706d
LSL
9418 if (!acrtc || drm_atomic_crtc_needs_modeset(
9419 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
e7b07cee
HW
9420 continue;
9421
b830ebc9 9422 /* Skip anything that is not a scaling or underscan change */
54d76575 9423 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
e7b07cee
HW
9424 continue;
9425
9426 lock_and_validation_needed = true;
9427 }
9428
f6d7c7fa
NK
9429 /*
9430 * Streams and planes are reset when there are changes that affect
9431 * bandwidth. Anything that affects bandwidth needs to go through
9432 * DC global validation to ensure that the configuration can be applied
9433 * to hardware.
9434 *
9435 * We currently have to stall out here in atomic_check for outstanding
9436 * commits to finish in this case because our IRQ handlers reference
9437 * DRM state directly - we can end up disabling interrupts too early
9438 * if we don't.
9439 *
9440 * TODO: Remove this stall and drop DM state private objects.
a87fa993 9441 */
f6d7c7fa 9442 if (lock_and_validation_needed) {
eb3dc897
NK
9443 ret = dm_atomic_get_state(state, &dm_state);
9444 if (ret)
9445 goto fail;
e7b07cee
HW
9446
9447 ret = do_aquire_global_lock(dev, state);
9448 if (ret)
9449 goto fail;
1dc90497 9450
d9fe1a4c 9451#if defined(CONFIG_DRM_AMD_DC_DCN)
8c20a1ed
DF
9452 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
9453 goto fail;
9454
29b9ba74
ML
9455 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
9456 if (ret)
9457 goto fail;
d9fe1a4c 9458#endif
29b9ba74 9459
ded58c7b
ZL
9460 /*
9461 * Perform validation of MST topology in the state:
9462 * We need to perform MST atomic check before calling
9463 * dc_validate_global_state(), or there is a chance
9464 * to get stuck in an infinite loop and hang eventually.
9465 */
9466 ret = drm_dp_mst_atomic_check(state);
9467 if (ret)
9468 goto fail;
74a16675
RS
9469 status = dc_validate_global_state(dc, dm_state->context, false);
9470 if (status != DC_OK) {
9471 DC_LOG_WARNING("DC global validation failure: %s (%d)",
9472 dc_status_to_str(status), status);
e7b07cee
HW
9473 ret = -EINVAL;
9474 goto fail;
9475 }
bd200d19 9476 } else {
674e78ac 9477 /*
bd200d19
NK
9478 * The commit is a fast update. Fast updates shouldn't change
9479 * the DC context, affect global validation, and can have their
9480 * commit work done in parallel with other commits not touching
9481 * the same resource. If we have a new DC context as part of
9482 * the DM atomic state from validation we need to free it and
9483 * retain the existing one instead.
fde9f39a
MR
9484 *
9485 * Furthermore, since the DM atomic state only contains the DC
9486 * context and can safely be annulled, we can free the state
9487 * and clear the associated private object now to free
9488 * some memory and avoid a possible use-after-free later.
674e78ac 9489 */
bd200d19 9490
fde9f39a
MR
9491 for (i = 0; i < state->num_private_objs; i++) {
9492 struct drm_private_obj *obj = state->private_objs[i].ptr;
bd200d19 9493
fde9f39a
MR
9494 if (obj->funcs == adev->dm.atomic_obj.funcs) {
9495 int j = state->num_private_objs-1;
bd200d19 9496
fde9f39a
MR
9497 dm_atomic_destroy_state(obj,
9498 state->private_objs[i].state);
9499
9500 /* If i is not at the end of the array then the
9501 * last element needs to be moved to where i was
9502 * before the array can safely be truncated.
9503 */
9504 if (i != j)
9505 state->private_objs[i] =
9506 state->private_objs[j];
bd200d19 9507
fde9f39a
MR
9508 state->private_objs[j].ptr = NULL;
9509 state->private_objs[j].state = NULL;
9510 state->private_objs[j].old_state = NULL;
9511 state->private_objs[j].new_state = NULL;
9512
9513 state->num_private_objs = j;
9514 break;
9515 }
bd200d19 9516 }
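/*
 * Worked example (editorial): with num_private_objs == 3 and a match
 * at i == 0, j becomes 2, the DM state at slot 0 is destroyed, slot 2
 * is copied into slot 0, slot 2 is cleared, and num_private_objs is
 * truncated to 2 -- a classic swap-with-last removal.
 */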
e7b07cee
HW
9517 }
9518
caff0e66
NK
9519 /* Store the overall update type for use later in atomic check. */
9520 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
9521 struct dm_crtc_state *dm_new_crtc_state =
9522 to_dm_crtc_state(new_crtc_state);
9523
f6d7c7fa
NK
9524 dm_new_crtc_state->update_type = lock_and_validation_needed ?
9525 UPDATE_TYPE_FULL :
9526 UPDATE_TYPE_FAST;
e7b07cee
HW
9527 }
9528
9529 /* Must be success */
9530 WARN_ON(ret);
e8a98235
RS
9531
9532 trace_amdgpu_dm_atomic_check_finish(state, ret);
9533
e7b07cee
HW
9534 return ret;
9535
9536fail:
9537 if (ret == -EDEADLK)
01e28f9c 9538 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
e7b07cee 9539 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
01e28f9c 9540 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
e7b07cee 9541 else
01e28f9c 9542 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
e7b07cee 9543
e8a98235
RS
9544 trace_amdgpu_dm_atomic_check_finish(state, ret);
9545
e7b07cee
HW
9546 return ret;
9547}
9548
3ee6b26b
AD
9549static bool is_dp_capable_without_timing_msa(struct dc *dc,
9550 struct amdgpu_dm_connector *amdgpu_dm_connector)
e7b07cee
HW
9551{
9552 uint8_t dpcd_data;
9553 bool capable = false;
9554
c84dec2f 9555 if (amdgpu_dm_connector->dc_link &&
e7b07cee
HW
9556 dm_helpers_dp_read_dpcd(
9557 NULL,
c84dec2f 9558 amdgpu_dm_connector->dc_link,
e7b07cee
HW
9559 DP_DOWN_STREAM_PORT_COUNT,
9560 &dpcd_data,
9561 sizeof(dpcd_data))) {
9562 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
9563 }
9564
9565 return capable;
9566}
98e6436d
AK
9567void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
9568 struct edid *edid)
e7b07cee
HW
9569{
9570 int i;
e7b07cee
HW
9571 bool edid_check_required;
9572 struct detailed_timing *timing;
9573 struct detailed_non_pixel *data;
9574 struct detailed_data_monitor_range *range;
c84dec2f
HW
9575 struct amdgpu_dm_connector *amdgpu_dm_connector =
9576 to_amdgpu_dm_connector(connector);
bb47de73 9577 struct dm_connector_state *dm_con_state = NULL;
e7b07cee
HW
9578
9579 struct drm_device *dev = connector->dev;
1348969a 9580 struct amdgpu_device *adev = drm_to_adev(dev);
bb47de73 9581 bool freesync_capable = false;
b830ebc9 9582
8218d7f1
HW
9583 if (!connector->state) {
9584 DRM_ERROR("%s - Connector has no state", __func__);
bb47de73 9585 goto update;
8218d7f1
HW
9586 }
9587
98e6436d
AK
9588 if (!edid) {
9589 dm_con_state = to_dm_connector_state(connector->state);
9590
9591 amdgpu_dm_connector->min_vfreq = 0;
9592 amdgpu_dm_connector->max_vfreq = 0;
9593 amdgpu_dm_connector->pixel_clock_mhz = 0;
9594
bb47de73 9595 goto update;
98e6436d
AK
9596 }
9597
8218d7f1
HW
9598 dm_con_state = to_dm_connector_state(connector->state);
9599
e7b07cee 9600 edid_check_required = false;
c84dec2f 9601 if (!amdgpu_dm_connector->dc_sink) {
e7b07cee 9602 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
bb47de73 9603 goto update;
e7b07cee
HW
9604 }
9605 if (!adev->dm.freesync_module)
bb47de73 9606 goto update;
e7b07cee
HW
9607 /*
9608 * If the EDID is non-NULL, restrict FreeSync to DP and eDP only
9609 */
9610 if (edid) {
c84dec2f
HW
9611 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
9612 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
e7b07cee
HW
9613 edid_check_required = is_dp_capable_without_timing_msa(
9614 adev->dm.dc,
c84dec2f 9615 amdgpu_dm_connector);
e7b07cee
HW
9616 }
9617 }
e7b07cee
HW
9618 if (edid_check_required && (edid->version > 1 ||
9619 (edid->version == 1 && edid->revision > 1))) {
9620 for (i = 0; i < 4; i++) {
9621
9622 timing = &edid->detailed_timings[i];
9623 data = &timing->data.other_data;
9624 range = &data->data.range;
9625 /*
9626 * Check if monitor has continuous frequency mode
9627 */
9628 if (data->type != EDID_DETAIL_MONITOR_RANGE)
9629 continue;
9630 /*
9631 * Check for flag range limits only. If flag == 1 then
9632 * no additional timing information provided.
9633 * Default GTF, GTF Secondary curve and CVT are not
9634 * supported
9635 */
9636 if (range->flags != 1)
9637 continue;
9638
c84dec2f
HW
9639 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
9640 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
9641 amdgpu_dm_connector->pixel_clock_mhz =
e7b07cee
HW
9642 range->pixel_clock_mhz * 10;
9643 break;
9644 }
9645
c84dec2f 9646 if (amdgpu_dm_connector->max_vfreq -
98e6436d
AK
9647 amdgpu_dm_connector->min_vfreq > 10) {
9648
bb47de73 9649 freesync_capable = true;
e7b07cee
HW
9650 }
9651 }
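/*
 * Worked example (editorial): an EDID display range limits descriptor
 * (data->type == EDID_DETAIL_MONITOR_RANGE, range->flags == 1)
 * advertising 48-144 Hz with a max pixel clock byte of 30 yields
 * min_vfreq = 48, max_vfreq = 144 and pixel_clock_mhz = 300, since the
 * EDID field is stored in units of 10 MHz. The "spread > 10 Hz" check
 * above then marks the sink freesync_capable.
 */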
bb47de73
NK
9652
9653update:
9654 if (dm_con_state)
9655 dm_con_state->freesync_capable = freesync_capable;
9656
9657 if (connector->vrr_capable_property)
9658 drm_connector_set_vrr_capable_property(connector,
9659 freesync_capable);
e7b07cee
HW
9660}
9661
8c322309
RL
9662static void amdgpu_dm_set_psr_caps(struct dc_link *link)
9663{
9664 uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
9665
9666 if (!(link->connector_signal & SIGNAL_TYPE_EDP))
9667 return;
9668 if (link->type == dc_connection_none)
9669 return;
9670 if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
9671 dpcd_data, sizeof(dpcd_data))) {
d1ebfdd8
WW
9672 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
9673
9674 if (dpcd_data[0] == 0) {
1cfbbdde 9675 link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
d1ebfdd8
WW
9676 link->psr_settings.psr_feature_enabled = false;
9677 } else {
1cfbbdde 9678 link->psr_settings.psr_version = DC_PSR_VERSION_1;
d1ebfdd8
WW
9679 link->psr_settings.psr_feature_enabled = true;
9680 }
9681
9682 DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
8c322309
RL
9683 }
9684}
9685
9686/*
9687 * amdgpu_dm_link_setup_psr() - configure psr link
9688 * @stream: stream state
9689 *
9690 * Return: true if success
9691 */
9692static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9693{
9694 struct dc_link *link = NULL;
9695 struct psr_config psr_config = {0};
9696 struct psr_context psr_context = {0};
8c322309
RL
9697 bool ret = false;
9698
9699 if (stream == NULL)
9700 return false;
9701
9702 link = stream->link;
8c322309 9703
d1ebfdd8 9704 psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
8c322309
RL
9705
9706 if (psr_config.psr_version > 0) {
9707 psr_config.psr_exit_link_training_required = 0x1;
9708 psr_config.psr_frame_capture_indication_req = 0;
9709 psr_config.psr_rfb_setup_time = 0x37;
9710 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9711 psr_config.allow_smu_optimizations = 0x0;
9712
9713 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
9714
9715 }
d1ebfdd8 9716 DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
8c322309
RL
9717
9718 return ret;
9719}
9720
9721/*
9722 * amdgpu_dm_psr_enable() - enable psr f/w
9723 * @stream: stream state
9724 *
9725 * Return: true if success
9726 */
9727bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9728{
9729 struct dc_link *link = stream->link;
5b5abe95
AK
9730 unsigned int vsync_rate_hz = 0;
9731 struct dc_static_screen_params params = {0};
9732 /* Calculate number of static frames before generating interrupt to
9733 * enter PSR.
9734 */
5b5abe95
AK
9735 // Init fail safe of 2 frames static
9736 unsigned int num_frames_static = 2;
8c322309
RL
9737
9738 DRM_DEBUG_DRIVER("Enabling psr...\n");
9739
5b5abe95
AK
9740 vsync_rate_hz = div64_u64(div64_u64((
9741 stream->timing.pix_clk_100hz * 100),
9742 stream->timing.v_total),
9743 stream->timing.h_total);
9744
9745 /* Round up.
9746 * Calculate the number of frames such that at least 30 ms of time has
9747 * passed.
9748 */
7aa62404
RL
9749 if (vsync_rate_hz != 0) {
9750 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
5b5abe95 9751 num_frames_static = (30000 / frame_time_microsec) + 1;
7aa62404 9752 }
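/*
 * Worked example (editorial): at 60 Hz, frame_time_microsec is
 * 1000000 / 60 = 16666, so num_frames_static = 30000 / 16666 + 1 = 2,
 * i.e. two static frames (about 33 ms) cover the required 30 ms
 * before PSR entry.
 */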
5b5abe95
AK
9753
9754 params.triggers.cursor_update = true;
9755 params.triggers.overlay_update = true;
9756 params.triggers.surface_update = true;
9757 params.num_frames = num_frames_static;
8c322309 9758
5b5abe95 9759 dc_stream_set_static_screen_params(link->ctx->dc,
8c322309 9760 &stream, 1,
5b5abe95 9761 &params);
8c322309 9762
1d496907 9763 return dc_link_set_psr_allow_active(link, true, false, false);
8c322309
RL
9764}
9765
9766/*
9767 * amdgpu_dm_psr_disable() - disable psr f/w
9768 * @stream: stream state
9769 *
9770 * Return: true if success
9771 */
9772static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9773{
9774
9775 DRM_DEBUG_DRIVER("Disabling psr...\n");
9776
1d496907 9777 return dc_link_set_psr_allow_active(stream->link, false, true, false);
8c322309 9778}
3d4e52d0 9779
6ee90e88 9780/*
9781 * amdgpu_dm_psr_disable_all() - disable psr f/w
9782 * if psr is enabled on any stream
9783 *
9784 * Return: true if success
9785 */
9786static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
9787{
9788 DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
9789 return dc_set_psr_allow_active(dm->dc, false);
9790}
9791
3d4e52d0
VL
9792void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
9793{
1348969a 9794 struct amdgpu_device *adev = drm_to_adev(dev);
3d4e52d0
VL
9795 struct dc *dc = adev->dm.dc;
9796 int i;
9797
9798 mutex_lock(&adev->dm.dc_lock);
9799 if (dc->current_state) {
9800 for (i = 0; i < dc->current_state->stream_count; ++i)
9801 dc->current_state->streams[i]
9802 ->triggered_crtc_reset.enabled =
9803 adev->dm.force_timing_sync;
9804
9805 dm_enable_per_frame_crtc_master_sync(dc->current_state);
9806 dc_trigger_sync(dc, dc->current_state);
9807 }
9808 mutex_unlock(&adev->dm.dc_lock);
9809}
9d83722d
RS
9810
9811void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
9812 uint32_t value, const char *func_name)
9813{
9814#ifdef DM_CHECK_ADDR_0
9815 if (address == 0) {
9816 DC_ERR("invalid register write; address = 0\n");
9817 return;
9818 }
9819#endif
9820 cgs_write_register(ctx->cgs_device, address, value);
9821 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
9822}
9823
9824uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
9825 const char *func_name)
9826{
9827 uint32_t value;
9828#ifdef DM_CHECK_ADDR_0
9829 if (address == 0) {
9830 DC_ERR("invalid register read; address = 0\n");
9831 return 0;
9832 }
9833#endif
9834
9835 if (ctx->dmub_srv &&
9836 ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
9837 !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
9838 ASSERT(false);
9839 return 0;
9840 }
9841
9842 value = cgs_read_register(ctx->cgs_device, address);
9843
9844 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
9845
9846 return value;
9847}