Revert "drm/amd/display: reuse current context instead of recreating one"
linux-2.6-block.git: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * Initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
	}
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}
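
#if 0
/*
 * Illustrative sketch, not part of the driver: how a caller could unpack
 * the reg-format values packed by dm_crtc_get_scanoutpos() above. The
 * helper name and out-parameters are hypothetical.
 */
static inline void example_unpack_scanoutpos(u32 vbl, u32 position,
					     u32 *v_blank_start, u32 *v_blank_end,
					     u32 *h_position, u32 *v_position)
{
	*v_blank_start = vbl & 0xffff;	/* low 16 bits */
	*v_blank_end = vbl >> 16;	/* high 16 bits */
	*v_position = position & 0xffff;
	*h_position = position >> 16;
}
#endif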

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	if (!e)
		WARN_ON(1);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int)!e);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
					adev->dm.freesync_module,
					acrtc->dm_irq_params.stream,
					&acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
					adev->dm.dc,
					acrtc->dm_irq_params.stream,
					&acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
				AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
				&compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
		}
	}
}
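
#if 0
/*
 * Illustrative sketch, not part of the driver: the FBC buffer above is
 * sized for the largest listed mode at 4 bytes per pixel. The helper and
 * the numbers in the comment are hypothetical.
 */
static inline unsigned long example_fbc_size(const struct drm_display_mode *mode)
{
	/* e.g. a 1080p timing with htotal=2200, vtotal=1125 needs
	 * 2200 * 1125 * 4 = 9900000 bytes, rounded up to PAGE_SIZE by
	 * amdgpu_bo_create_kernel().
	 */
	return (unsigned long)mode->htotal * mode->vtotal * 4;
}
#endif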

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}
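
#if 0
/*
 * Illustrative sketch, not part of the driver: the ELD copy above derives
 * the payload length with drm_eld_size() and clamps it to the caller's
 * buffer. The helper name is hypothetical.
 */
static inline int example_clamped_eld_copy(unsigned char *dst, int max_bytes,
					   const unsigned char *eld)
{
	int len = drm_eld_size(eld); /* header + baseline block, in bytes */

	memcpy(dst, eld, min(max_bytes, len));
	return len;
}
#endif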

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind = amdgpu_dm_audio_component_bind,
	.unbind = amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}
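
#if 0
/*
 * Illustrative sketch, not part of the driver: the layout of the DMUB
 * firmware image implied by the offset arithmetic in dm_dmub_hw_init()
 * above, relative to ucode_array_offset_bytes:
 *
 *   [PSP header][inst_const payload][PSP footer][bss/data]
 *
 * The helper name is hypothetical.
 */
static inline const u8 *example_dmub_bss_data(const struct firmware *fw,
		const struct dmcub_firmware_header_v1_0 *hdr)
{
	const u8 *ucode = fw->data +
			  le32_to_cpu(hdr->header.ucode_array_offset_bytes);

	/* inst_const_bytes covers header + payload + footer, so the
	 * bss/data region starts right after it.
	 */
	return ucode + le32_to_cpu(hdr->inst_const_bytes);
}
#endif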

#if defined(CONFIG_DRM_AMD_DC_DCN)
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increase system aperture high address (add 1)
		 * to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;
}
#endif
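
#if 0
/*
 * Illustrative sketch, not part of the driver: the shifts used in
 * mmhub_read_system_context() above encode addresses at the granularity
 * of the underlying registers: >> 18 (256 KiB units) for the system
 * aperture, >> 24 (16 MiB units) for AGP, and >> 12 (4 KiB units) for
 * page-table addresses. The round trip below shows that the low bits of
 * the aperture bounds are truncated.
 */
static inline uint64_t example_aperture_round_trip(uint64_t addr)
{
	return (addr >> 18) << 18; /* addr clamped down to a 256 KiB boundary */
}
#endif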

static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			init_data.flags.disable_dmcu = true;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_VANGOGH:
		init_data.flags.gpu_vm_support = true;
		break;
#endif
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->apu_flags) {
		struct dc_phy_addr_space_config pa_config;

		mmhub_read_system_context(adev, &pa_config);

		// Call the DC init_memory func
		dc_setup_system_context(adev->dm.dc, &pa_config);
	}
#endif

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}
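
#if 0
/*
 * Illustrative sketch, not part of the driver: amdgpu_dc_feature_mask and
 * amdgpu_dc_debug_mask, tested in amdgpu_dm_init() above, are bitmasks
 * supplied via module parameters (amdgpu.dcfeaturemask /
 * amdgpu.dcdebugmask). Checking a flag is a plain bit test; the exact bit
 * assignments live in amd_shared.h and are not repeated here.
 */
static inline bool example_dc_flag_set(uint32_t mask, uint32_t flag)
{
	return (mask & flag) != 0; /* e.g. mask & DC_FBC_MASK */
}
#endif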

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif
	if (adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);

	return;
}

static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_VANGOGH:
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}
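
#if 0
/*
 * Illustrative sketch, not part of the driver: the fw_size accounting in
 * load_dmcu_fw() above rounds each firmware region up to a whole page.
 * ALIGN(x, PAGE_SIZE) is (x + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1), so with
 * 4 KiB pages a hypothetical 0x5020-byte region accounts for 0x6000 bytes.
 */
static inline u32 example_fw_region_footprint(u32 bytes)
{
	return ALIGN(bytes, PAGE_SIZE);
}
#endif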

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}

static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
		break;
	case CHIP_SIENNA_CICHLID:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
		break;
	case CHIP_NAVY_FLOUNDER:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
		break;
	case CHIP_VANGOGH:
		dmub_asic = DMUB_ASIC_DCN301;
		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
		break;
	case CHIP_DIMGREY_CAVEFISH:
		dmub_asic = DMUB_ASIC_DCN302;
		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
		break;

	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data = region_params.bss_data_size ?
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes) : NULL;
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}

static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	release_firmware(adev->dm.dmub_fw);
	adev->dm.dmub_fw = NULL;

	release_firmware(adev->dm.fw_dmcu);
	adev->dm.fw_dmcu = NULL;

	return 0;
}

static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}

static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;
	bool ret = true;

	dmcu = adev->dm.dc->res_pool->dmcu;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction, Don't allow below 1%
	 * 0xFFFF x 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;

	/* In the case where abm is implemented on dmcub,
	 * dmcu object will be null.
	 * ABM 2.4 and up are implemented on dmcub.
	 */
	if (dmcu)
		ret = dmcu_load_iram(dmcu, params);
	else if (adev->dm.dc->ctx->dmub_srv)
		ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);

	if (!ret)
		return -EINVAL;

	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
}
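
#if 0
/*
 * Illustrative sketch, not part of the driver: the backlight LUT built in
 * dm_late_init() above is linear across its 16 entries.
 */
static inline unsigned int example_linear_lut_entry(unsigned int i)
{
	/* i = 0 -> 0x0000, i = 7 -> 30583 (0x7777), i = 15 -> 0xFFFF */
	return 0xFFFF * i / 15;
}
#endif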

static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}

static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver dc implementation.
	 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
	 * should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculates dcn watermark clock settings within dc_create,
	 * dcn20_resource_construct
	 * then calls pplib functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, clock settings of dcn watermark are also fixed values.
	 * dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not Renoir
	 */
	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		break;
	default:
		return 0;
	}

	ret = smu_write_watermarks_table(smu);
	if (ret) {
		DRM_ERROR("Failed to update WMTABLE!\n");
		return ret;
	}

	return 0;
}
1630
b8592b48
LL
1631/**
1632 * dm_hw_init() - Initialize DC device
28d687ea 1633 * @handle: The base driver device containing the amdgpu_dm device.
b8592b48
LL
1634 *
1635 * Initialize the &struct amdgpu_display_manager device. This involves calling
1636 * the initializers of each DM component, then populating the struct with them.
1637 *
1638 * Although the function implies hardware initialization, both hardware and
1639 * software are initialized here. Splitting them out to their relevant init
1640 * hooks is a future TODO item.
1641 *
1642 * Some notable things that are initialized here:
1643 *
1644 * - Display Core, both software and hardware
1645 * - DC modules that we need (freesync and color management)
1646 * - DRM software states
1647 * - Interrupt sources and handlers
1648 * - Vblank support
1649 * - Debug FS entries, if enabled
1650 */
4562236b
HW
1651static int dm_hw_init(void *handle)
1652{
1653 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1654 /* Create DAL display manager */
1655 amdgpu_dm_init(adev);
4562236b
HW
1656 amdgpu_dm_hpd_init(adev);
1657
4562236b
HW
1658 return 0;
1659}
1660
b8592b48
LL
1661/**
1662 * dm_hw_fini() - Teardown DC device
28d687ea 1663 * @handle: The base driver device containing the amdgpu_dm device.
b8592b48
LL
1664 *
1665 * Teardown components within &struct amdgpu_display_manager that require
1666 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1667 * were loaded. Also flush IRQ workqueues and disable them.
1668 */
4562236b
HW
1669static int dm_hw_fini(void *handle)
1670{
1671 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1672
1673 amdgpu_dm_hpd_fini(adev);
1674
1675 amdgpu_dm_irq_fini(adev);
21de3396 1676 amdgpu_dm_fini(adev);
4562236b
HW
1677 return 0;
1678}
1679
cdaae837
BL
1680
1681static int dm_enable_vblank(struct drm_crtc *crtc);
1682static void dm_disable_vblank(struct drm_crtc *crtc);
1683
1684static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1685 struct dc_state *state, bool enable)
1686{
1687 enum dc_irq_source irq_source;
1688 struct amdgpu_crtc *acrtc;
1689 int rc = -EBUSY;
1690 int i = 0;
1691
1692 for (i = 0; i < state->stream_count; i++) {
1693 acrtc = get_crtc_by_otg_inst(
1694 adev, state->stream_status[i].primary_otg_inst);
1695
1696 if (acrtc && state->stream_status[i].plane_count != 0) {
1697 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1698 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
 1699 DRM_DEBUG("crtc %d - pflip irq %sabling: r=%d\n",
1700 acrtc->crtc_id, enable ? "en" : "dis", rc);
1701 if (rc)
1702 DRM_WARN("Failed to %s pflip interrupts\n",
1703 enable ? "enable" : "disable");
1704
1705 if (enable) {
1706 rc = dm_enable_vblank(&acrtc->base);
1707 if (rc)
1708 DRM_WARN("Failed to enable vblank interrupts\n");
1709 } else {
1710 dm_disable_vblank(&acrtc->base);
1711 }
1712
1713 }
1714 }
1715
1716}
1717
dfd84d90 1718static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
cdaae837
BL
1719{
1720 struct dc_state *context = NULL;
1721 enum dc_status res = DC_ERROR_UNEXPECTED;
1722 int i;
1723 struct dc_stream_state *del_streams[MAX_PIPES];
1724 int del_streams_count = 0;
1725
1726 memset(del_streams, 0, sizeof(del_streams));
1727
1728 context = dc_create_state(dc);
1729 if (context == NULL)
1730 goto context_alloc_fail;
1731
1732 dc_resource_state_copy_construct_current(dc, context);
1733
1734 /* First remove from context all streams */
1735 for (i = 0; i < context->stream_count; i++) {
1736 struct dc_stream_state *stream = context->streams[i];
1737
1738 del_streams[del_streams_count++] = stream;
1739 }
1740
1741 /* Remove all planes for removed streams and then remove the streams */
1742 for (i = 0; i < del_streams_count; i++) {
1743 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1744 res = DC_FAIL_DETACH_SURFACES;
1745 goto fail;
1746 }
1747
1748 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1749 if (res != DC_OK)
1750 goto fail;
1751 }
1752
1753
1754 res = dc_validate_global_state(dc, context, false);
1755
1756 if (res != DC_OK) {
1757 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1758 goto fail;
1759 }
1760
1761 res = dc_commit_state(dc, context);
1762
1763fail:
1764 dc_release_state(context);
1765
1766context_alloc_fail:
1767 return res;
1768}
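/*
 * The helper above follows DC's "copy current state, edit, validate,
 * commit" pattern. A minimal sketch of the same pattern for adding a
 * stream instead of removing one (illustrative only; error handling and
 * locking elided):
 *
 *	struct dc_state *ctx = dc_create_state(dc);
 *
 *	dc_resource_state_copy_construct_current(dc, ctx);
 *	if (dc_add_stream_to_ctx(dc, ctx, stream) == DC_OK &&
 *	    dc_validate_global_state(dc, ctx, false) == DC_OK)
 *		dc_commit_state(dc, ctx);
 *	dc_release_state(ctx);
 */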
1769
4562236b
HW
1770static int dm_suspend(void *handle)
1771{
1772 struct amdgpu_device *adev = handle;
1773 struct amdgpu_display_manager *dm = &adev->dm;
1774 int ret = 0;
4562236b 1775
53b3f8f4 1776 if (amdgpu_in_reset(adev)) {
cdaae837 1777 mutex_lock(&dm->dc_lock);
98ab5f35
BL
1778
1779#if defined(CONFIG_DRM_AMD_DC_DCN)
1780 dc_allow_idle_optimizations(adev->dm.dc, false);
1781#endif
1782
cdaae837
BL
1783 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1784
1785 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1786
1787 amdgpu_dm_commit_zero_streams(dm->dc);
1788
1789 amdgpu_dm_irq_suspend(adev);
1790
1791 return ret;
1792 }
4562236b 1793
d2f0b53b 1794 WARN_ON(adev->dm.cached_state);
4a580877 1795 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
d2f0b53b 1796
4a580877 1797 s3_handle_mst(adev_to_drm(adev), true);
4562236b 1798
4562236b
HW
1799 amdgpu_dm_irq_suspend(adev);
1800
a3621485 1801
32f5062d 1802 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
4562236b 1803
1c2075d4 1804 return 0;
4562236b
HW
1805}
1806
1daf8c63
AD
1807static struct amdgpu_dm_connector *
1808amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1809 struct drm_crtc *crtc)
4562236b
HW
1810{
1811 uint32_t i;
c2cea706 1812 struct drm_connector_state *new_con_state;
4562236b
HW
1813 struct drm_connector *connector;
1814 struct drm_crtc *crtc_from_state;
1815
c2cea706
LSL
1816 for_each_new_connector_in_state(state, connector, new_con_state, i) {
1817 crtc_from_state = new_con_state->crtc;
4562236b
HW
1818
1819 if (crtc_from_state == crtc)
c84dec2f 1820 return to_amdgpu_dm_connector(connector);
4562236b
HW
1821 }
1822
1823 return NULL;
1824}
1825
fbbdadf2
BL
1826static void emulated_link_detect(struct dc_link *link)
1827{
1828 struct dc_sink_init_data sink_init_data = { 0 };
1829 struct display_sink_capability sink_caps = { 0 };
1830 enum dc_edid_status edid_status;
1831 struct dc_context *dc_ctx = link->ctx;
1832 struct dc_sink *sink = NULL;
1833 struct dc_sink *prev_sink = NULL;
1834
1835 link->type = dc_connection_none;
1836 prev_sink = link->local_sink;
1837
30164a16
VL
1838 if (prev_sink)
1839 dc_sink_release(prev_sink);
fbbdadf2
BL
1840
1841 switch (link->connector_signal) {
1842 case SIGNAL_TYPE_HDMI_TYPE_A: {
1843 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1844 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1845 break;
1846 }
1847
1848 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1849 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1850 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1851 break;
1852 }
1853
1854 case SIGNAL_TYPE_DVI_DUAL_LINK: {
1855 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1856 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1857 break;
1858 }
1859
1860 case SIGNAL_TYPE_LVDS: {
1861 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1862 sink_caps.signal = SIGNAL_TYPE_LVDS;
1863 break;
1864 }
1865
1866 case SIGNAL_TYPE_EDP: {
1867 sink_caps.transaction_type =
1868 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1869 sink_caps.signal = SIGNAL_TYPE_EDP;
1870 break;
1871 }
1872
1873 case SIGNAL_TYPE_DISPLAY_PORT: {
1874 sink_caps.transaction_type =
1875 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1876 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1877 break;
1878 }
1879
1880 default:
1881 DC_ERROR("Invalid connector type! signal:%d\n",
1882 link->connector_signal);
1883 return;
1884 }
1885
1886 sink_init_data.link = link;
1887 sink_init_data.sink_signal = sink_caps.signal;
1888
1889 sink = dc_sink_create(&sink_init_data);
1890 if (!sink) {
1891 DC_ERROR("Failed to create sink!\n");
1892 return;
1893 }
1894
dcd5fb82 1895 /* dc_sink_create returns a new reference */
fbbdadf2
BL
1896 link->local_sink = sink;
1897
1898 edid_status = dm_helpers_read_local_edid(
1899 link->ctx,
1900 link,
1901 sink);
1902
1903 if (edid_status != EDID_OK)
 1904 DC_ERROR("Failed to read EDID\n");
1905
1906}
1907
cdaae837
BL
1908static void dm_gpureset_commit_state(struct dc_state *dc_state,
1909 struct amdgpu_display_manager *dm)
1910{
1911 struct {
1912 struct dc_surface_update surface_updates[MAX_SURFACES];
1913 struct dc_plane_info plane_infos[MAX_SURFACES];
1914 struct dc_scaling_info scaling_infos[MAX_SURFACES];
1915 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1916 struct dc_stream_update stream_update;
 1917 } *bundle;
1918 int k, m;
1919
1920 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1921
1922 if (!bundle) {
1923 dm_error("Failed to allocate update bundle\n");
1924 goto cleanup;
1925 }
1926
1927 for (k = 0; k < dc_state->stream_count; k++) {
1928 bundle->stream_update.stream = dc_state->streams[k];
1929
1930 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
1931 bundle->surface_updates[m].surface =
1932 dc_state->stream_status->plane_states[m];
1933 bundle->surface_updates[m].surface->force_full_update =
1934 true;
1935 }
1936 dc_commit_updates_for_stream(
1937 dm->dc, bundle->surface_updates,
1938 dc_state->stream_status->plane_count,
efc8278e 1939 dc_state->streams[k], &bundle->stream_update, dc_state);
cdaae837
BL
1940 }
1941
1942cleanup:
1943 kfree(bundle);
1944
1945 return;
1946}
1947
3c4d55c9
AP
1948static void dm_set_dpms_off(struct dc_link *link)
1949{
1950 struct dc_stream_state *stream_state;
1951 struct amdgpu_dm_connector *aconnector = link->priv;
1952 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
1953 struct dc_stream_update stream_update;
1954 bool dpms_off = true;
1955
1956 memset(&stream_update, 0, sizeof(stream_update));
1957 stream_update.dpms_off = &dpms_off;
1958
1959 mutex_lock(&adev->dm.dc_lock);
1960 stream_state = dc_stream_find_from_link(link);
1961
1962 if (stream_state == NULL) {
1963 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
1964 mutex_unlock(&adev->dm.dc_lock);
1965 return;
1966 }
1967
1968 stream_update.stream = stream_state;
1969 dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
efc8278e
AJ
1970 stream_state, &stream_update,
1971 stream_state->ctx->dc->current_state);
3c4d55c9
AP
1972 mutex_unlock(&adev->dm.dc_lock);
1973}
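/*
 * Note that struct dc_stream_update carries optional pointers and only
 * non-NULL members are applied. A sketch of the inverse operation,
 * re-enabling the stream with the same pattern (illustrative only):
 *
 *	bool dpms_off = false;
 *	struct dc_stream_update update = { 0 };
 *
 *	update.stream = stream_state;
 *	update.dpms_off = &dpms_off;
 *	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
 *				     stream_state, &update,
 *				     stream_state->ctx->dc->current_state);
 */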
1974
4562236b
HW
1975static int dm_resume(void *handle)
1976{
1977 struct amdgpu_device *adev = handle;
4a580877 1978 struct drm_device *ddev = adev_to_drm(adev);
4562236b 1979 struct amdgpu_display_manager *dm = &adev->dm;
c84dec2f 1980 struct amdgpu_dm_connector *aconnector;
4562236b 1981 struct drm_connector *connector;
f8d2d39e 1982 struct drm_connector_list_iter iter;
4562236b 1983 struct drm_crtc *crtc;
c2cea706 1984 struct drm_crtc_state *new_crtc_state;
fcb4019e
LSL
1985 struct dm_crtc_state *dm_new_crtc_state;
1986 struct drm_plane *plane;
1987 struct drm_plane_state *new_plane_state;
1988 struct dm_plane_state *dm_new_plane_state;
113b7a01 1989 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
fbbdadf2 1990 enum dc_connection_type new_connection_type = dc_connection_none;
cdaae837
BL
1991 struct dc_state *dc_state;
1992 int i, r, j;
4562236b 1993
53b3f8f4 1994 if (amdgpu_in_reset(adev)) {
cdaae837
BL
1995 dc_state = dm->cached_dc_state;
1996
1997 r = dm_dmub_hw_init(adev);
1998 if (r)
1999 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2000
2001 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2002 dc_resume(dm->dc);
2003
2004 amdgpu_dm_irq_resume_early(adev);
2005
2006 for (i = 0; i < dc_state->stream_count; i++) {
2007 dc_state->streams[i]->mode_changed = true;
2008 for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2009 dc_state->stream_status->plane_states[j]->update_flags.raw
2010 = 0xffffffff;
2011 }
2012 }
2013
2014 WARN_ON(!dc_commit_state(dm->dc, dc_state));
4562236b 2015
cdaae837
BL
2016 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2017
2018 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2019
2020 dc_release_state(dm->cached_dc_state);
2021 dm->cached_dc_state = NULL;
2022
2023 amdgpu_dm_irq_resume_late(adev);
2024
2025 mutex_unlock(&dm->dc_lock);
2026
2027 return 0;
2028 }
113b7a01
LL
2029 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2030 dc_release_state(dm_state->context);
2031 dm_state->context = dc_create_state(dm->dc);
2032 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2033 dc_resource_state_construct(dm->dc, dm_state->context);
2034
8c7aea40
NK
2035 /* Before powering on DC we need to re-initialize DMUB. */
2036 r = dm_dmub_hw_init(adev);
2037 if (r)
2038 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2039
a80aa93d
ML
2040 /* power on hardware */
2041 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2042
4562236b
HW
2043 /* program HPD filter */
2044 dc_resume(dm->dc);
2045
4562236b
HW
2046 /*
 2047 * early enable HPD Rx IRQ, which should be done before the mode is set,
 2048 * as short pulse interrupts are used for MST
2049 */
2050 amdgpu_dm_irq_resume_early(adev);
2051
d20ebea8 2052 /* On resume we need to rewrite the MSTM control bits to enable MST */
684cd480
LP
2053 s3_handle_mst(ddev, false);
2054
4562236b 2055 /* Do detection */
f8d2d39e
LP
2056 drm_connector_list_iter_begin(ddev, &iter);
2057 drm_for_each_connector_iter(connector, &iter) {
c84dec2f 2058 aconnector = to_amdgpu_dm_connector(connector);
4562236b
HW
2059
2060 /*
2061 * this is the case when traversing through already created
2062 * MST connectors, should be skipped
2063 */
2064 if (aconnector->mst_port)
2065 continue;
2066
03ea364c 2067 mutex_lock(&aconnector->hpd_lock);
fbbdadf2
BL
2068 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2069 DRM_ERROR("KMS: Failed to detect connector\n");
2070
2071 if (aconnector->base.force && new_connection_type == dc_connection_none)
2072 emulated_link_detect(aconnector->dc_link);
2073 else
2074 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
3eb4eba4
RL
2075
2076 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2077 aconnector->fake_enable = false;
2078
dcd5fb82
MF
2079 if (aconnector->dc_sink)
2080 dc_sink_release(aconnector->dc_sink);
4562236b
HW
2081 aconnector->dc_sink = NULL;
2082 amdgpu_dm_update_connector_after_detect(aconnector);
03ea364c 2083 mutex_unlock(&aconnector->hpd_lock);
4562236b 2084 }
f8d2d39e 2085 drm_connector_list_iter_end(&iter);
4562236b 2086
1f6010a9 2087 /* Force mode set in atomic commit */
a80aa93d 2088 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
c2cea706 2089 new_crtc_state->active_changed = true;
4f346e65 2090
fcb4019e
LSL
2091 /*
2092 * atomic_check is expected to create the dc states. We need to release
2093 * them here, since they were duplicated as part of the suspend
2094 * procedure.
2095 */
a80aa93d 2096 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
fcb4019e
LSL
2097 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2098 if (dm_new_crtc_state->stream) {
2099 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2100 dc_stream_release(dm_new_crtc_state->stream);
2101 dm_new_crtc_state->stream = NULL;
2102 }
2103 }
2104
a80aa93d 2105 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
fcb4019e
LSL
2106 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2107 if (dm_new_plane_state->dc_state) {
2108 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2109 dc_plane_state_release(dm_new_plane_state->dc_state);
2110 dm_new_plane_state->dc_state = NULL;
2111 }
2112 }
2113
2d1af6a1 2114 drm_atomic_helper_resume(ddev, dm->cached_state);
4562236b 2115
a80aa93d 2116 dm->cached_state = NULL;
0a214e2f 2117
9faa4237 2118 amdgpu_dm_irq_resume_late(adev);
4562236b 2119
9340dfd3
HW
2120 amdgpu_dm_smu_write_watermarks_table(adev);
2121
2d1af6a1 2122 return 0;
4562236b
HW
2123}
2124
b8592b48
LL
2125/**
2126 * DOC: DM Lifecycle
2127 *
 2128 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2129 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2130 * the base driver's device list to be initialized and torn down accordingly.
2131 *
2132 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2133 */
2134
4562236b
HW
2135static const struct amd_ip_funcs amdgpu_dm_funcs = {
2136 .name = "dm",
2137 .early_init = dm_early_init,
7abcf6b5 2138 .late_init = dm_late_init,
4562236b
HW
2139 .sw_init = dm_sw_init,
2140 .sw_fini = dm_sw_fini,
2141 .hw_init = dm_hw_init,
2142 .hw_fini = dm_hw_fini,
2143 .suspend = dm_suspend,
2144 .resume = dm_resume,
2145 .is_idle = dm_is_idle,
2146 .wait_for_idle = dm_wait_for_idle,
2147 .check_soft_reset = dm_check_soft_reset,
2148 .soft_reset = dm_soft_reset,
2149 .set_clockgating_state = dm_set_clockgating_state,
2150 .set_powergating_state = dm_set_powergating_state,
2151};
2152
2153const struct amdgpu_ip_block_version dm_ip_block =
2154{
2155 .type = AMD_IP_BLOCK_TYPE_DCE,
2156 .major = 1,
2157 .minor = 0,
2158 .rev = 0,
2159 .funcs = &amdgpu_dm_funcs,
2160};
2161
ca3268c4 2162
b8592b48
LL
2163/**
2164 * DOC: atomic
2165 *
2166 * *WIP*
2167 */
0a323b84 2168
b3663f70 2169static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
4d4772f6 2170 .fb_create = amdgpu_display_user_framebuffer_create,
dfbbfe3c 2171 .get_format_info = amd_get_format_info,
366c1baa 2172 .output_poll_changed = drm_fb_helper_output_poll_changed,
4562236b 2173 .atomic_check = amdgpu_dm_atomic_check,
0269764a 2174 .atomic_commit = drm_atomic_helper_commit,
54f5499a
AG
2175};
2176
2177static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2178 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
4562236b
HW
2179};
2180
94562810
RS
2181static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2182{
2183 u32 max_cll, min_cll, max, min, q, r;
2184 struct amdgpu_dm_backlight_caps *caps;
2185 struct amdgpu_display_manager *dm;
2186 struct drm_connector *conn_base;
2187 struct amdgpu_device *adev;
ec11fe37 2188 struct dc_link *link = NULL;
94562810
RS
2189 static const u8 pre_computed_values[] = {
2190 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2191 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2192
2193 if (!aconnector || !aconnector->dc_link)
2194 return;
2195
ec11fe37 2196 link = aconnector->dc_link;
2197 if (link->connector_signal != SIGNAL_TYPE_EDP)
2198 return;
2199
94562810 2200 conn_base = &aconnector->base;
1348969a 2201 adev = drm_to_adev(conn_base->dev);
94562810
RS
2202 dm = &adev->dm;
2203 caps = &dm->backlight_caps;
2204 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2205 caps->aux_support = false;
2206 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2207 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2208
2209 if (caps->ext_caps->bits.oled == 1 ||
2210 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2211 caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2212 caps->aux_support = true;
2213
 2214 /* From the specification (CTA-861-G), for calculating the maximum
 2215 * luminance we need to use:
 2216 * Luminance = 50*2**(CV/32)
 2217 * where CV is a one-byte value.
 2218 * Calculating this expression directly would need floating-point
 2219 * precision; to avoid that complexity, we take advantage of the fact
 2220 * that CV is divided by a constant. From Euclid's division algorithm,
 2221 * we know that CV can be written as CV = 32*q + r. Substituting CV in
 2222 * the Luminance expression gives 50*(2**q)*(2**(r/32)), hence we only
 2223 * need to pre-compute the values of 50*2**(r/32). For pre-computing them
 2224 * we used the following Ruby line:
 2225 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
 2226 * The results of the above expression can be verified against
 2227 * pre_computed_values.
 2228 */
2229 q = max_cll >> 5;
2230 r = max_cll % 32;
2231 max = (1 << q) * pre_computed_values[r];
2232
2233 // min luminance: maxLum * (CV/255)^2 / 100
2234 q = DIV_ROUND_CLOSEST(min_cll, 255);
2235 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2236
2237 caps->aux_max_input_signal = max;
2238 caps->aux_min_input_signal = min;
2239}
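/*
 * Worked example for the conversion above (illustrative): with
 * max_cll = 100, q = 100 >> 5 = 3 and r = 100 % 32 = 4, so
 * max = (1 << 3) * pre_computed_values[4] = 8 * 55 = 440 nits,
 * matching 50 * 2**(100/32) ~= 436 up to the table's rounding.
 */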
2240
97e51c16
HW
2241void amdgpu_dm_update_connector_after_detect(
2242 struct amdgpu_dm_connector *aconnector)
4562236b
HW
2243{
2244 struct drm_connector *connector = &aconnector->base;
2245 struct drm_device *dev = connector->dev;
b73a22d3 2246 struct dc_sink *sink;
4562236b
HW
2247
2248 /* MST handled by drm_mst framework */
2249 if (aconnector->mst_mgr.mst_state == true)
2250 return;
2251
4562236b 2252 sink = aconnector->dc_link->local_sink;
dcd5fb82
MF
2253 if (sink)
2254 dc_sink_retain(sink);
4562236b 2255
1f6010a9
DF
2256 /*
2257 * Edid mgmt connector gets first update only in mode_valid hook and then
4562236b 2258 * the connector sink is set to either the fake or the physical sink depending on link status.
1f6010a9 2259 * Skip if already done during boot.
4562236b
HW
2260 */
2261 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2262 && aconnector->dc_em_sink) {
2263
1f6010a9
DF
2264 /*
 2265 * For S3 resume with headless use em_sink to fake the stream
2266 * because on resume connector->sink is set to NULL
4562236b
HW
2267 */
2268 mutex_lock(&dev->mode_config.mutex);
2269
2270 if (sink) {
922aa1e1 2271 if (aconnector->dc_sink) {
98e6436d 2272 amdgpu_dm_update_freesync_caps(connector, NULL);
1f6010a9
DF
2273 /*
 2274 * retain and release below are used to
 2275 * bump up the refcount for the sink because the link doesn't point
 2276 * to it anymore after disconnect, so on the next crtc-to-connector
922aa1e1
AG
 2277 * reshuffle by UMD we would otherwise get an unwanted dc_sink release
2278 */
dcd5fb82 2279 dc_sink_release(aconnector->dc_sink);
922aa1e1 2280 }
4562236b 2281 aconnector->dc_sink = sink;
dcd5fb82 2282 dc_sink_retain(aconnector->dc_sink);
98e6436d
AK
2283 amdgpu_dm_update_freesync_caps(connector,
2284 aconnector->edid);
4562236b 2285 } else {
98e6436d 2286 amdgpu_dm_update_freesync_caps(connector, NULL);
dcd5fb82 2287 if (!aconnector->dc_sink) {
4562236b 2288 aconnector->dc_sink = aconnector->dc_em_sink;
922aa1e1 2289 dc_sink_retain(aconnector->dc_sink);
dcd5fb82 2290 }
4562236b
HW
2291 }
2292
2293 mutex_unlock(&dev->mode_config.mutex);
dcd5fb82
MF
2294
2295 if (sink)
2296 dc_sink_release(sink);
4562236b
HW
2297 return;
2298 }
2299
2300 /*
2301 * TODO: temporary guard to look for proper fix
2302 * if this sink is MST sink, we should not do anything
2303 */
dcd5fb82
MF
2304 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2305 dc_sink_release(sink);
4562236b 2306 return;
dcd5fb82 2307 }
4562236b
HW
2308
2309 if (aconnector->dc_sink == sink) {
1f6010a9
DF
2310 /*
2311 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2312 * Do nothing!!
2313 */
f1ad2f5e 2314 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
4562236b 2315 aconnector->connector_id);
dcd5fb82
MF
2316 if (sink)
2317 dc_sink_release(sink);
4562236b
HW
2318 return;
2319 }
2320
f1ad2f5e 2321 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
4562236b
HW
2322 aconnector->connector_id, aconnector->dc_sink, sink);
2323
2324 mutex_lock(&dev->mode_config.mutex);
2325
1f6010a9
DF
2326 /*
2327 * 1. Update status of the drm connector
2328 * 2. Send an event and let userspace tell us what to do
2329 */
4562236b 2330 if (sink) {
1f6010a9
DF
2331 /*
2332 * TODO: check if we still need the S3 mode update workaround.
2333 * If yes, put it here.
2334 */
c64b0d6b 2335 if (aconnector->dc_sink) {
98e6436d 2336 amdgpu_dm_update_freesync_caps(connector, NULL);
c64b0d6b
VL
2337 dc_sink_release(aconnector->dc_sink);
2338 }
4562236b
HW
2339
2340 aconnector->dc_sink = sink;
dcd5fb82 2341 dc_sink_retain(aconnector->dc_sink);
900b3cb1 2342 if (sink->dc_edid.length == 0) {
4562236b 2343 aconnector->edid = NULL;
e6142dd5
AP
2344 if (aconnector->dc_link->aux_mode) {
2345 drm_dp_cec_unset_edid(
2346 &aconnector->dm_dp_aux.aux);
2347 }
900b3cb1 2348 } else {
4562236b 2349 aconnector->edid =
e6142dd5 2350 (struct edid *)sink->dc_edid.raw_edid;
4562236b 2351
c555f023 2352 drm_connector_update_edid_property(connector,
e6142dd5 2353 aconnector->edid);
e6142dd5
AP
2354 if (aconnector->dc_link->aux_mode)
2355 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2356 aconnector->edid);
4562236b 2357 }
e6142dd5 2358
98e6436d 2359 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
94562810 2360 update_connector_ext_caps(aconnector);
4562236b 2361 } else {
e86e8947 2362 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
98e6436d 2363 amdgpu_dm_update_freesync_caps(connector, NULL);
c555f023 2364 drm_connector_update_edid_property(connector, NULL);
4562236b 2365 aconnector->num_modes = 0;
dcd5fb82 2366 dc_sink_release(aconnector->dc_sink);
4562236b 2367 aconnector->dc_sink = NULL;
5326c452 2368 aconnector->edid = NULL;
0c8620d6
BL
2369#ifdef CONFIG_DRM_AMD_DC_HDCP
2370 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2371 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2372 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2373#endif
4562236b
HW
2374 }
2375
2376 mutex_unlock(&dev->mode_config.mutex);
dcd5fb82 2377
0f877894
OV
2378 update_subconnector_property(aconnector);
2379
dcd5fb82
MF
2380 if (sink)
2381 dc_sink_release(sink);
4562236b
HW
2382}
2383
2384static void handle_hpd_irq(void *param)
2385{
c84dec2f 2386 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
4562236b
HW
2387 struct drm_connector *connector = &aconnector->base;
2388 struct drm_device *dev = connector->dev;
fbbdadf2 2389 enum dc_connection_type new_connection_type = dc_connection_none;
0c8620d6 2390#ifdef CONFIG_DRM_AMD_DC_HDCP
1348969a 2391 struct amdgpu_device *adev = drm_to_adev(dev);
97f6c917 2392 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
0c8620d6 2393#endif
4562236b 2394
1f6010a9
DF
2395 /*
 2396 * In case of failure or MST, there is no need to update the connector status
 2397 * or notify the OS since (in the MST case) MST does this in its own context.
4562236b
HW
2398 */
2399 mutex_lock(&aconnector->hpd_lock);
2e0ac3d6 2400
0c8620d6 2401#ifdef CONFIG_DRM_AMD_DC_HDCP
97f6c917 2402 if (adev->dm.hdcp_workqueue) {
96a3b32e 2403 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
97f6c917
BL
2404 dm_con_state->update_hdcp = true;
2405 }
0c8620d6 2406#endif
2e0ac3d6
HW
2407 if (aconnector->fake_enable)
2408 aconnector->fake_enable = false;
2409
fbbdadf2
BL
2410 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2411 DRM_ERROR("KMS: Failed to detect connector\n");
2412
2413 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2414 emulated_link_detect(aconnector->dc_link);
2415
2416
2417 drm_modeset_lock_all(dev);
2418 dm_restore_drm_connector_state(dev, connector);
2419 drm_modeset_unlock_all(dev);
2420
2421 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2422 drm_kms_helper_hotplug_event(dev);
2423
2424 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3c4d55c9
AP
2425 if (new_connection_type == dc_connection_none &&
2426 aconnector->dc_link->type == dc_connection_none)
2427 dm_set_dpms_off(aconnector->dc_link);
4562236b 2428
3c4d55c9 2429 amdgpu_dm_update_connector_after_detect(aconnector);
4562236b
HW
2430
2431 drm_modeset_lock_all(dev);
2432 dm_restore_drm_connector_state(dev, connector);
2433 drm_modeset_unlock_all(dev);
2434
2435 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2436 drm_kms_helper_hotplug_event(dev);
2437 }
2438 mutex_unlock(&aconnector->hpd_lock);
2439
2440}
2441
c84dec2f 2442static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
4562236b
HW
2443{
2444 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2445 uint8_t dret;
2446 bool new_irq_handled = false;
2447 int dpcd_addr;
2448 int dpcd_bytes_to_read;
2449
2450 const int max_process_count = 30;
2451 int process_count = 0;
2452
2453 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2454
2455 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2456 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2457 /* DPCD 0x200 - 0x201 for downstream IRQ */
2458 dpcd_addr = DP_SINK_COUNT;
2459 } else {
2460 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2461 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2462 dpcd_addr = DP_SINK_COUNT_ESI;
2463 }
2464
2465 dret = drm_dp_dpcd_read(
2466 &aconnector->dm_dp_aux.aux,
2467 dpcd_addr,
2468 esi,
2469 dpcd_bytes_to_read);
2470
2471 while (dret == dpcd_bytes_to_read &&
2472 process_count < max_process_count) {
2473 uint8_t retry;
2474 dret = 0;
2475
2476 process_count++;
2477
f1ad2f5e 2478 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
4562236b
HW
2479 /* handle HPD short pulse irq */
2480 if (aconnector->mst_mgr.mst_state)
2481 drm_dp_mst_hpd_irq(
2482 &aconnector->mst_mgr,
2483 esi,
2484 &new_irq_handled);
4562236b
HW
2485
2486 if (new_irq_handled) {
 2487 /* ACK at DPCD to notify downstream */
2488 const int ack_dpcd_bytes_to_write =
2489 dpcd_bytes_to_read - 1;
2490
2491 for (retry = 0; retry < 3; retry++) {
2492 uint8_t wret;
2493
2494 wret = drm_dp_dpcd_write(
2495 &aconnector->dm_dp_aux.aux,
2496 dpcd_addr + 1,
2497 &esi[1],
2498 ack_dpcd_bytes_to_write);
2499 if (wret == ack_dpcd_bytes_to_write)
2500 break;
2501 }
2502
1f6010a9 2503 /* check if there is new irq to be handled */
4562236b
HW
2504 dret = drm_dp_dpcd_read(
2505 &aconnector->dm_dp_aux.aux,
2506 dpcd_addr,
2507 esi,
2508 dpcd_bytes_to_read);
2509
2510 new_irq_handled = false;
d4a6e8a9 2511 } else {
4562236b 2512 break;
d4a6e8a9 2513 }
4562236b
HW
2514 }
2515
2516 if (process_count == max_process_count)
f1ad2f5e 2517 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
4562236b
HW
2518}
2519
2520static void handle_hpd_rx_irq(void *param)
2521{
c84dec2f 2522 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
4562236b
HW
2523 struct drm_connector *connector = &aconnector->base;
2524 struct drm_device *dev = connector->dev;
53cbf65c 2525 struct dc_link *dc_link = aconnector->dc_link;
4562236b 2526 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
c8ea79a8 2527 bool result = false;
fbbdadf2 2528 enum dc_connection_type new_connection_type = dc_connection_none;
c8ea79a8 2529 struct amdgpu_device *adev = drm_to_adev(dev);
2a0f9270 2530 union hpd_irq_data hpd_irq_data;
2a0f9270
BL
2531
2532 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
4562236b 2533
1f6010a9
DF
2534 /*
 2535 * TODO: Temporarily add a mutex to protect the hpd interrupt from a gpio
4562236b
HW
 2536 * conflict; once the i2c helper is implemented, this mutex should be
2537 * retired.
2538 */
53cbf65c 2539 if (dc_link->type != dc_connection_mst_branch)
4562236b
HW
2540 mutex_lock(&aconnector->hpd_lock);
2541
3083a984
QZ
2542 read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2543
2544 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2545 (dc_link->type == dc_connection_mst_branch)) {
2546 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2547 result = true;
2548 dm_handle_hpd_rx_irq(aconnector);
2549 goto out;
2550 } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2551 result = false;
2552 dm_handle_hpd_rx_irq(aconnector);
2553 goto out;
2554 }
2555 }
2556
c8ea79a8 2557 mutex_lock(&adev->dm.dc_lock);
2a0f9270 2558#ifdef CONFIG_DRM_AMD_DC_HDCP
c8ea79a8 2559 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2a0f9270 2560#else
c8ea79a8 2561 result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2a0f9270 2562#endif
c8ea79a8
QZ
2563 mutex_unlock(&adev->dm.dc_lock);
2564
3083a984 2565out:
c8ea79a8 2566 if (result && !is_mst_root_connector) {
4562236b 2567 /* Downstream Port status changed. */
fbbdadf2
BL
2568 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2569 DRM_ERROR("KMS: Failed to detect connector\n");
2570
2571 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2572 emulated_link_detect(dc_link);
2573
2574 if (aconnector->fake_enable)
2575 aconnector->fake_enable = false;
2576
2577 amdgpu_dm_update_connector_after_detect(aconnector);
2578
2579
2580 drm_modeset_lock_all(dev);
2581 dm_restore_drm_connector_state(dev, connector);
2582 drm_modeset_unlock_all(dev);
2583
2584 drm_kms_helper_hotplug_event(dev);
2585 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
88ac3dda
RL
2586
2587 if (aconnector->fake_enable)
2588 aconnector->fake_enable = false;
2589
4562236b
HW
2590 amdgpu_dm_update_connector_after_detect(aconnector);
2591
2592
2593 drm_modeset_lock_all(dev);
2594 dm_restore_drm_connector_state(dev, connector);
2595 drm_modeset_unlock_all(dev);
2596
2597 drm_kms_helper_hotplug_event(dev);
2598 }
2599 }
2a0f9270 2600#ifdef CONFIG_DRM_AMD_DC_HDCP
95f247e7
DC
2601 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2602 if (adev->dm.hdcp_workqueue)
2603 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2604 }
2a0f9270 2605#endif
4562236b 2606
e86e8947
HV
2607 if (dc_link->type != dc_connection_mst_branch) {
2608 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
4562236b 2609 mutex_unlock(&aconnector->hpd_lock);
e86e8947 2610 }
4562236b
HW
2611}
2612
2613static void register_hpd_handlers(struct amdgpu_device *adev)
2614{
4a580877 2615 struct drm_device *dev = adev_to_drm(adev);
4562236b 2616 struct drm_connector *connector;
c84dec2f 2617 struct amdgpu_dm_connector *aconnector;
4562236b
HW
2618 const struct dc_link *dc_link;
2619 struct dc_interrupt_params int_params = {0};
2620
2621 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2622 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2623
2624 list_for_each_entry(connector,
2625 &dev->mode_config.connector_list, head) {
2626
c84dec2f 2627 aconnector = to_amdgpu_dm_connector(connector);
4562236b
HW
2628 dc_link = aconnector->dc_link;
2629
2630 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2631 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2632 int_params.irq_source = dc_link->irq_source_hpd;
2633
2634 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2635 handle_hpd_irq,
2636 (void *) aconnector);
2637 }
2638
2639 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2640
2641 /* Also register for DP short pulse (hpd_rx). */
2642 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2643 int_params.irq_source = dc_link->irq_source_hpd_rx;
2644
2645 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2646 handle_hpd_rx_irq,
2647 (void *) aconnector);
2648 }
2649 }
2650}
2651
55e56389
MR
2652#if defined(CONFIG_DRM_AMD_DC_SI)
2653/* Register IRQ sources and initialize IRQ callbacks */
2654static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2655{
2656 struct dc *dc = adev->dm.dc;
2657 struct common_irq_params *c_irq_params;
2658 struct dc_interrupt_params int_params = {0};
2659 int r;
2660 int i;
2661 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2662
2663 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2664 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2665
2666 /*
2667 * Actions of amdgpu_irq_add_id():
2668 * 1. Register a set() function with base driver.
2669 * Base driver will call set() function to enable/disable an
2670 * interrupt in DC hardware.
2671 * 2. Register amdgpu_dm_irq_handler().
2672 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2673 * coming from DC hardware.
2674 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2675 * for acknowledging and handling. */
2676
2677 /* Use VBLANK interrupt */
2678 for (i = 0; i < adev->mode_info.num_crtc; i++) {
 2679 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2680 if (r) {
2681 DRM_ERROR("Failed to add crtc irq id!\n");
2682 return r;
2683 }
2684
2685 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2686 int_params.irq_source =
 2687 dc_interrupt_to_irq_source(dc, i + 1, 0);
2688
2689 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2690
2691 c_irq_params->adev = adev;
2692 c_irq_params->irq_src = int_params.irq_source;
2693
2694 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2695 dm_crtc_high_irq, c_irq_params);
2696 }
2697
2698 /* Use GRPH_PFLIP interrupt */
2699 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2700 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2701 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2702 if (r) {
2703 DRM_ERROR("Failed to add page flip irq id!\n");
2704 return r;
2705 }
2706
2707 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2708 int_params.irq_source =
2709 dc_interrupt_to_irq_source(dc, i, 0);
2710
2711 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2712
2713 c_irq_params->adev = adev;
2714 c_irq_params->irq_src = int_params.irq_source;
2715
2716 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2717 dm_pflip_high_irq, c_irq_params);
2718
2719 }
2720
2721 /* HPD */
2722 r = amdgpu_irq_add_id(adev, client_id,
2723 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2724 if (r) {
2725 DRM_ERROR("Failed to add hpd irq id!\n");
2726 return r;
2727 }
2728
2729 register_hpd_handlers(adev);
2730
2731 return 0;
2732}
2733#endif
2734
4562236b
HW
2735/* Register IRQ sources and initialize IRQ callbacks */
2736static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2737{
2738 struct dc *dc = adev->dm.dc;
2739 struct common_irq_params *c_irq_params;
2740 struct dc_interrupt_params int_params = {0};
2741 int r;
2742 int i;
1ffdeca6 2743 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2c8ad2d5 2744
84374725 2745 if (adev->asic_type >= CHIP_VEGA10)
3760f76c 2746 client_id = SOC15_IH_CLIENTID_DCE;
4562236b
HW
2747
2748 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2749 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2750
1f6010a9
DF
2751 /*
2752 * Actions of amdgpu_irq_add_id():
4562236b
HW
2753 * 1. Register a set() function with base driver.
2754 * Base driver will call set() function to enable/disable an
2755 * interrupt in DC hardware.
2756 * 2. Register amdgpu_dm_irq_handler().
2757 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2758 * coming from DC hardware.
2759 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2760 * for acknowledging and handling. */
2761
b57de80a 2762 /* Use VBLANK interrupt */
e9029155 2763 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2c8ad2d5 2764 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
4562236b
HW
2765 if (r) {
2766 DRM_ERROR("Failed to add crtc irq id!\n");
2767 return r;
2768 }
2769
2770 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2771 int_params.irq_source =
3d761e79 2772 dc_interrupt_to_irq_source(dc, i, 0);
4562236b 2773
b57de80a 2774 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
4562236b
HW
2775
2776 c_irq_params->adev = adev;
2777 c_irq_params->irq_src = int_params.irq_source;
2778
2779 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2780 dm_crtc_high_irq, c_irq_params);
2781 }
2782
d2574c33
MK
2783 /* Use VUPDATE interrupt */
2784 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2785 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2786 if (r) {
2787 DRM_ERROR("Failed to add vupdate irq id!\n");
2788 return r;
2789 }
2790
2791 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2792 int_params.irq_source =
2793 dc_interrupt_to_irq_source(dc, i, 0);
2794
2795 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2796
2797 c_irq_params->adev = adev;
2798 c_irq_params->irq_src = int_params.irq_source;
2799
2800 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2801 dm_vupdate_high_irq, c_irq_params);
2802 }
2803
3d761e79 2804 /* Use GRPH_PFLIP interrupt */
4562236b
HW
2805 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2806 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2c8ad2d5 2807 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
4562236b
HW
2808 if (r) {
2809 DRM_ERROR("Failed to add page flip irq id!\n");
2810 return r;
2811 }
2812
2813 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2814 int_params.irq_source =
2815 dc_interrupt_to_irq_source(dc, i, 0);
2816
2817 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2818
2819 c_irq_params->adev = adev;
2820 c_irq_params->irq_src = int_params.irq_source;
2821
2822 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2823 dm_pflip_high_irq, c_irq_params);
2824
2825 }
2826
2827 /* HPD */
2c8ad2d5
AD
2828 r = amdgpu_irq_add_id(adev, client_id,
2829 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
4562236b
HW
2830 if (r) {
2831 DRM_ERROR("Failed to add hpd irq id!\n");
2832 return r;
2833 }
2834
2835 register_hpd_handlers(adev);
2836
2837 return 0;
2838}
2839
b86a1aa3 2840#if defined(CONFIG_DRM_AMD_DC_DCN)
ff5ef992
AD
2841/* Register IRQ sources and initialize IRQ callbacks */
2842static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2843{
2844 struct dc *dc = adev->dm.dc;
2845 struct common_irq_params *c_irq_params;
2846 struct dc_interrupt_params int_params = {0};
2847 int r;
2848 int i;
2849
2850 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2851 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2852
1f6010a9
DF
2853 /*
2854 * Actions of amdgpu_irq_add_id():
ff5ef992
AD
2855 * 1. Register a set() function with base driver.
2856 * Base driver will call set() function to enable/disable an
2857 * interrupt in DC hardware.
2858 * 2. Register amdgpu_dm_irq_handler().
2859 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2860 * coming from DC hardware.
2861 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2862 * for acknowledging and handling.
1f6010a9 2863 */
ff5ef992
AD
2864
2865 /* Use VSTARTUP interrupt */
2866 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2867 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2868 i++) {
3760f76c 2869 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
ff5ef992
AD
2870
2871 if (r) {
2872 DRM_ERROR("Failed to add crtc irq id!\n");
2873 return r;
2874 }
2875
2876 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2877 int_params.irq_source =
2878 dc_interrupt_to_irq_source(dc, i, 0);
2879
2880 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2881
2882 c_irq_params->adev = adev;
2883 c_irq_params->irq_src = int_params.irq_source;
2884
2346ef47
NK
2885 amdgpu_dm_irq_register_interrupt(
2886 adev, &int_params, dm_crtc_high_irq, c_irq_params);
2887 }
2888
2889 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2890 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2891 * to trigger at end of each vblank, regardless of state of the lock,
2892 * matching DCE behaviour.
2893 */
2894 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2895 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2896 i++) {
2897 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2898
2899 if (r) {
2900 DRM_ERROR("Failed to add vupdate irq id!\n");
2901 return r;
2902 }
2903
2904 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2905 int_params.irq_source =
2906 dc_interrupt_to_irq_source(dc, i, 0);
2907
2908 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2909
2910 c_irq_params->adev = adev;
2911 c_irq_params->irq_src = int_params.irq_source;
2912
ff5ef992 2913 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2346ef47 2914 dm_vupdate_high_irq, c_irq_params);
d2574c33
MK
2915 }
2916
ff5ef992
AD
2917 /* Use GRPH_PFLIP interrupt */
2918 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2919 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2920 i++) {
3760f76c 2921 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
ff5ef992
AD
2922 if (r) {
2923 DRM_ERROR("Failed to add page flip irq id!\n");
2924 return r;
2925 }
2926
2927 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2928 int_params.irq_source =
2929 dc_interrupt_to_irq_source(dc, i, 0);
2930
2931 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2932
2933 c_irq_params->adev = adev;
2934 c_irq_params->irq_src = int_params.irq_source;
2935
2936 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2937 dm_pflip_high_irq, c_irq_params);
2938
2939 }
2940
2941 /* HPD */
3760f76c 2942 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
ff5ef992
AD
2943 &adev->hpd_irq);
2944 if (r) {
2945 DRM_ERROR("Failed to add hpd irq id!\n");
2946 return r;
2947 }
2948
2949 register_hpd_handlers(adev);
2950
2951 return 0;
2952}
2953#endif
2954
eb3dc897
NK
2955/*
2956 * Acquires the lock for the atomic state object and returns
2957 * the new atomic state.
2958 *
2959 * This should only be called during atomic check.
2960 */
2961static int dm_atomic_get_state(struct drm_atomic_state *state,
2962 struct dm_atomic_state **dm_state)
2963{
2964 struct drm_device *dev = state->dev;
1348969a 2965 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897
NK
2966 struct amdgpu_display_manager *dm = &adev->dm;
2967 struct drm_private_state *priv_state;
eb3dc897
NK
2968
2969 if (*dm_state)
2970 return 0;
2971
eb3dc897
NK
2972 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2973 if (IS_ERR(priv_state))
2974 return PTR_ERR(priv_state);
2975
2976 *dm_state = to_dm_atomic_state(priv_state);
2977
2978 return 0;
2979}
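/*
 * Typical use during atomic check (illustrative sketch): callers pass a
 * NULL-initialized pointer and may call this repeatedly; the private
 * state is only looked up, and its lock acquired, once.
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *	// dm_state->context may now be modified under the acquired lock
 */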
2980
dfd84d90 2981static struct dm_atomic_state *
eb3dc897
NK
2982dm_atomic_get_new_state(struct drm_atomic_state *state)
2983{
2984 struct drm_device *dev = state->dev;
1348969a 2985 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897
NK
2986 struct amdgpu_display_manager *dm = &adev->dm;
2987 struct drm_private_obj *obj;
2988 struct drm_private_state *new_obj_state;
2989 int i;
2990
2991 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2992 if (obj->funcs == dm->atomic_obj.funcs)
2993 return to_dm_atomic_state(new_obj_state);
2994 }
2995
2996 return NULL;
2997}
2998
eb3dc897
NK
2999static struct drm_private_state *
3000dm_atomic_duplicate_state(struct drm_private_obj *obj)
3001{
3002 struct dm_atomic_state *old_state, *new_state;
3003
3004 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3005 if (!new_state)
3006 return NULL;
3007
3008 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3009
813d20dc
AW
3010 old_state = to_dm_atomic_state(obj->state);
3011
3012 if (old_state && old_state->context)
3013 new_state->context = dc_copy_state(old_state->context);
3014
eb3dc897
NK
3015 if (!new_state->context) {
3016 kfree(new_state);
3017 return NULL;
3018 }
3019
eb3dc897
NK
3020 return &new_state->base;
3021}
3022
3023static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3024 struct drm_private_state *state)
3025{
3026 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3027
3028 if (dm_state && dm_state->context)
3029 dc_release_state(dm_state->context);
3030
3031 kfree(dm_state);
3032}
3033
3034static struct drm_private_state_funcs dm_atomic_state_funcs = {
3035 .atomic_duplicate_state = dm_atomic_duplicate_state,
3036 .atomic_destroy_state = dm_atomic_destroy_state,
3037};
3038
4562236b
HW
3039static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3040{
eb3dc897 3041 struct dm_atomic_state *state;
4562236b
HW
3042 int r;
3043
3044 adev->mode_info.mode_config_initialized = true;
3045
4a580877
LT
3046 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3047 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
4562236b 3048
4a580877
LT
3049 adev_to_drm(adev)->mode_config.max_width = 16384;
3050 adev_to_drm(adev)->mode_config.max_height = 16384;
4562236b 3051
4a580877
LT
3052 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3053 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
1f6010a9 3054 /* indicates support for immediate flip */
4a580877 3055 adev_to_drm(adev)->mode_config.async_page_flip = true;
4562236b 3056
4a580877 3057 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
4562236b 3058
eb3dc897
NK
3059 state = kzalloc(sizeof(*state), GFP_KERNEL);
3060 if (!state)
3061 return -ENOMEM;
3062
813d20dc 3063 state->context = dc_create_state(adev->dm.dc);
eb3dc897
NK
3064 if (!state->context) {
3065 kfree(state);
3066 return -ENOMEM;
3067 }
3068
3069 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3070
4a580877 3071 drm_atomic_private_obj_init(adev_to_drm(adev),
8c1a765b 3072 &adev->dm.atomic_obj,
eb3dc897
NK
3073 &state->base,
3074 &dm_atomic_state_funcs);
3075
3dc9b1ce 3076 r = amdgpu_display_modeset_create_props(adev);
b67a468a
DL
3077 if (r) {
3078 dc_release_state(state->context);
3079 kfree(state);
4562236b 3080 return r;
b67a468a 3081 }
4562236b 3082
6ce8f316 3083 r = amdgpu_dm_audio_init(adev);
b67a468a
DL
3084 if (r) {
3085 dc_release_state(state->context);
3086 kfree(state);
6ce8f316 3087 return r;
b67a468a 3088 }
6ce8f316 3089
4562236b
HW
3090 return 0;
3091}
3092
206bbafe
DF
3093#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3094#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
94562810 3095#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
206bbafe 3096
4562236b
HW
3097#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3098 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3099
206bbafe
DF
3100static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3101{
3102#if defined(CONFIG_ACPI)
3103 struct amdgpu_dm_backlight_caps caps;
3104
58965855
FS
3105 memset(&caps, 0, sizeof(caps));
3106
206bbafe
DF
3107 if (dm->backlight_caps.caps_valid)
3108 return;
3109
3110 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3111 if (caps.caps_valid) {
94562810
RS
3112 dm->backlight_caps.caps_valid = true;
3113 if (caps.aux_support)
3114 return;
206bbafe
DF
3115 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3116 dm->backlight_caps.max_input_signal = caps.max_input_signal;
206bbafe
DF
3117 } else {
3118 dm->backlight_caps.min_input_signal =
3119 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3120 dm->backlight_caps.max_input_signal =
3121 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3122 }
3123#else
94562810
RS
3124 if (dm->backlight_caps.aux_support)
3125 return;
3126
8bcbc9ef
DF
3127 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3128 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
206bbafe
DF
3129#endif
3130}
3131
94562810
RS
3132static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
3133{
3134 bool rc;
3135
3136 if (!link)
3137 return 1;
3138
3139 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3140 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3141
3142 return rc ? 0 : 1;
3143}
3144
69d9f427
AM
3145static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3146 unsigned *min, unsigned *max)
94562810 3147{
94562810 3148 if (!caps)
69d9f427 3149 return 0;
94562810 3150
69d9f427
AM
3151 if (caps->aux_support) {
3152 // Firmware limits are in nits, DC API wants millinits.
3153 *max = 1000 * caps->aux_max_input_signal;
3154 *min = 1000 * caps->aux_min_input_signal;
94562810 3155 } else {
69d9f427
AM
3156 // Firmware limits are 8-bit, PWM control is 16-bit.
3157 *max = 0x101 * caps->max_input_signal;
3158 *min = 0x101 * caps->min_input_signal;
94562810 3159 }
69d9f427
AM
3160 return 1;
3161}
94562810 3162
69d9f427
AM
3163static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3164 uint32_t brightness)
3165{
3166 unsigned min, max;
94562810 3167
69d9f427
AM
3168 if (!get_brightness_range(caps, &min, &max))
3169 return brightness;
3170
3171 // Rescale 0..255 to min..max
3172 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3173 AMDGPU_MAX_BL_LEVEL);
3174}
3175
3176static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3177 uint32_t brightness)
3178{
3179 unsigned min, max;
3180
3181 if (!get_brightness_range(caps, &min, &max))
3182 return brightness;
3183
3184 if (brightness < min)
3185 return 0;
3186 // Rescale min..max to 0..255
3187 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3188 max - min);
94562810
RS
3189}
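/*
 * Worked example (illustrative): with the PWM defaults
 * min_input_signal = 12 and max_input_signal = 255, the effective range
 * is min = 0x101 * 12 = 3084 and max = 0x101 * 255 = 65535. A user
 * brightness of 128 then maps to
 * 3084 + DIV_ROUND_CLOSEST((65535 - 3084) * 128, 255) = 34432,
 * and convert_brightness_to_user(caps, 34432) maps back to 128.
 */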
3190
4562236b
HW
3191static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3192{
3193 struct amdgpu_display_manager *dm = bl_get_data(bd);
206bbafe 3194 struct amdgpu_dm_backlight_caps caps;
94562810
RS
3195 struct dc_link *link = NULL;
3196 u32 brightness;
3197 bool rc;
4562236b 3198
206bbafe
DF
3199 amdgpu_dm_update_backlight_caps(dm);
3200 caps = dm->backlight_caps;
94562810
RS
3201
3202 link = (struct dc_link *)dm->backlight_link;
3203
69d9f427 3204 brightness = convert_brightness_from_user(&caps, bd->props.brightness);
94562810
RS
3205 // Change brightness based on AUX property
3206 if (caps.aux_support)
3207 return set_backlight_via_aux(link, brightness);
3208
3209 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3210
3211 return rc ? 0 : 1;
4562236b
HW
3212}
3213
3214static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3215{
620a0d27
DF
3216 struct amdgpu_display_manager *dm = bl_get_data(bd);
3217 int ret = dc_link_get_backlight_level(dm->backlight_link);
3218
3219 if (ret == DC_ERROR_UNEXPECTED)
3220 return bd->props.brightness;
69d9f427 3221 return convert_brightness_to_user(&dm->backlight_caps, ret);
4562236b
HW
3222}
3223
3224static const struct backlight_ops amdgpu_dm_backlight_ops = {
bb264220 3225 .options = BL_CORE_SUSPENDRESUME,
4562236b
HW
3226 .get_brightness = amdgpu_dm_backlight_get_brightness,
3227 .update_status = amdgpu_dm_backlight_update_status,
3228};
3229
7578ecda
AD
3230static void
3231amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4562236b
HW
3232{
3233 char bl_name[16];
3234 struct backlight_properties props = { 0 };
3235
206bbafe
DF
3236 amdgpu_dm_update_backlight_caps(dm);
3237
4562236b 3238 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
53a53f86 3239 props.brightness = AMDGPU_MAX_BL_LEVEL;
4562236b
HW
3240 props.type = BACKLIGHT_RAW;
3241
3242 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4a580877 3243 adev_to_drm(dm->adev)->primary->index);
4562236b
HW
3244
3245 dm->backlight_dev = backlight_device_register(bl_name,
4a580877
LT
3246 adev_to_drm(dm->adev)->dev,
3247 dm,
3248 &amdgpu_dm_backlight_ops,
3249 &props);
4562236b 3250
74baea42 3251 if (IS_ERR(dm->backlight_dev))
4562236b
HW
3252 DRM_ERROR("DM: Backlight registration failed!\n");
3253 else
f1ad2f5e 3254 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4562236b
HW
3255}
3256
3257#endif
3258
df534fff 3259static int initialize_plane(struct amdgpu_display_manager *dm,
b2fddb13 3260 struct amdgpu_mode_info *mode_info, int plane_id,
cc1fec57
NK
3261 enum drm_plane_type plane_type,
3262 const struct dc_plane_cap *plane_cap)
df534fff 3263{
f180b4bc 3264 struct drm_plane *plane;
df534fff
S
3265 unsigned long possible_crtcs;
3266 int ret = 0;
3267
f180b4bc 3268 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
df534fff
S
3269 if (!plane) {
3270 DRM_ERROR("KMS: Failed to allocate plane\n");
3271 return -ENOMEM;
3272 }
b2fddb13 3273 plane->type = plane_type;
df534fff
S
3274
3275 /*
b2fddb13
NK
3276 * HACK: IGT tests expect that the primary plane for a CRTC
3277 * can only have one possible CRTC. Only expose support for
3278 * any CRTC if they're not going to be used as a primary plane
3279 * for a CRTC - like overlay or underlay planes.
df534fff
S
3280 */
3281 possible_crtcs = 1 << plane_id;
3282 if (plane_id >= dm->dc->caps.max_streams)
3283 possible_crtcs = 0xff;
3284
cc1fec57 3285 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
df534fff
S
3286
3287 if (ret) {
3288 DRM_ERROR("KMS: Failed to initialize plane\n");
54087768 3289 kfree(plane);
df534fff
S
3290 return ret;
3291 }
3292
54087768
NK
3293 if (mode_info)
3294 mode_info->planes[plane_id] = plane;
3295
df534fff
S
3296 return ret;
3297}
3298
89fc8d4e
HW
3299
3300static void register_backlight_device(struct amdgpu_display_manager *dm,
3301 struct dc_link *link)
3302{
3303#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3304 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3305
3306 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3307 link->type != dc_connection_none) {
1f6010a9
DF
3308 /*
 3309 * Even if registration fails, we should continue with
89fc8d4e
HW
3310 * DM initialization because not having a backlight control
 3311 * is better than a black screen.
3312 */
3313 amdgpu_dm_register_backlight_device(dm);
3314
3315 if (dm->backlight_dev)
3316 dm->backlight_link = link;
3317 }
3318#endif
3319}
3320
3321
1f6010a9
DF
3322/*
3323 * In this architecture, the association
4562236b
HW
3324 * connector -> encoder -> crtc
 3325 * is not really required. The crtc and connector will hold the
 3326 * display_index as an abstraction to use with the DAL component
3327 *
3328 * Returns 0 on success
3329 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	int32_t i;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct amdgpu_encoder *aencoder = NULL;
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	uint32_t link_cnt;
	int32_t primary_planes;
	enum dc_connection_type new_connection_type = dc_connection_none;
	const struct dc_plane_cap *plane;

	dm->display_indexes_num = dm->dc->caps.max_streams;
	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	link_cnt = dm->dc->caps.max_links;
	if (amdgpu_dm_mode_config_init(dm->adev)) {
		DRM_ERROR("DM: Failed to initialize mode config\n");
		return -EINVAL;
	}

	/* There is one primary plane per CRTC */
	primary_planes = dm->dc->caps.max_streams;
	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);

	/*
	 * Initialize primary planes, implicit planes for legacy IOCTLS.
	 * Order is reversed to match iteration order in atomic check.
	 */
	for (i = (primary_planes - 1); i >= 0; i--) {
		plane = &dm->dc->caps.planes[i];

		if (initialize_plane(dm, mode_info, i,
				     DRM_PLANE_TYPE_PRIMARY, plane)) {
			DRM_ERROR("KMS: Failed to initialize primary plane\n");
			goto fail;
		}
	}

	/*
	 * Initialize overlay planes, index starting after primary planes.
	 * These planes have a higher DRM index than the primary planes since
	 * they should be considered as having a higher z-order.
	 * Order is reversed to match iteration order in atomic check.
	 *
	 * Only support DCN for now, and only expose one so we don't encourage
	 * userspace to use up all the pipes.
	 */
	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];

		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
			continue;

		if (!plane->blends_with_above || !plane->blends_with_below)
			continue;

		if (!plane->pixel_format_support.argb8888)
			continue;

		if (initialize_plane(dm, NULL, primary_planes + i,
				     DRM_PLANE_TYPE_OVERLAY, plane)) {
			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
			goto fail;
		}

		/* Only create one overlay plane. */
		break;
	}

	for (i = 0; i < dm->dc->caps.max_streams; i++)
		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
			DRM_ERROR("KMS: Failed to initialize crtc\n");
			goto fail;
		}

	/* loops over all connectors on the board */
	for (i = 0; i < link_cnt; i++) {
		struct dc_link *link = NULL;

		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
			DRM_ERROR(
				"KMS: Cannot support more than %d display indexes\n",
					AMDGPU_DM_MAX_DISPLAY_INDEX);
			continue;
		}

		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
		if (!aconnector)
			goto fail;

		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
		if (!aencoder)
			goto fail;

		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
			DRM_ERROR("KMS: Failed to initialize encoder\n");
			goto fail;
		}

		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
			DRM_ERROR("KMS: Failed to initialize connector\n");
			goto fail;
		}

		link = dc_get_link_at_index(dm->dc, i);

		if (!dc_link_detect_sink(link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(link);
			amdgpu_dm_update_connector_after_detect(aconnector);

		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
			amdgpu_dm_update_connector_after_detect(aconnector);
			register_backlight_device(dm, link);
			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
				amdgpu_dm_set_psr_caps(link);
		}

	}

	/* Software is initialized. Now we can register interrupt handlers. */
	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
		if (dce60_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		if (dce110_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_RAVEN:
	case CHIP_NAVI12:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_VANGOGH:
		if (dcn10_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#endif
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		goto fail;
	}

	return 0;
fail:
	kfree(aencoder);
	kfree(aconnector);

	return -EINVAL;
}

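/* Tear down the mode config state created by amdgpu_dm_initialize_drm_device(). */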
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
	drm_mode_config_cleanup(dm->ddev);
	drm_atomic_private_obj_fini(&dm->atomic_obj);
	return;
}

/******************************************************************************
 * amdgpu_display_funcs functions
 *****************************************************************************/

/*
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
}

static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
	.backlight_set_level = NULL, /* never called for DC */
	.backlight_get_level = NULL, /* never called for DC */
	.hpd_sense = NULL, /* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos, /* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
};

#if defined(CONFIG_DEBUG_KERNEL_DC)

static ssize_t s3_debug_store(struct device *device,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t count)
{
	int ret;
	int s3_state;
	struct drm_device *drm_dev = dev_get_drvdata(device);
	struct amdgpu_device *adev = drm_to_adev(drm_dev);

	ret = kstrtoint(buf, 0, &s3_state);

	if (ret == 0) {
		if (s3_state) {
			dm_resume(adev);
			drm_kms_helper_hotplug_event(adev_to_drm(adev));
		} else
			dm_suspend(adev);
	}

	return ret == 0 ? count : 0;
}

DEVICE_ATTR_WO(s3_debug);

#endif

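/*
 * dm_early_init() - set the per-ASIC crtc/hpd/dig counts and install the DM
 * display and IRQ function tables before the rest of DM initialization runs.
 */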
static int dm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_OLAND:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 2;
		adev->mode_info.num_dig = 2;
		break;
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_KAVERI:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_CARRIZO:
		adev->mode_info.num_crtc = 3;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_STONEY:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
	case CHIP_POLARIS10:
	case CHIP_VEGAM:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_RAVEN:
	case CHIP_RENOIR:
	case CHIP_VANGOGH:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 4;
		adev->mode_info.num_dig = 4;
		break;
	case CHIP_NAVI10:
	case CHIP_NAVI12:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_NAVI14:
	case CHIP_DIMGREY_CAVEFISH:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
#endif
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	amdgpu_dm_set_irq_funcs(adev);

	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dm_display_funcs;

	/*
	 * Note: Do NOT change adev->audio_endpt_rreg and
	 * adev->audio_endpt_wreg because they are initialised in
	 * amdgpu_device_init()
	 */
#if defined(CONFIG_DEBUG_KERNEL_DC)
	device_create_file(
		adev_to_drm(adev)->dev,
		&dev_attr_s3_debug);
#endif

	return 0;
}

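/*
 * A modeset is required when a CRTC is being activated with a new stream;
 * a modereset when an active CRTC is being disabled.
 */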
static bool modeset_required(struct drm_crtc_state *crtc_state,
			     struct dc_stream_state *new_stream,
			     struct dc_stream_state *old_stream)
{
	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
}

static bool modereset_required(struct drm_crtc_state *crtc_state)
{
	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
}

static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}

static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};

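/*
 * Look up the DC per-format scaling limits for a framebuffer. Factors are in
 * units of 1/1000th of the source size, e.g. a min_downscale of 250 allows
 * shrinking the plane to a quarter of its source dimensions.
 */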
static void get_min_max_dc_plane_scaling(struct drm_device *dev,
					 struct drm_framebuffer *fb,
					 int *min_downscale, int *max_upscale)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];

	switch (fb->format->format) {
	case DRM_FORMAT_P010:
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV21:
		*max_upscale = plane_cap->max_upscale_factor.nv12;
		*min_downscale = plane_cap->max_downscale_factor.nv12;
		break;

	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		*max_upscale = plane_cap->max_upscale_factor.fp16;
		*min_downscale = plane_cap->max_downscale_factor.fp16;
		break;

	default:
		*max_upscale = plane_cap->max_upscale_factor.argb8888;
		*min_downscale = plane_cap->max_downscale_factor.argb8888;
		break;
	}

	/*
	 * A factor of 1 in the plane_cap means to not allow scaling, i.e. use
	 * a scaling factor of 1.0 == 1000 units.
	 */
	if (*max_upscale == 1)
		*max_upscale = 1000;

	if (*min_downscale == 1)
		*min_downscale = 1000;
}

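/*
 * Translate the DRM plane state (16.16 fixed-point source, integer dest)
 * into a dc_scaling_info, rejecting scaling ratios outside the plane caps.
 */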
static int fill_dc_scaling_info(const struct drm_plane_state *state,
				struct dc_scaling_info *scaling_info)
{
	int scale_w, scale_h, min_downscale, max_upscale;

	memset(scaling_info, 0, sizeof(*scaling_info));

	/* Source is fixed 16.16 but we ignore mantissa for now... */
	scaling_info->src_rect.x = state->src_x >> 16;
	scaling_info->src_rect.y = state->src_y >> 16;

	scaling_info->src_rect.width = state->src_w >> 16;
	if (scaling_info->src_rect.width == 0)
		return -EINVAL;

	scaling_info->src_rect.height = state->src_h >> 16;
	if (scaling_info->src_rect.height == 0)
		return -EINVAL;

	scaling_info->dst_rect.x = state->crtc_x;
	scaling_info->dst_rect.y = state->crtc_y;

	if (state->crtc_w == 0)
		return -EINVAL;

	scaling_info->dst_rect.width = state->crtc_w;

	if (state->crtc_h == 0)
		return -EINVAL;

	scaling_info->dst_rect.height = state->crtc_h;

	/* DRM doesn't specify clipping on destination output. */
	scaling_info->clip_rect = scaling_info->dst_rect;

	/* Validate scaling per-format with DC plane caps */
	if (state->plane && state->plane->dev && state->fb) {
		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
					     &min_downscale, &max_upscale);
	} else {
		min_downscale = 250;
		max_upscale = 16000;
	}

	scale_w = scaling_info->dst_rect.width * 1000 /
		  scaling_info->src_rect.width;

	if (scale_w < min_downscale || scale_w > max_upscale)
		return -EINVAL;

	scale_h = scaling_info->dst_rect.height * 1000 /
		  scaling_info->src_rect.height;

	if (scale_h < min_downscale || scale_h > max_upscale)
		return -EINVAL;

	/*
	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
	 * assume reasonable defaults based on the format.
	 */

	return 0;
}

static void
fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
				 uint64_t tiling_flags)
{
	/* Fill GFX8 params */
	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		/* XXX fix me for VI */
		tiling_info->gfx8.num_banks = num_banks;
		tiling_info->gfx8.array_mode =
				DC_ARRAY_2D_TILED_THIN1;
		tiling_info->gfx8.tile_split = tile_split;
		tiling_info->gfx8.bank_width = bankw;
		tiling_info->gfx8.bank_height = bankh;
		tiling_info->gfx8.tile_aspect = mtaspect;
		tiling_info->gfx8.tile_mode =
				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
			== DC_ARRAY_1D_TILED_THIN1) {
		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
	}

	tiling_info->gfx8.pipe_config =
			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
}

static void
fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
				  union dc_tiling_info *tiling_info)
{
	tiling_info->gfx9.num_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;
	tiling_info->gfx9.num_banks =
		adev->gfx.config.gb_addr_config_fields.num_banks;
	tiling_info->gfx9.pipe_interleave =
		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
	tiling_info->gfx9.num_shader_engines =
		adev->gfx.config.gb_addr_config_fields.num_se;
	tiling_info->gfx9.max_compressed_frags =
		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
	tiling_info->gfx9.num_rb_per_se =
		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
	tiling_info->gfx9.shaderEnable = 1;
	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
	    adev->asic_type == CHIP_NAVY_FLOUNDER ||
	    adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
	    adev->asic_type == CHIP_VANGOGH)
		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
}

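/*
 * Ask DC whether the DCC parameters implied by the framebuffer are actually
 * achievable for this format/rotation/swizzle combination.
 */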
static int
validate_dcc(struct amdgpu_device *adev,
	     const enum surface_pixel_format format,
	     const enum dc_rotation_angle rotation,
	     const union dc_tiling_info *tiling_info,
	     const struct dc_plane_dcc_param *dcc,
	     const struct dc_plane_address *address,
	     const struct plane_size *plane_size)
{
	struct dc *dc = adev->dm.dc;
	struct dc_dcc_surface_param input;
	struct dc_surface_dcc_cap output;

	memset(&input, 0, sizeof(input));
	memset(&output, 0, sizeof(output));

	if (!dcc->enable)
		return 0;

	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
	    !dc->cap_funcs.get_dcc_compression_cap)
		return -EINVAL;

	input.format = format;
	input.surface_size.width = plane_size->surface_size.width;
	input.surface_size.height = plane_size->surface_size.height;
	input.swizzle_mode = tiling_info->gfx9.swizzle;

	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
		input.scan = SCAN_DIRECTION_HORIZONTAL;
	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
		input.scan = SCAN_DIRECTION_VERTICAL;

	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
		return -EINVAL;

	if (!output.capable)
		return -EINVAL;

	if (dcc->independent_64b_blks == 0 &&
	    output.grph.rgb.independent_64b_blks != 0)
		return -EINVAL;

	return 0;
}

static bool
modifier_has_dcc(uint64_t modifier)
{
	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
}

static unsigned
modifier_gfx9_swizzle_mode(uint64_t modifier)
{
	if (modifier == DRM_FORMAT_MOD_LINEAR)
		return 0;

	return AMD_FMT_MOD_GET(TILE, modifier);
}

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
{
	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
}

static void
fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
				    union dc_tiling_info *tiling_info,
				    uint64_t modifier)
{
	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);

	fill_gfx9_tiling_info_from_device(adev, tiling_info);

	if (!IS_AMD_FMT_MOD(modifier))
		return;

	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);

	if (adev->family >= AMDGPU_FAMILY_NV) {
		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
	} else {
		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;

		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
	}
}

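/*
 * The low two bits of a GFX9+ swizzle mode select the micro-tile ordering,
 * which is how dm_plane_format_mod_supported() below decodes it.
 */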
enum dm_micro_swizzle {
	MICRO_SWIZZLE_Z = 0,
	MICRO_SWIZZLE_S = 1,
	MICRO_SWIZZLE_D = 2,
	MICRO_SWIZZLE_R = 3
};

static bool dm_plane_format_mod_supported(struct drm_plane *plane,
					  uint32_t format,
					  uint64_t modifier)
{
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	const struct drm_format_info *info = drm_format_info(format);

	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;

	if (!info)
		return false;

	/*
	 * We always have to allow this modifier, because core DRM still
	 * checks LINEAR support if userspace does not provide modifiers.
	 */
	if (modifier == DRM_FORMAT_MOD_LINEAR)
		return true;

	/*
	 * The arbitrary tiling support for multiplane formats has not been
	 * hooked up.
	 */
	if (info->num_planes > 1)
		return false;

	/*
	 * For D swizzle the canonical modifier depends on the bpp, so check
	 * it here.
	 */
	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
	    adev->family >= AMDGPU_FAMILY_NV) {
		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
			return false;
	}

	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
	    info->cpp[0] < 8)
		return false;

	if (modifier_has_dcc(modifier)) {
		/* Per radeonsi comments 16/64 bpp are more complicated. */
		if (info->cpp[0] != 4)
			return false;
	}

	return true;
}

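/*
 * Append a modifier to a kmalloc'd array, doubling its capacity when full.
 * On allocation failure the array is freed and *mods set to NULL so callers
 * can detect the error once at the end.
 */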
static void
add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
{
	if (!*mods)
		return;

	if (*cap - *size < 1) {
		uint64_t new_cap = *cap * 2;
		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);

		if (!new_mods) {
			kfree(*mods);
			*mods = NULL;
			return;
		}

		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
		kfree(*mods);
		*mods = new_mods;
		*cap = new_cap;
	}

	(*mods)[*size] = mod;
	*size += 1;
}

static void
add_gfx9_modifiers(const struct amdgpu_device *adev,
		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
	int pipe_xor_bits = min(8, pipes +
				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
	int bank_xor_bits = min(8 - pipe_xor_bits,
				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);


	if (adev->family == AMDGPU_FAMILY_RV) {
		/* Raven2 and later */
		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;

		/*
		 * No _D DCC swizzles yet because we only allow 32bpp, which
		 * doesn't support _D on DCN
		 */

		if (has_constant_encode) {
			add_modifier(mods, size, capacity, AMD_FMT_MOD |
				     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
				     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
				     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
				     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
				     AMD_FMT_MOD_SET(DCC, 1) |
				     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
				     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
				     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
		}

		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
			     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
			     AMD_FMT_MOD_SET(DCC, 1) |
			     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
			     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
			     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));

		if (has_constant_encode) {
			add_modifier(mods, size, capacity, AMD_FMT_MOD |
				     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
				     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
				     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
				     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
				     AMD_FMT_MOD_SET(DCC, 1) |
				     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
				     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
				     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
				     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
				     AMD_FMT_MOD_SET(RB, rb) |
				     AMD_FMT_MOD_SET(PIPE, pipes));
		}

		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
			     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
			     AMD_FMT_MOD_SET(DCC, 1) |
			     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
			     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
			     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
			     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
			     AMD_FMT_MOD_SET(RB, rb) |
			     AMD_FMT_MOD_SET(PIPE, pipes));
	}

	/*
	 * Only supported for 64bpp on Raven, will be filtered on format in
	 * dm_plane_format_mod_supported.
	 */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));

	if (adev->family == AMDGPU_FAMILY_RV) {
		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
			     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
	}

	/*
	 * Only supported for 64bpp on Raven, will be filtered on format in
	 * dm_plane_format_mod_supported.
	 */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	if (adev->family == AMDGPU_FAMILY_RV) {
		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
			     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
	}
}

static void
add_gfx10_1_modifiers(const struct amdgpu_device *adev,
		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		     AMD_FMT_MOD_SET(DCC, 1) |
		     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		     AMD_FMT_MOD_SET(DCC, 1) |
		     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
		     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));


	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
}

static void
add_gfx10_3_modifiers(const struct amdgpu_device *adev,
		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		     AMD_FMT_MOD_SET(PACKERS, pkrs) |
		     AMD_FMT_MOD_SET(DCC, 1) |
		     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
		     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		     AMD_FMT_MOD_SET(PACKERS, pkrs) |
		     AMD_FMT_MOD_SET(DCC, 1) |
		     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
		     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
		     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		     AMD_FMT_MOD_SET(PACKERS, pkrs));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		     AMD_FMT_MOD_SET(PACKERS, pkrs));

	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
}

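/*
 * Build the modifier list advertised for a plane: cursor planes only get
 * LINEAR, everything else gets the family-specific tiled/DCC modifiers plus
 * LINEAR, always terminated by DRM_FORMAT_MOD_INVALID.
 */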
static int
get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
{
	uint64_t size = 0, capacity = 128;
	*mods = NULL;

	/* We have not hooked up any pre-GFX9 modifiers. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return 0;

	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);

	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
		return *mods ? 0 : -ENOMEM;
	}

	switch (adev->family) {
	case AMDGPU_FAMILY_AI:
	case AMDGPU_FAMILY_RV:
		add_gfx9_modifiers(adev, mods, &size, &capacity);
		break;
	case AMDGPU_FAMILY_NV:
	case AMDGPU_FAMILY_VGH:
		if (adev->asic_type >= CHIP_SIENNA_CICHLID)
			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
		else
			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
		break;
	}

	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);

	/* INVALID marks the end of the list. */
	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);

	if (!*mods)
		return -ENOMEM;

	return 0;
}

static int
fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
					  const struct amdgpu_framebuffer *afb,
					  const enum surface_pixel_format format,
					  const enum dc_rotation_angle rotation,
					  const struct plane_size *plane_size,
					  union dc_tiling_info *tiling_info,
					  struct dc_plane_dcc_param *dcc,
					  struct dc_plane_address *address,
					  const bool force_disable_dcc)
{
	const uint64_t modifier = afb->base.modifier;
	int ret;

	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);

	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
		uint64_t dcc_address = afb->address + afb->base.offsets[1];

		dcc->enable = 1;
		dcc->meta_pitch = afb->base.pitches[1];
		dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);

		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
	}

	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
	if (ret)
		return ret;

	return 0;
}

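/*
 * Fill DC tiling/size/DCC/address state from an amdgpu framebuffer: a single
 * GRAPHICS address for RGB surfaces, split luma/chroma addresses for video.
 */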
static int
fill_plane_buffer_attributes(struct amdgpu_device *adev,
			     const struct amdgpu_framebuffer *afb,
			     const enum surface_pixel_format format,
			     const enum dc_rotation_angle rotation,
			     const uint64_t tiling_flags,
			     union dc_tiling_info *tiling_info,
			     struct plane_size *plane_size,
			     struct dc_plane_dcc_param *dcc,
			     struct dc_plane_address *address,
			     bool tmz_surface,
			     bool force_disable_dcc)
{
	const struct drm_framebuffer *fb = &afb->base;
	int ret;

	memset(tiling_info, 0, sizeof(*tiling_info));
	memset(plane_size, 0, sizeof(*plane_size));
	memset(dcc, 0, sizeof(*dcc));
	memset(address, 0, sizeof(*address));

	address->tmz_surface = tmz_surface;

	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
		uint64_t addr = afb->address + fb->offsets[0];

		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		address->type = PLN_ADDR_TYPE_GRAPHICS;
		address->grph.addr.low_part = lower_32_bits(addr);
		address->grph.addr.high_part = upper_32_bits(addr);
	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
		uint64_t luma_addr = afb->address + fb->offsets[0];
		uint64_t chroma_addr = afb->address + fb->offsets[1];

		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		plane_size->chroma_size.x = 0;
		plane_size->chroma_size.y = 0;
		/* TODO: set these based on surface format */
		plane_size->chroma_size.width = fb->width / 2;
		plane_size->chroma_size.height = fb->height / 2;

		plane_size->chroma_pitch =
			fb->pitches[1] / fb->format->cpp[1];

		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
		address->video_progressive.luma_addr.low_part =
			lower_32_bits(luma_addr);
		address->video_progressive.luma_addr.high_part =
			upper_32_bits(luma_addr);
		address->video_progressive.chroma_addr.low_part =
			lower_32_bits(chroma_addr);
		address->video_progressive.chroma_addr.high_part =
			upper_32_bits(chroma_addr);
	}

	if (adev->family >= AMDGPU_FAMILY_AI) {
		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
								rotation, plane_size,
								tiling_info, dcc,
								address,
								force_disable_dcc);
		if (ret)
			return ret;
	} else {
		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
	}

	return 0;
}

static void
fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
			       bool *per_pixel_alpha, bool *global_alpha,
			       int *global_alpha_value)
{
	*per_pixel_alpha = false;
	*global_alpha = false;
	*global_alpha_value = 0xff;

	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
		return;

	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
		static const uint32_t alpha_formats[] = {
			DRM_FORMAT_ARGB8888,
			DRM_FORMAT_RGBA8888,
			DRM_FORMAT_ABGR8888,
		};
		uint32_t format = plane_state->fb->format->format;
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
			if (format == alpha_formats[i]) {
				*per_pixel_alpha = true;
				break;
			}
		}
	}

	if (plane_state->alpha < 0xffff) {
		*global_alpha = true;
		*global_alpha_value = plane_state->alpha >> 8;
	}
}

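/*
 * Map the DRM YCbCr encoding/range properties onto a DC color space; RGB
 * formats are always treated as sRGB.
 */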
static int
fill_plane_color_attributes(const struct drm_plane_state *plane_state,
			    const enum surface_pixel_format format,
			    enum dc_color_space *color_space)
{
	bool full_range;

	*color_space = COLOR_SPACE_SRGB;

	/* DRM color properties only affect non-RGB formats. */
	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
		return 0;

	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);

	switch (plane_state->color_encoding) {
	case DRM_COLOR_YCBCR_BT601:
		if (full_range)
			*color_space = COLOR_SPACE_YCBCR601;
		else
			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
		break;

	case DRM_COLOR_YCBCR_BT709:
		if (full_range)
			*color_space = COLOR_SPACE_YCBCR709;
		else
			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
		break;

	case DRM_COLOR_YCBCR_BT2020:
		if (full_range)
			*color_space = COLOR_SPACE_2020_YCBCR;
		else
			return -EINVAL;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static int
fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
			    const struct drm_plane_state *plane_state,
			    const uint64_t tiling_flags,
			    struct dc_plane_info *plane_info,
			    struct dc_plane_address *address,
			    bool tmz_surface,
			    bool force_disable_dcc)
{
	const struct drm_framebuffer *fb = plane_state->fb;
	const struct amdgpu_framebuffer *afb =
		to_amdgpu_framebuffer(plane_state->fb);
	struct drm_format_name_buf format_name;
	int ret;

	memset(plane_info, 0, sizeof(*plane_info));

	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		plane_info->format =
			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
		break;
	case DRM_FORMAT_RGB565:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
		break;
	case DRM_FORMAT_NV21:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
		break;
	case DRM_FORMAT_NV12:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
		break;
	case DRM_FORMAT_P010:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
		break;
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
		break;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
		break;
	default:
		DRM_ERROR(
			"Unsupported screen format %s\n",
			drm_get_format_name(fb->format->format, &format_name));
		return -EINVAL;
	}

	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
	case DRM_MODE_ROTATE_0:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	case DRM_MODE_ROTATE_90:
		plane_info->rotation = ROTATION_ANGLE_90;
		break;
	case DRM_MODE_ROTATE_180:
		plane_info->rotation = ROTATION_ANGLE_180;
		break;
	case DRM_MODE_ROTATE_270:
		plane_info->rotation = ROTATION_ANGLE_270;
		break;
	default:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	}

	plane_info->visible = true;
	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;

	plane_info->layer_index = 0;

	ret = fill_plane_color_attributes(plane_state, plane_info->format,
					  &plane_info->color_space);
	if (ret)
		return ret;

	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
					   plane_info->rotation, tiling_flags,
					   &plane_info->tiling_info,
					   &plane_info->plane_size,
					   &plane_info->dcc, address, tmz_surface,
					   force_disable_dcc);
	if (ret)
		return ret;

	fill_blending_from_plane_state(
		plane_state, &plane_info->per_pixel_alpha,
		&plane_info->global_alpha, &plane_info->global_alpha_value);

	return 0;
}

static int fill_dc_plane_attributes(struct amdgpu_device *adev,
				    struct dc_plane_state *dc_plane_state,
				    struct drm_plane_state *plane_state,
				    struct drm_crtc_state *crtc_state)
{
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
	struct dc_scaling_info scaling_info;
	struct dc_plane_info plane_info;
	int ret;
	bool force_disable_dcc = false;

	ret = fill_dc_scaling_info(plane_state, &scaling_info);
	if (ret)
		return ret;

	dc_plane_state->src_rect = scaling_info.src_rect;
	dc_plane_state->dst_rect = scaling_info.dst_rect;
	dc_plane_state->clip_rect = scaling_info.clip_rect;
	dc_plane_state->scaling_quality = scaling_info.scaling_quality;

	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
	ret = fill_dc_plane_info_and_addr(adev, plane_state,
					  afb->tiling_flags,
					  &plane_info,
					  &dc_plane_state->address,
					  afb->tmz_surface,
					  force_disable_dcc);
	if (ret)
		return ret;

	dc_plane_state->format = plane_info.format;
	dc_plane_state->color_space = plane_info.color_space;
	dc_plane_state->plane_size = plane_info.plane_size;
	dc_plane_state->rotation = plane_info.rotation;
	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
	dc_plane_state->stereo_format = plane_info.stereo_format;
	dc_plane_state->tiling_info = plane_info.tiling_info;
	dc_plane_state->visible = plane_info.visible;
	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
	dc_plane_state->global_alpha = plane_info.global_alpha;
	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
	dc_plane_state->dcc = plane_info.dcc;
	dc_plane_state->layer_index = plane_info.layer_index; /* Always returns 0 */

	/*
	 * Always set input transfer function, since plane state is refreshed
	 * every time.
	 */
	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
	if (ret)
		return ret;

	return 0;
}

static void update_stream_scaling_settings(const struct drm_display_mode *mode,
					   const struct dm_connector_state *dm_state,
					   struct dc_stream_state *stream)
{
	enum amdgpu_rmx_type rmx_type;

	struct rect src = { 0 }; /* viewport in composition space */
	struct rect dst = { 0 }; /* stream addressable area */

	/* no mode. nothing to be done */
	if (!mode)
		return;

	/* Full screen scaling by default */
	src.width = mode->hdisplay;
	src.height = mode->vdisplay;
	dst.width = stream->timing.h_addressable;
	dst.height = stream->timing.v_addressable;

	if (dm_state) {
		rmx_type = dm_state->scaling;
		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
			if (src.width * dst.height <
					src.height * dst.width) {
				/* height needs less upscaling/more downscaling */
				dst.width = src.width *
						dst.height / src.height;
			} else {
				/* width needs less upscaling/more downscaling */
				dst.height = src.height *
						dst.width / src.width;
			}
		} else if (rmx_type == RMX_CENTER) {
			dst = src;
		}

		dst.x = (stream->timing.h_addressable - dst.width) / 2;
		dst.y = (stream->timing.v_addressable - dst.height) / 2;

		if (dm_state->underscan_enable) {
			dst.x += dm_state->underscan_hborder / 2;
			dst.y += dm_state->underscan_vborder / 2;
			dst.width -= dm_state->underscan_hborder;
			dst.height -= dm_state->underscan_vborder;
		}
	}

	stream->src = src;
	stream->dst = dst;

	DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
			dst.x, dst.y, dst.width, dst.height);

}

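/*
 * Derive the stream colour depth from the sink's EDID caps (with YCbCr 4:2:0
 * deep-colour handled separately), optionally capped by a requested bpc.
 */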
static enum dc_color_depth
convert_color_depth_from_display_info(const struct drm_connector *connector,
				      bool is_y420, int requested_bpc)
{
	uint8_t bpc;

	if (is_y420) {
		bpc = 8;

		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
			bpc = 16;
		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
			bpc = 12;
		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
			bpc = 10;
	} else {
		bpc = (uint8_t)connector->display_info.bpc;
		/* Assume 8 bpc by default if no bpc is specified. */
		bpc = bpc ? bpc : 8;
	}

	if (requested_bpc > 0) {
		/*
		 * Cap display bpc based on the user requested value.
		 *
		 * The value for state->max_bpc may not be correctly updated
		 * depending on when the connector gets added to the state
		 * or if this was called outside of atomic check, so it
		 * can't be used directly.
		 */
		bpc = min_t(u8, bpc, requested_bpc);

		/* Round down to the nearest even number. */
		bpc = bpc - (bpc & 1);
	}

	switch (bpc) {
	case 0:
		/*
		 * Temporary workaround: DRM doesn't parse color depth for
		 * EDID revisions before 1.4
		 * TODO: Fix edid parsing
		 */
		return COLOR_DEPTH_888;
	case 6:
		return COLOR_DEPTH_666;
	case 8:
		return COLOR_DEPTH_888;
	case 10:
		return COLOR_DEPTH_101010;
	case 12:
		return COLOR_DEPTH_121212;
	case 14:
		return COLOR_DEPTH_141414;
	case 16:
		return COLOR_DEPTH_161616;
	default:
		return COLOR_DEPTH_UNDEFINED;
	}
}

static enum dc_aspect_ratio
get_aspect_ratio(const struct drm_display_mode *mode_in)
{
	/* 1-1 mapping, since both enums follow the HDMI spec. */
	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
}

static enum dc_color_space
get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
{
	enum dc_color_space color_space = COLOR_SPACE_SRGB;

	switch (dc_crtc_timing->pixel_encoding) {
	case PIXEL_ENCODING_YCBCR422:
	case PIXEL_ENCODING_YCBCR444:
	case PIXEL_ENCODING_YCBCR420:
	{
		/*
		 * 27030 kHz is the separation point between HDTV and SDTV
		 * according to the HDMI spec; we use YCbCr709 and YCbCr601
		 * respectively.
		 */
		if (dc_crtc_timing->pix_clk_100hz > 270300) {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR709_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR709;
		} else {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR601_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR601;
		}

	}
	break;
	case PIXEL_ENCODING_RGB:
		color_space = COLOR_SPACE_SRGB;
		break;

	default:
		WARN_ON(1);
		break;
	}

	return color_space;
}

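/*
 * Walk down from the current colour depth until the resulting TMDS clock
 * fits the sink's max_tmds_clock; returns false if even 8 bpc won't fit.
 */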
static bool adjust_colour_depth_from_display_info(
	struct dc_crtc_timing *timing_out,
	const struct drm_display_info *info)
{
	enum dc_color_depth depth = timing_out->display_color_depth;
	int normalized_clk;
	do {
		normalized_clk = timing_out->pix_clk_100hz / 10;
		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
			normalized_clk /= 2;
		/* Adjust the pixel clock per the HDMI spec, based on the colour depth */
		switch (depth) {
		case COLOR_DEPTH_888:
			break;
		case COLOR_DEPTH_101010:
			normalized_clk = (normalized_clk * 30) / 24;
			break;
		case COLOR_DEPTH_121212:
			normalized_clk = (normalized_clk * 36) / 24;
			break;
		case COLOR_DEPTH_161616:
			normalized_clk = (normalized_clk * 48) / 24;
			break;
		default:
			/* The above depths are the only ones valid for HDMI. */
			return false;
		}
		if (normalized_clk <= info->max_tmds_clock) {
			timing_out->display_color_depth = depth;
			return true;
		}
	} while (--depth > COLOR_DEPTH_666);
	return false;
}

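/*
 * Fill the DC stream timing from a DRM display mode. When old_stream is
 * given, its vic and sync polarity are carried over; at the end the chosen
 * colour depth is re-checked against HDMI bandwidth.
 */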
static void fill_stream_properties_from_drm_display_mode(
	struct dc_stream_state *stream,
	const struct drm_display_mode *mode_in,
	const struct drm_connector *connector,
	const struct drm_connector_state *connector_state,
	const struct dc_stream_state *old_stream,
	int requested_bpc)
{
	struct dc_crtc_timing *timing_out = &stream->timing;
	const struct drm_display_info *info = &connector->display_info;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct hdmi_vendor_infoframe hv_frame;
	struct hdmi_avi_infoframe avi_frame;

	memset(&hv_frame, 0, sizeof(hv_frame));
	memset(&avi_frame, 0, sizeof(avi_frame));

	timing_out->h_border_left = 0;
	timing_out->h_border_right = 0;
	timing_out->v_border_top = 0;
	timing_out->v_border_bottom = 0;
	/* TODO: un-hardcode */
	if (drm_mode_is_420_only(info, mode_in)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if (drm_mode_is_420_also(info, mode_in)
			&& aconnector->force_yuv420_output)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
	else
		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;

	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
	timing_out->display_color_depth = convert_color_depth_from_display_info(
		connector,
		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
		requested_bpc);
	timing_out->scan_type = SCANNING_TYPE_NODATA;
	timing_out->hdmi_vic = 0;

	if (old_stream) {
		timing_out->vic = old_stream->timing.vic;
		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
	} else {
		timing_out->vic = drm_match_cea_mode(mode_in);
		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
	}

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
		timing_out->vic = avi_frame.video_code;
		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
		timing_out->hdmi_vic = hv_frame.vic;
	}

	timing_out->h_addressable = mode_in->crtc_hdisplay;
	timing_out->h_total = mode_in->crtc_htotal;
	timing_out->h_sync_width =
		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
	timing_out->h_front_porch =
		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
	timing_out->v_total = mode_in->crtc_vtotal;
	timing_out->v_addressable = mode_in->crtc_vdisplay;
	timing_out->v_front_porch =
		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
	timing_out->v_sync_width =
		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
	timing_out->aspect_ratio = get_aspect_ratio(mode_in);

	stream->output_color_space = get_output_color_space(timing_out);

	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
		    drm_mode_is_420_also(info, mode_in) &&
		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
			adjust_colour_depth_from_display_info(timing_out, info);
		}
	}
}

3ee6b26b
AD
4968static void fill_audio_info(struct audio_info *audio_info,
4969 const struct drm_connector *drm_connector,
4970 const struct dc_sink *dc_sink)
e7b07cee
HW
4971{
4972 int i = 0;
4973 int cea_revision = 0;
4974 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4975
4976 audio_info->manufacture_id = edid_caps->manufacturer_id;
4977 audio_info->product_id = edid_caps->product_id;
4978
4979 cea_revision = drm_connector->display_info.cea_rev;
4980
090afc1e 4981 strscpy(audio_info->display_name,
d2b2562c 4982 edid_caps->display_name,
090afc1e 4983 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
e7b07cee 4984
b830ebc9 4985 if (cea_revision >= 3) {
e7b07cee
HW
4986 audio_info->mode_count = edid_caps->audio_mode_count;
4987
4988 for (i = 0; i < audio_info->mode_count; ++i) {
4989 audio_info->modes[i].format_code =
4990 (enum audio_format_code)
4991 (edid_caps->audio_modes[i].format_code);
4992 audio_info->modes[i].channel_count =
4993 edid_caps->audio_modes[i].channel_count;
4994 audio_info->modes[i].sample_rates.all =
4995 edid_caps->audio_modes[i].sample_rate;
4996 audio_info->modes[i].sample_size =
4997 edid_caps->audio_modes[i].sample_size;
4998 }
4999 }
5000
5001 audio_info->flags.all = edid_caps->speaker_flags;
5002
5003	/* TODO: We only check the progressive mode; check the interlaced mode too */
b830ebc9 5004 if (drm_connector->latency_present[0]) {
e7b07cee
HW
5005 audio_info->video_latency = drm_connector->video_latency[0];
5006 audio_info->audio_latency = drm_connector->audio_latency[0];
5007 }
5008
5009 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5010
5011}
5012
3ee6b26b
AD
5013static void
5014copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5015 struct drm_display_mode *dst_mode)
e7b07cee
HW
5016{
5017 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5018 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5019 dst_mode->crtc_clock = src_mode->crtc_clock;
5020 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5021 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
b830ebc9 5022 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
e7b07cee
HW
5023 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5024 dst_mode->crtc_htotal = src_mode->crtc_htotal;
5025 dst_mode->crtc_hskew = src_mode->crtc_hskew;
5026 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5027 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5028 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5029 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5030 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5031}
5032
3ee6b26b
AD
5033static void
5034decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5035 const struct drm_display_mode *native_mode,
5036 bool scale_enabled)
e7b07cee
HW
5037{
5038 if (scale_enabled) {
5039 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5040 } else if (native_mode->clock == drm_mode->clock &&
5041 native_mode->htotal == drm_mode->htotal &&
5042 native_mode->vtotal == drm_mode->vtotal) {
5043 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5044 } else {
5045		/* no scaling and no amdgpu-inserted mode; nothing to patch */
5046 }
5047}
5048
aed15309
ML
5049static struct dc_sink *
5050create_fake_sink(struct amdgpu_dm_connector *aconnector)
2e0ac3d6 5051{
2e0ac3d6 5052 struct dc_sink_init_data sink_init_data = { 0 };
aed15309 5053 struct dc_sink *sink = NULL;
2e0ac3d6
HW
5054 sink_init_data.link = aconnector->dc_link;
5055 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5056
5057 sink = dc_sink_create(&sink_init_data);
423788c7 5058 if (!sink) {
2e0ac3d6 5059 DRM_ERROR("Failed to create sink!\n");
aed15309 5060 return NULL;
423788c7 5061 }
2e0ac3d6 5062 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
423788c7 5063
aed15309 5064 return sink;
2e0ac3d6
HW
5065}
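/*
 * Note: create_fake_sink() backs the no-sink path in create_stream_for_sink()
 * below; the SIGNAL_TYPE_VIRTUAL sink lets a stream be built for a forced-on
 * or not-yet-detected connector without dereferencing a real detected sink.
 */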
5066
fa2123db
ML
5067static void set_multisync_trigger_params(
5068 struct dc_stream_state *stream)
5069{
5070 if (stream->triggered_crtc_reset.enabled) {
5071 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
5072 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
5073 }
5074}
5075
5076static void set_master_stream(struct dc_stream_state *stream_set[],
5077 int stream_count)
5078{
5079 int j, highest_rfr = 0, master_stream = 0;
5080
5081 for (j = 0; j < stream_count; j++) {
5082 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5083 int refresh_rate = 0;
5084
380604e2 5085 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
fa2123db
ML
5086 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5087 if (refresh_rate > highest_rfr) {
5088 highest_rfr = refresh_rate;
5089 master_stream = j;
5090 }
5091 }
5092 }
5093 for (j = 0; j < stream_count; j++) {
03736f4c 5094 if (stream_set[j])
fa2123db
ML
5095 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5096 }
5097}
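/*
 * Worked example (illustrative): refresh_rate above is
 * pix_clk_100hz * 100 / (h_total * v_total). For a CEA 1080p60 timing,
 * pix_clk_100hz = 1485000 (148.5 MHz), h_total = 2200, v_total = 1125:
 * 1485000 * 100 / (2200 * 1125) = 60 Hz. The stream with the highest rate
 * becomes the triggered-CRTC-reset event source for all synced streams.
 */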
5098
5099static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5100{
5101 int i = 0;
5102
5103 if (context->stream_count < 2)
5104 return;
5105	for (i = 0; i < context->stream_count; i++) {
5106 if (!context->streams[i])
5107 continue;
1f6010a9
DF
5108 /*
5109 * TODO: add a function to read AMD VSDB bits and set
fa2123db 5110 * crtc_sync_master.multi_sync_enabled flag
1f6010a9 5111 * For now it's set to false
fa2123db
ML
5112 */
5113 set_multisync_trigger_params(context->streams[i]);
5114 }
5115 set_master_stream(context->streams, context->stream_count);
5116}
5117
3ee6b26b
AD
5118static struct dc_stream_state *
5119create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5120 const struct drm_display_mode *drm_mode,
b333730d 5121 const struct dm_connector_state *dm_state,
cbd14ae7
SW
5122 const struct dc_stream_state *old_stream,
5123 int requested_bpc)
e7b07cee
HW
5124{
5125 struct drm_display_mode *preferred_mode = NULL;
391ef035 5126 struct drm_connector *drm_connector;
42ba01fc
NK
5127 const struct drm_connector_state *con_state =
5128 dm_state ? &dm_state->base : NULL;
0971c40e 5129 struct dc_stream_state *stream = NULL;
e7b07cee
HW
5130 struct drm_display_mode mode = *drm_mode;
5131 bool native_mode_found = false;
b333730d
BL
5132 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5133 int mode_refresh;
58124bf8 5134 int preferred_refresh = 0;
defeb878 5135#if defined(CONFIG_DRM_AMD_DC_DCN)
df2f1015
DF
5136 struct dsc_dec_dpcd_caps dsc_caps;
5137 uint32_t link_bandwidth_kbps;
7c431455 5138#endif
aed15309 5139 struct dc_sink *sink = NULL;
b830ebc9 5140 if (aconnector == NULL) {
e7b07cee 5141 DRM_ERROR("aconnector is NULL!\n");
64245fa7 5142 return stream;
e7b07cee
HW
5143 }
5144
e7b07cee 5145 drm_connector = &aconnector->base;
2e0ac3d6 5146
f4ac176e 5147 if (!aconnector->dc_sink) {
e3fa5c4c
JFZ
5148 sink = create_fake_sink(aconnector);
5149 if (!sink)
5150 return stream;
aed15309
ML
5151 } else {
5152 sink = aconnector->dc_sink;
dcd5fb82 5153 dc_sink_retain(sink);
f4ac176e 5154 }
2e0ac3d6 5155
aed15309 5156 stream = dc_create_stream_for_sink(sink);
4562236b 5157
b830ebc9 5158 if (stream == NULL) {
e7b07cee 5159 DRM_ERROR("Failed to create stream for sink!\n");
aed15309 5160 goto finish;
e7b07cee
HW
5161 }
5162
ceb3dbb4
JL
5163 stream->dm_stream_context = aconnector;
5164
4a36fcba
WL
5165 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5166 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5167
e7b07cee
HW
5168 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5169 /* Search for preferred mode */
5170 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5171 native_mode_found = true;
5172 break;
5173 }
5174 }
5175 if (!native_mode_found)
5176 preferred_mode = list_first_entry_or_null(
5177 &aconnector->base.modes,
5178 struct drm_display_mode,
5179 head);
5180
b333730d
BL
5181 mode_refresh = drm_mode_vrefresh(&mode);
5182
b830ebc9 5183 if (preferred_mode == NULL) {
1f6010a9
DF
5184 /*
5185		 * This may not be an error; the use case is when we have no
e7b07cee
HW
5186 * usermode calls to reset and set mode upon hotplug. In this
5187 * case, we call set mode ourselves to restore the previous mode
5188		 * and the mode list may not be filled in yet.
5189 */
f1ad2f5e 5190 DRM_DEBUG_DRIVER("No preferred mode found\n");
e7b07cee
HW
5191 } else {
5192 decide_crtc_timing_for_drm_display_mode(
5193 &mode, preferred_mode,
f4791779 5194 dm_state ? (dm_state->scaling != RMX_OFF) : false);
58124bf8 5195 preferred_refresh = drm_mode_vrefresh(preferred_mode);
e7b07cee
HW
5196 }
5197
f783577c
JFZ
5198 if (!dm_state)
5199 drm_mode_set_crtcinfo(&mode, 0);
5200
b333730d
BL
5201 /*
5202 * If scaling is enabled and refresh rate didn't change
5203 * we copy the vic and polarities of the old timings
5204 */
5205 if (!scale || mode_refresh != preferred_refresh)
5206 fill_stream_properties_from_drm_display_mode(stream,
cbd14ae7 5207 &mode, &aconnector->base, con_state, NULL, requested_bpc);
b333730d
BL
5208 else
5209 fill_stream_properties_from_drm_display_mode(stream,
cbd14ae7 5210 &mode, &aconnector->base, con_state, old_stream, requested_bpc);
b333730d 5211
df2f1015
DF
5212 stream->timing.flags.DSC = 0;
5213
5214 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
defeb878 5215#if defined(CONFIG_DRM_AMD_DC_DCN)
2af0f378
NC
5216 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5217 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6d824ed5 5218 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
df2f1015
DF
5219 &dsc_caps);
5220 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5221 dc_link_get_link_cap(aconnector->dc_link));
5222
0749ddeb 5223 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
bcc6aa61 5224 /* Set DSC policy according to dsc_clock_en */
0749ddeb
EB
5225 dc_dsc_policy_set_enable_dsc_when_not_needed(
5226 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
bcc6aa61 5227
0417df16 5228 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
df2f1015 5229 &dsc_caps,
0417df16 5230 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
9abdf392 5231 0,
df2f1015
DF
5232 link_bandwidth_kbps,
5233 &stream->timing,
5234 &stream->timing.dsc_cfg))
5235 stream->timing.flags.DSC = 1;
27e84dd7 5236 /* Overwrite the stream flag if DSC is enabled through debugfs */
0749ddeb 5237 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
097e6d98 5238 stream->timing.flags.DSC = 1;
734e4c97 5239
28b2f656
EB
5240 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5241 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
734e4c97 5242
28b2f656
EB
5243 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5244 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5268bf13
EB
5245
5246 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5247 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
097e6d98 5248 }
39a4eb85 5249#endif
df2f1015 5250 }
39a4eb85 5251
e7b07cee
HW
5252 update_stream_scaling_settings(&mode, dm_state, stream);
5253
5254 fill_audio_info(
5255 &stream->audio_info,
5256 drm_connector,
aed15309 5257 sink);
e7b07cee 5258
ceb3dbb4 5259 update_stream_signal(stream, sink);
9182b4cb 5260
d832fc3b 5261 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
75f77aaf
WL
5262 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5263
8a488f5d
RL
5264 if (stream->link->psr_settings.psr_feature_enabled) {
5265 //
5266 // should decide stream support vsc sdp colorimetry capability
5267 // before building vsc info packet
5268 //
5269 stream->use_vsc_sdp_for_colorimetry = false;
5270 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5271 stream->use_vsc_sdp_for_colorimetry =
5272 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5273 } else {
5274 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5275 stream->use_vsc_sdp_for_colorimetry = true;
8c322309 5276 }
8a488f5d 5277 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
8c322309 5278 }
aed15309 5279finish:
dcd5fb82 5280 dc_sink_release(sink);
9e3efe3e 5281
e7b07cee
HW
5282 return stream;
5283}
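/*
 * DSC recap for the DCN-only block above: DSC is enabled when the sink's
 * DPCD advertises support and dc_dsc_compute_config() finds a configuration
 * that fits the timing within link_bandwidth_kbps; the debugfs dsc_settings
 * (force enable, slice counts, bits_per_pixel) then override the computed
 * values where they are set.
 */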
5284
7578ecda 5285static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
e7b07cee
HW
5286{
5287 drm_crtc_cleanup(crtc);
5288 kfree(crtc);
5289}
5290
5291static void dm_crtc_destroy_state(struct drm_crtc *crtc,
3ee6b26b 5292 struct drm_crtc_state *state)
e7b07cee
HW
5293{
5294 struct dm_crtc_state *cur = to_dm_crtc_state(state);
5295
5296	/* TODO: Destroy dc_stream objects once the stream object is flattened */
5297 if (cur->stream)
5298 dc_stream_release(cur->stream);
5299
5300
5301 __drm_atomic_helper_crtc_destroy_state(state);
5302
5303
5304 kfree(state);
5305}
5306
5307static void dm_crtc_reset_state(struct drm_crtc *crtc)
5308{
5309 struct dm_crtc_state *state;
5310
5311 if (crtc->state)
5312 dm_crtc_destroy_state(crtc, crtc->state);
5313
5314 state = kzalloc(sizeof(*state), GFP_KERNEL);
5315 if (WARN_ON(!state))
5316 return;
5317
1f8a52ec 5318 __drm_atomic_helper_crtc_reset(crtc, &state->base);
e7b07cee
HW
5319}
5320
5321static struct drm_crtc_state *
5322dm_crtc_duplicate_state(struct drm_crtc *crtc)
5323{
5324 struct dm_crtc_state *state, *cur;
5325
5326 cur = to_dm_crtc_state(crtc->state);
5327
5328 if (WARN_ON(!crtc->state))
5329 return NULL;
5330
2004f45e 5331 state = kzalloc(sizeof(*state), GFP_KERNEL);
2a55f096
ES
5332 if (!state)
5333 return NULL;
e7b07cee
HW
5334
5335 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5336
5337 if (cur->stream) {
5338 state->stream = cur->stream;
5339 dc_stream_retain(state->stream);
5340 }
5341
d6ef9b41 5342 state->active_planes = cur->active_planes;
98e6436d 5343 state->vrr_infopacket = cur->vrr_infopacket;
c1ee92f9 5344 state->abm_level = cur->abm_level;
bb47de73
NK
5345 state->vrr_supported = cur->vrr_supported;
5346 state->freesync_config = cur->freesync_config;
14b25846 5347 state->crc_src = cur->crc_src;
cf020d49
NK
5348 state->cm_has_degamma = cur->cm_has_degamma;
5349 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
e2881d6d 5350
e7b07cee
HW
5351	/* TODO: Duplicate dc_stream once the stream object is flattened */
5352
5353 return &state->base;
5354}
5355
d2574c33
MK
5356static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5357{
5358 enum dc_irq_source irq_source;
5359 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
1348969a 5360 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
d2574c33
MK
5361 int rc;
5362
5363 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5364
5365 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5366
5367 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
5368 acrtc->crtc_id, enable ? "en" : "dis", rc);
5369 return rc;
5370}
589d2739
HW
5371
5372static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5373{
5374 enum dc_irq_source irq_source;
5375 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
1348969a 5376 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
d2574c33 5377 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
71338cb4 5378 struct amdgpu_display_manager *dm = &adev->dm;
d2574c33
MK
5379 int rc = 0;
5380
5381 if (enable) {
5382 /* vblank irq on -> Only need vupdate irq in vrr mode */
5383 if (amdgpu_dm_vrr_active(acrtc_state))
5384 rc = dm_set_vupdate_irq(crtc, true);
5385 } else {
5386 /* vblank irq off -> vupdate irq off */
5387 rc = dm_set_vupdate_irq(crtc, false);
5388 }
5389
5390 if (rc)
5391 return rc;
589d2739
HW
5392
5393 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
71338cb4
BL
5394
5395 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5396 return -EBUSY;
5397
98ab5f35
BL
5398 if (amdgpu_in_reset(adev))
5399 return 0;
5400
71338cb4
BL
5401 mutex_lock(&dm->dc_lock);
5402
5403 if (enable)
5404 dm->active_vblank_irq_count++;
5405 else
5406 dm->active_vblank_irq_count--;
5407
4928b480 5408#if defined(CONFIG_DRM_AMD_DC_DCN)
71338cb4
BL
5409 dc_allow_idle_optimizations(
5410 adev->dm.dc, dm->active_vblank_irq_count == 0 ? true : false);
5411
5412 DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
4928b480 5413#endif
71338cb4
BL
5414
5415 mutex_unlock(&dm->dc_lock);
5416
5417 return 0;
589d2739
HW
5418}
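/*
 * Note on the bookkeeping above: the vupdate interrupt is only kept on while
 * vblank is on *and* VRR is active, and active_vblank_irq_count tracks how
 * many CRTCs still need vblank so that DC is allowed to enter idle
 * optimizations (MALL) only once the count drops to zero.
 */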
5419
5420static int dm_enable_vblank(struct drm_crtc *crtc)
5421{
5422 return dm_set_vblank(crtc, true);
5423}
5424
5425static void dm_disable_vblank(struct drm_crtc *crtc)
5426{
5427 dm_set_vblank(crtc, false);
5428}
5429
e7b07cee
HW
5430/* Only the options currently available to the driver are implemented */
5431static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5432 .reset = dm_crtc_reset_state,
5433 .destroy = amdgpu_dm_crtc_destroy,
e7b07cee
HW
5434 .set_config = drm_atomic_helper_set_config,
5435 .page_flip = drm_atomic_helper_page_flip,
5436 .atomic_duplicate_state = dm_crtc_duplicate_state,
5437 .atomic_destroy_state = dm_crtc_destroy_state,
31aec354 5438 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
3b3b8448 5439 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
8fb843d1 5440 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
e3eff4b5 5441 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
589d2739
HW
5442 .enable_vblank = dm_enable_vblank,
5443 .disable_vblank = dm_disable_vblank,
e3eff4b5 5444 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
e7b07cee
HW
5445};
5446
5447static enum drm_connector_status
5448amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5449{
5450 bool connected;
c84dec2f 5451 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 5452
1f6010a9
DF
5453 /*
5454 * Notes:
e7b07cee
HW
5455 * 1. This interface is NOT called in context of HPD irq.
5456	 * 2. This interface *is called* in context of a user-mode ioctl, which
1f6010a9
DF
5457 * makes it a bad place for *any* MST-related activity.
5458 */
e7b07cee 5459
8580d60b
HW
5460 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5461 !aconnector->fake_enable)
e7b07cee
HW
5462 connected = (aconnector->dc_sink != NULL);
5463 else
5464 connected = (aconnector->base.force == DRM_FORCE_ON);
5465
0f877894
OV
5466 update_subconnector_property(aconnector);
5467
e7b07cee
HW
5468 return (connected ? connector_status_connected :
5469 connector_status_disconnected);
5470}
5471
3ee6b26b
AD
5472int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5473 struct drm_connector_state *connector_state,
5474 struct drm_property *property,
5475 uint64_t val)
e7b07cee
HW
5476{
5477 struct drm_device *dev = connector->dev;
1348969a 5478 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
5479 struct dm_connector_state *dm_old_state =
5480 to_dm_connector_state(connector->state);
5481 struct dm_connector_state *dm_new_state =
5482 to_dm_connector_state(connector_state);
5483
5484 int ret = -EINVAL;
5485
5486 if (property == dev->mode_config.scaling_mode_property) {
5487 enum amdgpu_rmx_type rmx_type;
5488
5489 switch (val) {
5490 case DRM_MODE_SCALE_CENTER:
5491 rmx_type = RMX_CENTER;
5492 break;
5493 case DRM_MODE_SCALE_ASPECT:
5494 rmx_type = RMX_ASPECT;
5495 break;
5496 case DRM_MODE_SCALE_FULLSCREEN:
5497 rmx_type = RMX_FULL;
5498 break;
5499 case DRM_MODE_SCALE_NONE:
5500 default:
5501 rmx_type = RMX_OFF;
5502 break;
5503 }
5504
5505 if (dm_old_state->scaling == rmx_type)
5506 return 0;
5507
5508 dm_new_state->scaling = rmx_type;
5509 ret = 0;
5510 } else if (property == adev->mode_info.underscan_hborder_property) {
5511 dm_new_state->underscan_hborder = val;
5512 ret = 0;
5513 } else if (property == adev->mode_info.underscan_vborder_property) {
5514 dm_new_state->underscan_vborder = val;
5515 ret = 0;
5516 } else if (property == adev->mode_info.underscan_property) {
5517 dm_new_state->underscan_enable = val;
5518 ret = 0;
c1ee92f9
DF
5519 } else if (property == adev->mode_info.abm_level_property) {
5520 dm_new_state->abm_level = val;
5521 ret = 0;
e7b07cee
HW
5522 }
5523
5524 return ret;
5525}
5526
3ee6b26b
AD
5527int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5528 const struct drm_connector_state *state,
5529 struct drm_property *property,
5530 uint64_t *val)
e7b07cee
HW
5531{
5532 struct drm_device *dev = connector->dev;
1348969a 5533 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
5534 struct dm_connector_state *dm_state =
5535 to_dm_connector_state(state);
5536 int ret = -EINVAL;
5537
5538 if (property == dev->mode_config.scaling_mode_property) {
5539 switch (dm_state->scaling) {
5540 case RMX_CENTER:
5541 *val = DRM_MODE_SCALE_CENTER;
5542 break;
5543 case RMX_ASPECT:
5544 *val = DRM_MODE_SCALE_ASPECT;
5545 break;
5546 case RMX_FULL:
5547 *val = DRM_MODE_SCALE_FULLSCREEN;
5548 break;
5549 case RMX_OFF:
5550 default:
5551 *val = DRM_MODE_SCALE_NONE;
5552 break;
5553 }
5554 ret = 0;
5555 } else if (property == adev->mode_info.underscan_hborder_property) {
5556 *val = dm_state->underscan_hborder;
5557 ret = 0;
5558 } else if (property == adev->mode_info.underscan_vborder_property) {
5559 *val = dm_state->underscan_vborder;
5560 ret = 0;
5561 } else if (property == adev->mode_info.underscan_property) {
5562 *val = dm_state->underscan_enable;
5563 ret = 0;
c1ee92f9
DF
5564 } else if (property == adev->mode_info.abm_level_property) {
5565 *val = dm_state->abm_level;
5566 ret = 0;
e7b07cee 5567 }
c1ee92f9 5568
e7b07cee
HW
5569 return ret;
5570}
5571
526c654a
ED
5572static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5573{
5574 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5575
5576 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5577}
5578
7578ecda 5579static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
e7b07cee 5580{
c84dec2f 5581 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 5582 const struct dc_link *link = aconnector->dc_link;
1348969a 5583 struct amdgpu_device *adev = drm_to_adev(connector->dev);
e7b07cee 5584 struct amdgpu_display_manager *dm = &adev->dm;
ada8ce15 5585
5dff80bd
AG
5586 /*
5587	 * Call only if mst_mgr was initialized earlier, since it's not done
5588 * for all connector types.
5589 */
5590 if (aconnector->mst_mgr.dev)
5591 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5592
e7b07cee
HW
5593#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5594 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5595
89fc8d4e 5596 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5cd29ed0
HW
5597 link->type != dc_connection_none &&
5598 dm->backlight_dev) {
5599 backlight_device_unregister(dm->backlight_dev);
5600 dm->backlight_dev = NULL;
e7b07cee
HW
5601 }
5602#endif
dcd5fb82
MF
5603
5604 if (aconnector->dc_em_sink)
5605 dc_sink_release(aconnector->dc_em_sink);
5606 aconnector->dc_em_sink = NULL;
5607 if (aconnector->dc_sink)
5608 dc_sink_release(aconnector->dc_sink);
5609 aconnector->dc_sink = NULL;
5610
e86e8947 5611 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
e7b07cee
HW
5612 drm_connector_unregister(connector);
5613 drm_connector_cleanup(connector);
526c654a
ED
5614 if (aconnector->i2c) {
5615 i2c_del_adapter(&aconnector->i2c->base);
5616 kfree(aconnector->i2c);
5617 }
7daec99f 5618 kfree(aconnector->dm_dp_aux.aux.name);
526c654a 5619
e7b07cee
HW
5620 kfree(connector);
5621}
5622
5623void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5624{
5625 struct dm_connector_state *state =
5626 to_dm_connector_state(connector->state);
5627
df099b9b
LSL
5628 if (connector->state)
5629 __drm_atomic_helper_connector_destroy_state(connector->state);
5630
e7b07cee
HW
5631 kfree(state);
5632
5633 state = kzalloc(sizeof(*state), GFP_KERNEL);
5634
5635 if (state) {
5636 state->scaling = RMX_OFF;
5637 state->underscan_enable = false;
5638 state->underscan_hborder = 0;
5639 state->underscan_vborder = 0;
01933ba4 5640 state->base.max_requested_bpc = 8;
3261e013
ML
5641 state->vcpi_slots = 0;
5642 state->pbn = 0;
c3e50f89
NK
5643 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5644 state->abm_level = amdgpu_dm_abm_level;
5645
df099b9b 5646 __drm_atomic_helper_connector_reset(connector, &state->base);
e7b07cee
HW
5647 }
5648}
5649
3ee6b26b
AD
5650struct drm_connector_state *
5651amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
e7b07cee
HW
5652{
5653 struct dm_connector_state *state =
5654 to_dm_connector_state(connector->state);
5655
5656 struct dm_connector_state *new_state =
5657 kmemdup(state, sizeof(*state), GFP_KERNEL);
5658
98e6436d
AK
5659 if (!new_state)
5660 return NULL;
e7b07cee 5661
98e6436d
AK
5662 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5663
5664 new_state->freesync_capable = state->freesync_capable;
c1ee92f9 5665 new_state->abm_level = state->abm_level;
922454c2
NK
5666 new_state->scaling = state->scaling;
5667 new_state->underscan_enable = state->underscan_enable;
5668 new_state->underscan_hborder = state->underscan_hborder;
5669 new_state->underscan_vborder = state->underscan_vborder;
3261e013
ML
5670 new_state->vcpi_slots = state->vcpi_slots;
5671 new_state->pbn = state->pbn;
98e6436d 5672 return &new_state->base;
e7b07cee
HW
5673}
5674
14f04fa4
AD
5675static int
5676amdgpu_dm_connector_late_register(struct drm_connector *connector)
5677{
5678 struct amdgpu_dm_connector *amdgpu_dm_connector =
5679 to_amdgpu_dm_connector(connector);
00a8037e 5680 int r;
14f04fa4 5681
00a8037e
AD
5682 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5683 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5684 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5685 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5686 if (r)
5687 return r;
5688 }
5689
5690#if defined(CONFIG_DEBUG_FS)
14f04fa4
AD
5691 connector_debugfs_init(amdgpu_dm_connector);
5692#endif
5693
5694 return 0;
5695}
5696
e7b07cee
HW
5697static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5698 .reset = amdgpu_dm_connector_funcs_reset,
5699 .detect = amdgpu_dm_connector_detect,
5700 .fill_modes = drm_helper_probe_single_connector_modes,
5701 .destroy = amdgpu_dm_connector_destroy,
5702 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5703 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5704 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
526c654a 5705 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
14f04fa4 5706 .late_register = amdgpu_dm_connector_late_register,
526c654a 5707 .early_unregister = amdgpu_dm_connector_unregister
e7b07cee
HW
5708};
5709
e7b07cee
HW
5710static int get_modes(struct drm_connector *connector)
5711{
5712 return amdgpu_dm_connector_get_modes(connector);
5713}
5714
c84dec2f 5715static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
5716{
5717 struct dc_sink_init_data init_params = {
5718 .link = aconnector->dc_link,
5719 .sink_signal = SIGNAL_TYPE_VIRTUAL
5720 };
70e8ffc5 5721 struct edid *edid;
e7b07cee 5722
a89ff457 5723 if (!aconnector->base.edid_blob_ptr) {
e7b07cee
HW
5724 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
5725 aconnector->base.name);
5726
5727 aconnector->base.force = DRM_FORCE_OFF;
5728 aconnector->base.override_edid = false;
5729 return;
5730 }
5731
70e8ffc5
HW
5732 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5733
e7b07cee
HW
5734 aconnector->edid = edid;
5735
5736 aconnector->dc_em_sink = dc_link_add_remote_sink(
5737 aconnector->dc_link,
5738 (uint8_t *)edid,
5739 (edid->extensions + 1) * EDID_LENGTH,
5740 &init_params);
5741
dcd5fb82 5742 if (aconnector->base.force == DRM_FORCE_ON) {
e7b07cee
HW
5743 aconnector->dc_sink = aconnector->dc_link->local_sink ?
5744 aconnector->dc_link->local_sink :
5745 aconnector->dc_em_sink;
dcd5fb82
MF
5746 dc_sink_retain(aconnector->dc_sink);
5747 }
e7b07cee
HW
5748}
5749
c84dec2f 5750static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
5751{
5752 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5753
1f6010a9
DF
5754 /*
5755	 * In case of a headless boot with a forced-on DP managed connector,
e7b07cee
HW
5756	 * those settings have to be != 0 to get an initial modeset.
5757 */
5758 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5759 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5760 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5761 }
5762
5763
5764 aconnector->base.override_edid = true;
5765 create_eml_sink(aconnector);
5766}
5767
cbd14ae7
SW
5768static struct dc_stream_state *
5769create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5770 const struct drm_display_mode *drm_mode,
5771 const struct dm_connector_state *dm_state,
5772 const struct dc_stream_state *old_stream)
5773{
5774 struct drm_connector *connector = &aconnector->base;
1348969a 5775 struct amdgpu_device *adev = drm_to_adev(connector->dev);
cbd14ae7 5776 struct dc_stream_state *stream;
4b7da34b
SW
5777 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5778 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
cbd14ae7
SW
5779 enum dc_status dc_result = DC_OK;
5780
5781 do {
5782 stream = create_stream_for_sink(aconnector, drm_mode,
5783 dm_state, old_stream,
5784 requested_bpc);
5785 if (stream == NULL) {
5786 DRM_ERROR("Failed to create stream for sink!\n");
5787 break;
5788 }
5789
5790 dc_result = dc_validate_stream(adev->dm.dc, stream);
5791
5792 if (dc_result != DC_OK) {
74a16675 5793 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
cbd14ae7
SW
5794 drm_mode->hdisplay,
5795 drm_mode->vdisplay,
5796 drm_mode->clock,
74a16675
RS
5797 dc_result,
5798 dc_status_to_str(dc_result));
cbd14ae7
SW
5799
5800 dc_stream_release(stream);
5801 stream = NULL;
5802 requested_bpc -= 2; /* lower bpc to retry validation */
5803 }
5804
5805 } while (stream == NULL && requested_bpc >= 6);
5806
5807 return stream;
5808}
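/*
 * Worked example (illustrative): with max_requested_bpc = 10 the loop above
 * attempts validation at 10 bpc, then 8, then 6, releasing each rejected
 * stream; it returns the first stream DC validates, or NULL if even 6 bpc
 * fails.
 */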
5809
ba9ca088 5810enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
3ee6b26b 5811 struct drm_display_mode *mode)
e7b07cee
HW
5812{
5813 int result = MODE_ERROR;
5814 struct dc_sink *dc_sink;
e7b07cee 5815 /* TODO: Unhardcode stream count */
0971c40e 5816 struct dc_stream_state *stream;
c84dec2f 5817 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
5818
5819 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5820 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
5821 return result;
5822
1f6010a9
DF
5823 /*
5824	 * Only run this the first time mode_valid is called, to initialize
e7b07cee
HW
5825 * EDID mgmt
5826 */
5827 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5828 !aconnector->dc_em_sink)
5829 handle_edid_mgmt(aconnector);
5830
c84dec2f 5831 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
e7b07cee 5832
ad975f44
VL
5833 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
5834 aconnector->base.force != DRM_FORCE_ON) {
e7b07cee
HW
5835 DRM_ERROR("dc_sink is NULL!\n");
5836 goto fail;
5837 }
5838
cbd14ae7
SW
5839 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5840 if (stream) {
5841 dc_stream_release(stream);
e7b07cee 5842 result = MODE_OK;
cbd14ae7 5843 }
e7b07cee
HW
5844
5845fail:
5846	/* TODO: error handling */
5847 return result;
5848}
5849
88694af9
NK
5850static int fill_hdr_info_packet(const struct drm_connector_state *state,
5851 struct dc_info_packet *out)
5852{
5853 struct hdmi_drm_infoframe frame;
5854 unsigned char buf[30]; /* 26 + 4 */
5855 ssize_t len;
5856 int ret, i;
5857
5858 memset(out, 0, sizeof(*out));
5859
5860 if (!state->hdr_output_metadata)
5861 return 0;
5862
5863 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5864 if (ret)
5865 return ret;
5866
5867 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5868 if (len < 0)
5869 return (int)len;
5870
5871 /* Static metadata is a fixed 26 bytes + 4 byte header. */
5872 if (len != 30)
5873 return -EINVAL;
5874
5875 /* Prepare the infopacket for DC. */
5876 switch (state->connector->connector_type) {
5877 case DRM_MODE_CONNECTOR_HDMIA:
5878 out->hb0 = 0x87; /* type */
5879 out->hb1 = 0x01; /* version */
5880 out->hb2 = 0x1A; /* length */
5881 out->sb[0] = buf[3]; /* checksum */
5882 i = 1;
5883 break;
5884
5885 case DRM_MODE_CONNECTOR_DisplayPort:
5886 case DRM_MODE_CONNECTOR_eDP:
5887 out->hb0 = 0x00; /* sdp id, zero */
5888 out->hb1 = 0x87; /* type */
5889 out->hb2 = 0x1D; /* payload len - 1 */
5890 out->hb3 = (0x13 << 2); /* sdp version */
5891 out->sb[0] = 0x01; /* version */
5892 out->sb[1] = 0x1A; /* length */
5893 i = 2;
5894 break;
5895
5896 default:
5897 return -EINVAL;
5898 }
5899
5900 memcpy(&out->sb[i], &buf[4], 26);
5901 out->valid = true;
5902
5903 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5904 sizeof(out->sb), false);
5905
5906 return 0;
5907}
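/*
 * Layout recap for fill_hdr_info_packet(): the packed HDMI DRM infoframe is
 * 30 bytes (4-byte header + 26-byte static metadata payload). For HDMI the
 * DC header carries type/version/length and buf[3] (the checksum) moves into
 * sb[0], so the metadata lands at sb[1..26]; for DP/eDP the payload is
 * wrapped in an SDP header instead and the metadata starts at sb[2].
 */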
5908
5909static bool
5910is_hdr_metadata_different(const struct drm_connector_state *old_state,
5911 const struct drm_connector_state *new_state)
5912{
5913 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5914 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5915
5916 if (old_blob != new_blob) {
5917 if (old_blob && new_blob &&
5918 old_blob->length == new_blob->length)
5919 return memcmp(old_blob->data, new_blob->data,
5920 old_blob->length);
5921
5922 return true;
5923 }
5924
5925 return false;
5926}
5927
5928static int
5929amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
51e857af 5930 struct drm_atomic_state *state)
88694af9 5931{
51e857af
SP
5932 struct drm_connector_state *new_con_state =
5933 drm_atomic_get_new_connector_state(state, conn);
88694af9
NK
5934 struct drm_connector_state *old_con_state =
5935 drm_atomic_get_old_connector_state(state, conn);
5936 struct drm_crtc *crtc = new_con_state->crtc;
5937 struct drm_crtc_state *new_crtc_state;
5938 int ret;
5939
e8a98235
RS
5940 trace_amdgpu_dm_connector_atomic_check(new_con_state);
5941
88694af9
NK
5942 if (!crtc)
5943 return 0;
5944
5945 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5946 struct dc_info_packet hdr_infopacket;
5947
5948 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5949 if (ret)
5950 return ret;
5951
5952 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5953 if (IS_ERR(new_crtc_state))
5954 return PTR_ERR(new_crtc_state);
5955
5956 /*
5957 * DC considers the stream backends changed if the
5958 * static metadata changes. Forcing the modeset also
5959 * gives a simple way for userspace to switch from
b232d4ed
NK
5960 * 8bpc to 10bpc when setting the metadata to enter
5961 * or exit HDR.
5962 *
5963 * Changing the static metadata after it's been
5964 * set is permissible, however. So only force a
5965 * modeset if we're entering or exiting HDR.
88694af9 5966 */
b232d4ed
NK
5967 new_crtc_state->mode_changed =
5968 !old_con_state->hdr_output_metadata ||
5969 !new_con_state->hdr_output_metadata;
88694af9
NK
5970 }
5971
5972 return 0;
5973}
5974
e7b07cee
HW
5975static const struct drm_connector_helper_funcs
5976amdgpu_dm_connector_helper_funcs = {
5977 /*
1f6010a9 5978 * If hotplugging a second bigger display in FB Con mode, bigger resolution
b830ebc9 5979 * modes will be filtered by drm_mode_validate_size(), and those modes
1f6010a9 5980 * are missing after user start lightdm. So we need to renew modes list.
b830ebc9
HW
5981 * in get_modes call back, not just return the modes count
5982 */
e7b07cee
HW
5983 .get_modes = get_modes,
5984 .mode_valid = amdgpu_dm_connector_mode_valid,
88694af9 5985 .atomic_check = amdgpu_dm_connector_atomic_check,
e7b07cee
HW
5986};
5987
5988static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5989{
5990}
5991
d6ef9b41 5992static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
c14a005c
NK
5993{
5994 struct drm_atomic_state *state = new_crtc_state->state;
5995 struct drm_plane *plane;
5996 int num_active = 0;
5997
5998 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5999 struct drm_plane_state *new_plane_state;
6000
6001 /* Cursor planes are "fake". */
6002 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6003 continue;
6004
6005 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6006
6007 if (!new_plane_state) {
6008 /*
6009		 * The plane is enabled on the CRTC and hasn't changed
6010 * state. This means that it previously passed
6011 * validation and is therefore enabled.
6012 */
6013 num_active += 1;
6014 continue;
6015 }
6016
6017 /* We need a framebuffer to be considered enabled. */
6018 num_active += (new_plane_state->fb != NULL);
6019 }
6020
d6ef9b41
NK
6021 return num_active;
6022}
6023
8fe684e9
NK
6024static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6025 struct drm_crtc_state *new_crtc_state)
d6ef9b41
NK
6026{
6027 struct dm_crtc_state *dm_new_crtc_state =
6028 to_dm_crtc_state(new_crtc_state);
6029
6030 dm_new_crtc_state->active_planes = 0;
d6ef9b41
NK
6031
6032 if (!dm_new_crtc_state->stream)
6033 return;
6034
6035 dm_new_crtc_state->active_planes =
6036 count_crtc_active_planes(new_crtc_state);
c14a005c
NK
6037}
6038
3ee6b26b 6039static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
29b77ad7 6040 struct drm_atomic_state *state)
e7b07cee 6041{
29b77ad7
MR
6042 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6043 crtc);
1348969a 6044 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
e7b07cee 6045 struct dc *dc = adev->dm.dc;
29b77ad7 6046 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
e7b07cee
HW
6047 int ret = -EINVAL;
6048
5b8c5969 6049 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
e8a98235 6050
29b77ad7 6051 dm_update_crtc_active_planes(crtc, crtc_state);
d6ef9b41 6052
9b690ef3 6053 if (unlikely(!dm_crtc_state->stream &&
29b77ad7 6054 modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
e7b07cee
HW
6055 WARN_ON(1);
6056 return ret;
6057 }
6058
bc92c065 6059 /*
b836a274
MD
6060 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6061 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6062 * planes are disabled, which is not supported by the hardware. And there is legacy
6063 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
bc92c065 6064 */
29b77ad7 6065 if (crtc_state->enable &&
ea9522f5
SS
6066 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6067 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
c14a005c 6068 return -EINVAL;
ea9522f5 6069 }
c14a005c 6070
b836a274
MD
6071 /* In some use cases, like reset, no stream is attached */
6072 if (!dm_crtc_state->stream)
6073 return 0;
6074
62c933f9 6075 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
e7b07cee
HW
6076 return 0;
6077
ea9522f5 6078 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
e7b07cee
HW
6079 return ret;
6080}
6081
3ee6b26b
AD
6082static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6083 const struct drm_display_mode *mode,
6084 struct drm_display_mode *adjusted_mode)
e7b07cee
HW
6085{
6086 return true;
6087}
6088
6089static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6090 .disable = dm_crtc_helper_disable,
6091 .atomic_check = dm_crtc_helper_atomic_check,
ea702333
TZ
6092 .mode_fixup = dm_crtc_helper_mode_fixup,
6093 .get_scanout_position = amdgpu_crtc_get_scanout_position,
e7b07cee
HW
6094};
6095
6096static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6097{
6098
6099}
6100
3261e013
ML
6101static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6102{
6103 switch (display_color_depth) {
6104 case COLOR_DEPTH_666:
6105 return 6;
6106 case COLOR_DEPTH_888:
6107 return 8;
6108 case COLOR_DEPTH_101010:
6109 return 10;
6110 case COLOR_DEPTH_121212:
6111 return 12;
6112 case COLOR_DEPTH_141414:
6113 return 14;
6114 case COLOR_DEPTH_161616:
6115 return 16;
6116 default:
6117 break;
6118 }
6119 return 0;
6120}
6121
3ee6b26b
AD
6122static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6123 struct drm_crtc_state *crtc_state,
6124 struct drm_connector_state *conn_state)
e7b07cee 6125{
3261e013
ML
6126 struct drm_atomic_state *state = crtc_state->state;
6127 struct drm_connector *connector = conn_state->connector;
6128 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6129 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6130 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6131 struct drm_dp_mst_topology_mgr *mst_mgr;
6132 struct drm_dp_mst_port *mst_port;
6133 enum dc_color_depth color_depth;
6134 int clock, bpp = 0;
1bc22f20 6135 bool is_y420 = false;
3261e013
ML
6136
6137 if (!aconnector->port || !aconnector->dc_sink)
6138 return 0;
6139
6140 mst_port = aconnector->port;
6141 mst_mgr = &aconnector->mst_port->mst_mgr;
6142
6143 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6144 return 0;
6145
6146 if (!state->duplicated) {
cbd14ae7 6147 int max_bpc = conn_state->max_requested_bpc;
1bc22f20
SW
6148 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6149 aconnector->force_yuv420_output;
cbd14ae7
SW
6150 color_depth = convert_color_depth_from_display_info(connector,
6151 is_y420,
6152 max_bpc);
3261e013
ML
6153 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6154 clock = adjusted_mode->clock;
dc48529f 6155 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
3261e013
ML
6156 }
6157 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6158 mst_mgr,
6159 mst_port,
1c6c1cb5 6160 dm_new_connector_state->pbn,
03ca9600 6161 dm_mst_get_pbn_divider(aconnector->dc_link));
3261e013
ML
6162 if (dm_new_connector_state->vcpi_slots < 0) {
6163 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6164 return dm_new_connector_state->vcpi_slots;
6165 }
e7b07cee
HW
6166 return 0;
6167}
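/*
 * PBN example (illustrative, values from the DP MST helper selftests):
 * drm_dp_calc_pbn_mode() maps clock (kHz) and bpp to payload bandwidth units
 * including the DP 1.2 1.006 overhead margin; a 297000 kHz, 24 bpp stream
 * works out to 1063 PBN. drm_dp_atomic_find_vcpi_slots() then converts that
 * PBN into time slots against the link's PBN divider.
 */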
6168
6169const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6170 .disable = dm_encoder_helper_disable,
6171 .atomic_check = dm_encoder_helper_atomic_check
6172};
6173
d9fe1a4c 6174#if defined(CONFIG_DRM_AMD_DC_DCN)
29b9ba74
ML
6175static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6176 struct dc_state *dc_state)
6177{
6178 struct dc_stream_state *stream = NULL;
6179 struct drm_connector *connector;
6180 struct drm_connector_state *new_con_state, *old_con_state;
6181 struct amdgpu_dm_connector *aconnector;
6182 struct dm_connector_state *dm_conn_state;
6183 int i, j, clock, bpp;
6184 int vcpi, pbn_div, pbn = 0;
6185
6186 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6187
6188 aconnector = to_amdgpu_dm_connector(connector);
6189
6190 if (!aconnector->port)
6191 continue;
6192
6193 if (!new_con_state || !new_con_state->crtc)
6194 continue;
6195
6196 dm_conn_state = to_dm_connector_state(new_con_state);
6197
6198 for (j = 0; j < dc_state->stream_count; j++) {
6199 stream = dc_state->streams[j];
6200 if (!stream)
6201 continue;
6202
6203			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
6204 break;
6205
6206 stream = NULL;
6207 }
6208
6209 if (!stream)
6210 continue;
6211
6212 if (stream->timing.flags.DSC != 1) {
6213 drm_dp_mst_atomic_enable_dsc(state,
6214 aconnector->port,
6215 dm_conn_state->pbn,
6216 0,
6217 false);
6218 continue;
6219 }
6220
6221 pbn_div = dm_mst_get_pbn_divider(stream->link);
6222 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6223 clock = stream->timing.pix_clk_100hz / 10;
6224 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6225 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6226 aconnector->port,
6227 pbn, pbn_div,
6228 true);
6229 if (vcpi < 0)
6230 return vcpi;
6231
6232 dm_conn_state->pbn = pbn;
6233 dm_conn_state->vcpi_slots = vcpi;
6234 }
6235 return 0;
6236}
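/*
 * Note: when DSC is enabled, the PBN above is recomputed from the compressed
 * rate -- dsc_cfg.bits_per_pixel is in units of 1/16 bpp, which is why
 * drm_dp_calc_pbn_mode() is called with dsc = true here, unlike the
 * uncompressed path in dm_encoder_helper_atomic_check().
 */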
d9fe1a4c 6237#endif
29b9ba74 6238
e7b07cee
HW
6239static void dm_drm_plane_reset(struct drm_plane *plane)
6240{
6241 struct dm_plane_state *amdgpu_state = NULL;
6242
6243 if (plane->state)
6244 plane->funcs->atomic_destroy_state(plane, plane->state);
6245
6246 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
f922237d 6247 WARN_ON(amdgpu_state == NULL);
1f6010a9 6248
7ddaef96
NK
6249 if (amdgpu_state)
6250 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
e7b07cee
HW
6251}
6252
6253static struct drm_plane_state *
6254dm_drm_plane_duplicate_state(struct drm_plane *plane)
6255{
6256 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6257
6258 old_dm_plane_state = to_dm_plane_state(plane->state);
6259 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6260 if (!dm_plane_state)
6261 return NULL;
6262
6263 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6264
3be5262e
HW
6265 if (old_dm_plane_state->dc_state) {
6266 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6267 dc_plane_state_retain(dm_plane_state->dc_state);
e7b07cee
HW
6268 }
6269
6270 return &dm_plane_state->base;
6271}
6272
dfd84d90 6273static void dm_drm_plane_destroy_state(struct drm_plane *plane,
3ee6b26b 6274 struct drm_plane_state *state)
e7b07cee
HW
6275{
6276 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6277
3be5262e
HW
6278 if (dm_plane_state->dc_state)
6279 dc_plane_state_release(dm_plane_state->dc_state);
e7b07cee 6280
0627bbd3 6281 drm_atomic_helper_plane_destroy_state(plane, state);
e7b07cee
HW
6282}
6283
6284static const struct drm_plane_funcs dm_plane_funcs = {
6285 .update_plane = drm_atomic_helper_update_plane,
6286 .disable_plane = drm_atomic_helper_disable_plane,
02680efb 6287 .destroy = drm_primary_helper_destroy,
e7b07cee
HW
6288 .reset = dm_drm_plane_reset,
6289 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
6290 .atomic_destroy_state = dm_drm_plane_destroy_state,
faa37f54 6291 .format_mod_supported = dm_plane_format_mod_supported,
e7b07cee
HW
6292};
6293
3ee6b26b
AD
6294static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6295 struct drm_plane_state *new_state)
e7b07cee
HW
6296{
6297 struct amdgpu_framebuffer *afb;
6298 struct drm_gem_object *obj;
5d43be0c 6299 struct amdgpu_device *adev;
e7b07cee 6300 struct amdgpu_bo *rbo;
e7b07cee 6301 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
0f257b09
CZ
6302 struct list_head list;
6303 struct ttm_validate_buffer tv;
6304 struct ww_acquire_ctx ticket;
5d43be0c
CK
6305 uint32_t domain;
6306 int r;
e7b07cee
HW
6307
6308 if (!new_state->fb) {
f1ad2f5e 6309 DRM_DEBUG_DRIVER("No FB bound\n");
e7b07cee
HW
6310 return 0;
6311 }
6312
6313 afb = to_amdgpu_framebuffer(new_state->fb);
e68d14dd 6314 obj = new_state->fb->obj[0];
e7b07cee 6315 rbo = gem_to_amdgpu_bo(obj);
5d43be0c 6316 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
0f257b09
CZ
6317 INIT_LIST_HEAD(&list);
6318
6319 tv.bo = &rbo->tbo;
6320 tv.num_shared = 1;
6321 list_add(&tv.head, &list);
6322
9165fb87 6323 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
0f257b09
CZ
6324 if (r) {
6325 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
e7b07cee 6326 return r;
0f257b09 6327 }
e7b07cee 6328
5d43be0c 6329 if (plane->type != DRM_PLANE_TYPE_CURSOR)
f2bd8a0e 6330 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5d43be0c
CK
6331 else
6332 domain = AMDGPU_GEM_DOMAIN_VRAM;
e7b07cee 6333
7b7c6c81 6334 r = amdgpu_bo_pin(rbo, domain);
e7b07cee 6335 if (unlikely(r != 0)) {
30b7c614
HW
6336 if (r != -ERESTARTSYS)
6337 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
0f257b09 6338 ttm_eu_backoff_reservation(&ticket, &list);
e7b07cee
HW
6339 return r;
6340 }
6341
bb812f1e
JZ
6342 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6343 if (unlikely(r != 0)) {
6344 amdgpu_bo_unpin(rbo);
0f257b09 6345 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 6346 DRM_ERROR("%p bind failed\n", rbo);
e7b07cee
HW
6347 return r;
6348 }
7df7e505 6349
0f257b09 6350 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 6351
7b7c6c81 6352 afb->address = amdgpu_bo_gpu_offset(rbo);
e7b07cee
HW
6353
6354 amdgpu_bo_ref(rbo);
6355
cf322b49
NK
6356 /**
6357 * We don't do surface updates on planes that have been newly created,
6358 * but we also don't have the afb->address during atomic check.
6359 *
6360 * Fill in buffer attributes depending on the address here, but only on
6361 * newly created planes since they're not being used by DC yet and this
6362 * won't modify global state.
6363 */
6364 dm_plane_state_old = to_dm_plane_state(plane->state);
6365 dm_plane_state_new = to_dm_plane_state(new_state);
6366
3be5262e 6367 if (dm_plane_state_new->dc_state &&
cf322b49
NK
6368 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6369 struct dc_plane_state *plane_state =
6370 dm_plane_state_new->dc_state;
6371 bool force_disable_dcc = !plane_state->dcc.enable;
e7b07cee 6372
320932bf 6373 fill_plane_buffer_attributes(
695af5f9 6374 adev, afb, plane_state->format, plane_state->rotation,
6eed95b0 6375 afb->tiling_flags,
cf322b49
NK
6376 &plane_state->tiling_info, &plane_state->plane_size,
6377 &plane_state->dcc, &plane_state->address,
6eed95b0 6378 afb->tmz_surface, force_disable_dcc);
e7b07cee
HW
6379 }
6380
e7b07cee
HW
6381 return 0;
6382}
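/*
 * Flow recap for prepare_fb above: the BO is reserved through a one-entry
 * ttm validate list, pinned in a supported domain (cursors are forced to
 * VRAM), bound into GART so amdgpu_bo_gpu_offset() returns a valid address,
 * and only then is afb->address handed to DC via the buffer attributes.
 */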
6383
3ee6b26b
AD
6384static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6385 struct drm_plane_state *old_state)
e7b07cee
HW
6386{
6387 struct amdgpu_bo *rbo;
e7b07cee
HW
6388 int r;
6389
6390 if (!old_state->fb)
6391 return;
6392
e68d14dd 6393 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
e7b07cee
HW
6394 r = amdgpu_bo_reserve(rbo, false);
6395 if (unlikely(r)) {
6396 DRM_ERROR("failed to reserve rbo before unpin\n");
6397 return;
b830ebc9
HW
6398 }
6399
6400 amdgpu_bo_unpin(rbo);
6401 amdgpu_bo_unreserve(rbo);
6402 amdgpu_bo_unref(&rbo);
e7b07cee
HW
6403}
6404
8c44515b
AP
6405static int dm_plane_helper_check_state(struct drm_plane_state *state,
6406 struct drm_crtc_state *new_crtc_state)
6407{
6300b3bd
MK
6408 struct drm_framebuffer *fb = state->fb;
6409 int min_downscale, max_upscale;
6410 int min_scale = 0;
6411 int max_scale = INT_MAX;
6412
40d916a2 6413 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6300b3bd 6414 if (fb && state->crtc) {
40d916a2
NC
6415 /* Validate viewport to cover the case when only the position changes */
6416 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
6417 int viewport_width = state->crtc_w;
6418 int viewport_height = state->crtc_h;
6419
6420 if (state->crtc_x < 0)
6421 viewport_width += state->crtc_x;
6422 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
6423 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
6424
6425 if (state->crtc_y < 0)
6426 viewport_height += state->crtc_y;
6427 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
6428 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
6429
6430		/* If the plane is completely off screen, viewport_width and/or
6431		 * viewport_height will be negative, which still satisfies the
6432		 * condition below and thus also covers those cases.
6433 * x2 for width is because of pipe-split.
6434 */
6435 if (viewport_width < MIN_VIEWPORT_SIZE*2 || viewport_height < MIN_VIEWPORT_SIZE)
6436 return -EINVAL;
6437 }
6438
6439 /* Get min/max allowed scaling factors from plane caps. */
6300b3bd
MK
6440 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6441 &min_downscale, &max_upscale);
6442 /*
6443 * Convert to drm convention: 16.16 fixed point, instead of dc's
6444 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6445 * dst/src, so min_scale = 1.0 / max_upscale, etc.
6446 */
6447 min_scale = (1000 << 16) / max_upscale;
6448 max_scale = (1000 << 16) / min_downscale;
6449 }
8c44515b 6450
8c44515b 6451 return drm_atomic_helper_check_plane_state(
6300b3bd 6452 state, new_crtc_state, min_scale, max_scale, true, true);
8c44515b
AP
6453}
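/*
 * Fixed-point example (illustrative plane caps): DRM scale factors are 16.16
 * fixed point while DC caps use 1.0 == 1000. A max_upscale of 16000 (16x)
 * gives min_scale = (1000 << 16) / 16000 = 4096, i.e. 1/16 in 16.16; a
 * min_downscale of 250 (1/4) gives max_scale = (1000 << 16) / 250 = 262144,
 * i.e. 4.0.
 */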
6454
7578ecda
AD
6455static int dm_plane_atomic_check(struct drm_plane *plane,
6456 struct drm_plane_state *state)
cbd19488 6457{
1348969a 6458 struct amdgpu_device *adev = drm_to_adev(plane->dev);
cbd19488 6459 struct dc *dc = adev->dm.dc;
78171832 6460 struct dm_plane_state *dm_plane_state;
695af5f9 6461 struct dc_scaling_info scaling_info;
8c44515b 6462 struct drm_crtc_state *new_crtc_state;
695af5f9 6463 int ret;
78171832 6464
e8a98235
RS
6465 trace_amdgpu_dm_plane_atomic_check(state);
6466
78171832 6467 dm_plane_state = to_dm_plane_state(state);
cbd19488 6468
3be5262e 6469 if (!dm_plane_state->dc_state)
9a3329b1 6470 return 0;
cbd19488 6471
8c44515b
AP
6472 new_crtc_state =
6473 drm_atomic_get_new_crtc_state(state->state, state->crtc);
6474 if (!new_crtc_state)
6475 return -EINVAL;
6476
6477 ret = dm_plane_helper_check_state(state, new_crtc_state);
6478 if (ret)
6479 return ret;
6480
695af5f9
NK
6481 ret = fill_dc_scaling_info(state, &scaling_info);
6482 if (ret)
6483 return ret;
a05bcff1 6484
62c933f9 6485 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
cbd19488
AG
6486 return 0;
6487
6488 return -EINVAL;
6489}
6490
674e78ac
NK
6491static int dm_plane_atomic_async_check(struct drm_plane *plane,
6492 struct drm_plane_state *new_plane_state)
6493{
6494 /* Only support async updates on cursor planes. */
6495 if (plane->type != DRM_PLANE_TYPE_CURSOR)
6496 return -EINVAL;
6497
6498 return 0;
6499}
6500
6501static void dm_plane_atomic_async_update(struct drm_plane *plane,
6502 struct drm_plane_state *new_state)
6503{
6504 struct drm_plane_state *old_state =
6505 drm_atomic_get_old_plane_state(new_state->state, plane);
6506
e8a98235
RS
6507 trace_amdgpu_dm_atomic_update_cursor(new_state);
6508
332af874 6509 swap(plane->state->fb, new_state->fb);
674e78ac
NK
6510
6511 plane->state->src_x = new_state->src_x;
6512 plane->state->src_y = new_state->src_y;
6513 plane->state->src_w = new_state->src_w;
6514 plane->state->src_h = new_state->src_h;
6515 plane->state->crtc_x = new_state->crtc_x;
6516 plane->state->crtc_y = new_state->crtc_y;
6517 plane->state->crtc_w = new_state->crtc_w;
6518 plane->state->crtc_h = new_state->crtc_h;
6519
6520 handle_cursor_update(plane, old_state);
6521}
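/*
 * Note: the async path above only runs for cursor planes (enforced by
 * dm_plane_atomic_async_check()), so a cursor move swaps the plane state in
 * place and calls handle_cursor_update() without a full atomic commit.
 */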
6522
e7b07cee
HW
6523static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6524 .prepare_fb = dm_plane_helper_prepare_fb,
6525 .cleanup_fb = dm_plane_helper_cleanup_fb,
cbd19488 6526 .atomic_check = dm_plane_atomic_check,
674e78ac
NK
6527 .atomic_async_check = dm_plane_atomic_async_check,
6528 .atomic_async_update = dm_plane_atomic_async_update
e7b07cee
HW
6529};
6530
6531/*
6532 * TODO: these are currently initialized to rgb formats only.
6533 * For future use cases we should either initialize them dynamically based on
6534 * plane capabilities, or initialize this array to all formats, so internal drm
1f6010a9 6535 * check will succeed, and let DC implement proper check
e7b07cee 6536 */
d90371b0 6537static const uint32_t rgb_formats[] = {
e7b07cee
HW
6538 DRM_FORMAT_XRGB8888,
6539 DRM_FORMAT_ARGB8888,
6540 DRM_FORMAT_RGBA8888,
6541 DRM_FORMAT_XRGB2101010,
6542 DRM_FORMAT_XBGR2101010,
6543 DRM_FORMAT_ARGB2101010,
6544 DRM_FORMAT_ABGR2101010,
bcd47f60
MR
6545 DRM_FORMAT_XBGR8888,
6546 DRM_FORMAT_ABGR8888,
46dd9ff7 6547 DRM_FORMAT_RGB565,
e7b07cee
HW
6548};
6549
0d579c7e
NK
6550static const uint32_t overlay_formats[] = {
6551 DRM_FORMAT_XRGB8888,
6552 DRM_FORMAT_ARGB8888,
6553 DRM_FORMAT_RGBA8888,
6554 DRM_FORMAT_XBGR8888,
6555 DRM_FORMAT_ABGR8888,
7267a1a9 6556 DRM_FORMAT_RGB565
e7b07cee
HW
6557};
6558
6559static const u32 cursor_formats[] = {
6560 DRM_FORMAT_ARGB8888
6561};
6562
37c6a93b
NK
6563static int get_plane_formats(const struct drm_plane *plane,
6564 const struct dc_plane_cap *plane_cap,
6565 uint32_t *formats, int max_formats)
e7b07cee 6566{
37c6a93b
NK
6567 int i, num_formats = 0;
6568
6569 /*
6570 * TODO: Query support for each group of formats directly from
6571 * DC plane caps. This will require adding more formats to the
6572 * caps list.
6573 */
e7b07cee 6574
f180b4bc 6575 switch (plane->type) {
e7b07cee 6576 case DRM_PLANE_TYPE_PRIMARY:
37c6a93b
NK
6577 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6578 if (num_formats >= max_formats)
6579 break;
6580
6581 formats[num_formats++] = rgb_formats[i];
6582 }
6583
ea36ad34 6584 if (plane_cap && plane_cap->pixel_format_support.nv12)
37c6a93b 6585 formats[num_formats++] = DRM_FORMAT_NV12;
cbec6477
SW
6586 if (plane_cap && plane_cap->pixel_format_support.p010)
6587 formats[num_formats++] = DRM_FORMAT_P010;
492548dc
SW
6588 if (plane_cap && plane_cap->pixel_format_support.fp16) {
6589 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6590 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
2a5195dc
MK
6591 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6592 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
492548dc 6593 }
e7b07cee 6594 break;
37c6a93b 6595
e7b07cee 6596 case DRM_PLANE_TYPE_OVERLAY:
37c6a93b
NK
6597 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6598 if (num_formats >= max_formats)
6599 break;
6600
6601 formats[num_formats++] = overlay_formats[i];
6602 }
e7b07cee 6603 break;
37c6a93b 6604
e7b07cee 6605 case DRM_PLANE_TYPE_CURSOR:
37c6a93b
NK
6606 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6607 if (num_formats >= max_formats)
6608 break;
6609
6610 formats[num_formats++] = cursor_formats[i];
6611 }
e7b07cee
HW
6612 break;
6613 }
6614
37c6a93b
NK
6615 return num_formats;
6616}
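/*
 * Minimal usage sketch (hypothetical caller, mirroring what
 * amdgpu_dm_plane_init() does below): the helper writes at most
 * max_formats entries and returns the count, so a fixed stack buffer
 * cannot overflow as more formats gain support:
 *
 *	uint32_t formats[32];
 *	int n = get_plane_formats(plane, plane_cap, formats,
 *				  ARRAY_SIZE(formats));
 *	// hand formats[0..n) to drm_universal_plane_init()
 */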
6617
6618static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6619 struct drm_plane *plane,
6620 unsigned long possible_crtcs,
6621 const struct dc_plane_cap *plane_cap)
6622{
6623 uint32_t formats[32];
6624 int num_formats;
6625 int res = -EPERM;
ecc874a6 6626 unsigned int supported_rotations;
faa37f54 6627 uint64_t *modifiers = NULL;
37c6a93b
NK
6628
6629 num_formats = get_plane_formats(plane, plane_cap, formats,
6630 ARRAY_SIZE(formats));
6631
faa37f54
BN
6632 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
6633 if (res)
6634 return res;
6635
4a580877 6636 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
37c6a93b 6637 &dm_plane_funcs, formats, num_formats,
faa37f54
BN
6638 modifiers, plane->type, NULL);
6639 kfree(modifiers);
37c6a93b
NK
6640 if (res)
6641 return res;
6642
cc1fec57
NK
6643 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6644 plane_cap && plane_cap->per_pixel_alpha) {
d74004b6
NK
6645 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6646 BIT(DRM_MODE_BLEND_PREMULTI);
6647
6648 drm_plane_create_alpha_property(plane);
6649 drm_plane_create_blend_mode_property(plane, blend_caps);
6650 }
6651
fc8e5230 6652 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
00755bb7
SW
6653 plane_cap &&
6654 (plane_cap->pixel_format_support.nv12 ||
6655 plane_cap->pixel_format_support.p010)) {
fc8e5230
NK
6656 /* This only affects YUV formats. */
6657 drm_plane_create_color_properties(
6658 plane,
6659 BIT(DRM_COLOR_YCBCR_BT601) |
00755bb7
SW
6660 BIT(DRM_COLOR_YCBCR_BT709) |
6661 BIT(DRM_COLOR_YCBCR_BT2020),
fc8e5230
NK
6662 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6663 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6664 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6665 }
6666
ecc874a6
PLG
6667 supported_rotations =
6668 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6669 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6670
1347385f
SS
6671 if (dm->adev->asic_type >= CHIP_BONAIRE &&
6672 plane->type != DRM_PLANE_TYPE_CURSOR)
f784112f
MR
6673 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6674 supported_rotations);
ecc874a6 6675
f180b4bc 6676 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
e7b07cee 6677
96719c54 6678 /* Create (reset) the plane state */
f180b4bc
HW
6679 if (plane->funcs->reset)
6680 plane->funcs->reset(plane);
96719c54 6681
37c6a93b 6682 return 0;
e7b07cee
HW
6683}
6684
7578ecda
AD
6685static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6686 struct drm_plane *plane,
6687 uint32_t crtc_index)
e7b07cee
HW
6688{
6689 struct amdgpu_crtc *acrtc = NULL;
f180b4bc 6690 struct drm_plane *cursor_plane;
e7b07cee
HW
6691
6692 int res = -ENOMEM;
6693
6694 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6695 if (!cursor_plane)
6696 goto fail;
6697
f180b4bc 6698 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
cc1fec57 6699 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
e7b07cee
HW
6700
6701 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6702 if (!acrtc)
6703 goto fail;
6704
6705 res = drm_crtc_init_with_planes(
6706 dm->ddev,
6707 &acrtc->base,
6708 plane,
f180b4bc 6709 cursor_plane,
e7b07cee
HW
6710 &amdgpu_dm_crtc_funcs, NULL);
6711
6712 if (res)
6713 goto fail;
6714
6715 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6716
96719c54
HW
6717 /* Create (reset) the CRTC state */
6718 if (acrtc->base.funcs->reset)
6719 acrtc->base.funcs->reset(&acrtc->base);
6720
e7b07cee
HW
6721 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6722 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6723
6724 acrtc->crtc_id = crtc_index;
6725 acrtc->base.enabled = false;
c37e2d29 6726 acrtc->otg_inst = -1;
e7b07cee
HW
6727
6728 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
236d0e4f
LSL
6729 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6730 true, MAX_COLOR_LUT_ENTRIES);
086247a4 6731 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
e2881d6d 6732
e7b07cee
HW
6733 return 0;
6734
6735fail:
b830ebc9
HW
6736 kfree(acrtc);
6737 kfree(cursor_plane);
e7b07cee
HW
6738 return res;
6739}
6740
6741
6742static int to_drm_connector_type(enum signal_type st)
6743{
6744 switch (st) {
6745 case SIGNAL_TYPE_HDMI_TYPE_A:
6746 return DRM_MODE_CONNECTOR_HDMIA;
6747 case SIGNAL_TYPE_EDP:
6748 return DRM_MODE_CONNECTOR_eDP;
11c3ee48
AD
6749 case SIGNAL_TYPE_LVDS:
6750 return DRM_MODE_CONNECTOR_LVDS;
e7b07cee
HW
6751 case SIGNAL_TYPE_RGB:
6752 return DRM_MODE_CONNECTOR_VGA;
6753 case SIGNAL_TYPE_DISPLAY_PORT:
6754 case SIGNAL_TYPE_DISPLAY_PORT_MST:
6755 return DRM_MODE_CONNECTOR_DisplayPort;
6756 case SIGNAL_TYPE_DVI_DUAL_LINK:
6757 case SIGNAL_TYPE_DVI_SINGLE_LINK:
6758 return DRM_MODE_CONNECTOR_DVID;
6759 case SIGNAL_TYPE_VIRTUAL:
6760 return DRM_MODE_CONNECTOR_VIRTUAL;
6761
6762 default:
6763 return DRM_MODE_CONNECTOR_Unknown;
6764 }
6765}
6766
2b4c1c05
DV
6767static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6768{
62afb4ad
JRS
6769 struct drm_encoder *encoder;
6770
6771 /* There is only one encoder per connector */
6772 drm_connector_for_each_possible_encoder(connector, encoder)
6773 return encoder;
6774
6775 return NULL;
2b4c1c05
DV
6776}
6777
e7b07cee
HW
6778static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6779{
e7b07cee
HW
6780 struct drm_encoder *encoder;
6781 struct amdgpu_encoder *amdgpu_encoder;
6782
2b4c1c05 6783 encoder = amdgpu_dm_connector_to_encoder(connector);
e7b07cee
HW
6784
6785 if (encoder == NULL)
6786 return;
6787
6788 amdgpu_encoder = to_amdgpu_encoder(encoder);
6789
6790 amdgpu_encoder->native_mode.clock = 0;
6791
6792 if (!list_empty(&connector->probed_modes)) {
6793 struct drm_display_mode *preferred_mode = NULL;
b830ebc9 6794
e7b07cee 6795 list_for_each_entry(preferred_mode,
b830ebc9
HW
6796 &connector->probed_modes,
6797 head) {
6798 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6799 amdgpu_encoder->native_mode = *preferred_mode;
6800
e7b07cee
HW
6801 break;
6802 }
6803
6804 }
6805}
6806
3ee6b26b
AD
6807static struct drm_display_mode *
6808amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6809 char *name,
6810 int hdisplay, int vdisplay)
e7b07cee
HW
6811{
6812 struct drm_device *dev = encoder->dev;
6813 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6814 struct drm_display_mode *mode = NULL;
6815 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6816
6817 mode = drm_mode_duplicate(dev, native_mode);
6818
b830ebc9 6819 if (mode == NULL)
e7b07cee
HW
6820 return NULL;
6821
6822 mode->hdisplay = hdisplay;
6823 mode->vdisplay = vdisplay;
6824 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
090afc1e 6825 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
e7b07cee
HW
6826
6827 return mode;
6828
6829}
6830
6831static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
3ee6b26b 6832 struct drm_connector *connector)
e7b07cee
HW
6833{
6834 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6835 struct drm_display_mode *mode = NULL;
6836 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
c84dec2f
HW
6837 struct amdgpu_dm_connector *amdgpu_dm_connector =
6838 to_amdgpu_dm_connector(connector);
e7b07cee
HW
6839 int i;
6840 int n;
6841 struct mode_size {
6842 char name[DRM_DISPLAY_MODE_LEN];
6843 int w;
6844 int h;
b830ebc9 6845 } common_modes[] = {
e7b07cee
HW
6846 { "640x480", 640, 480},
6847 { "800x600", 800, 600},
6848 { "1024x768", 1024, 768},
6849 { "1280x720", 1280, 720},
6850 { "1280x800", 1280, 800},
6851 {"1280x1024", 1280, 1024},
6852 { "1440x900", 1440, 900},
6853 {"1680x1050", 1680, 1050},
6854 {"1600x1200", 1600, 1200},
6855 {"1920x1080", 1920, 1080},
6856 {"1920x1200", 1920, 1200}
6857 };
6858
b830ebc9 6859 n = ARRAY_SIZE(common_modes);
e7b07cee
HW
6860
6861 for (i = 0; i < n; i++) {
6862 struct drm_display_mode *curmode = NULL;
6863 bool mode_existed = false;
6864
6865 if (common_modes[i].w > native_mode->hdisplay ||
b830ebc9
HW
6866 common_modes[i].h > native_mode->vdisplay ||
6867 (common_modes[i].w == native_mode->hdisplay &&
6868 common_modes[i].h == native_mode->vdisplay))
6869 continue;
e7b07cee
HW
6870
6871 list_for_each_entry(curmode, &connector->probed_modes, head) {
6872 if (common_modes[i].w == curmode->hdisplay &&
b830ebc9 6873 common_modes[i].h == curmode->vdisplay) {
e7b07cee
HW
6874 mode_existed = true;
6875 break;
6876 }
6877 }
6878
6879 if (mode_existed)
6880 continue;
6881
6882 mode = amdgpu_dm_create_common_mode(encoder,
6883 common_modes[i].name, common_modes[i].w,
6884 common_modes[i].h);
6885 drm_mode_probed_add(connector, mode);
c84dec2f 6886 amdgpu_dm_connector->num_modes++;
e7b07cee
HW
6887 }
6888}
6889
3ee6b26b
AD
6890static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6891 struct edid *edid)
e7b07cee 6892{
c84dec2f
HW
6893 struct amdgpu_dm_connector *amdgpu_dm_connector =
6894 to_amdgpu_dm_connector(connector);
e7b07cee
HW
6895
6896 if (edid) {
6897 /* empty probed_modes */
6898 INIT_LIST_HEAD(&connector->probed_modes);
c84dec2f 6899 amdgpu_dm_connector->num_modes =
e7b07cee
HW
6900 drm_add_edid_modes(connector, edid);
6901
f1e5e913
YMM
6902 /* Sort the probed modes before calling
6903 * amdgpu_dm_get_native_mode(), since an EDID can have
6904 * more than one preferred mode. Modes later in the
6905 * probed mode list can have a higher, still preferred,
6906 * resolution: for example, a 3840x2160 preferred timing
6907 * in the base EDID and a 4096x2160 preferred resolution
6908 * in a DID extension block after it.
6909 */
6910 drm_mode_sort(&connector->probed_modes);
e7b07cee 6911 amdgpu_dm_get_native_mode(connector);
a8d8d3dc 6912 } else {
c84dec2f 6913 amdgpu_dm_connector->num_modes = 0;
a8d8d3dc 6914 }
e7b07cee
HW
6915}
6916
7578ecda 6917static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
e7b07cee 6918{
c84dec2f
HW
6919 struct amdgpu_dm_connector *amdgpu_dm_connector =
6920 to_amdgpu_dm_connector(connector);
e7b07cee 6921 struct drm_encoder *encoder;
c84dec2f 6922 struct edid *edid = amdgpu_dm_connector->edid;
e7b07cee 6923
2b4c1c05 6924 encoder = amdgpu_dm_connector_to_encoder(connector);
3e332d3a 6925
5c0e6840 6926 if (!drm_edid_is_valid(edid)) {
1b369d3c
ML
6927 amdgpu_dm_connector->num_modes =
6928 drm_add_modes_noedid(connector, 640, 480);
85ee15d6
ML
6929 } else {
6930 amdgpu_dm_connector_ddc_get_modes(connector, edid);
6931 amdgpu_dm_connector_add_common_modes(encoder, connector);
6932 }
3e332d3a 6933 amdgpu_dm_fbc_init(connector);
5099114b 6934
c84dec2f 6935 return amdgpu_dm_connector->num_modes;
e7b07cee
HW
6936}
6937
3ee6b26b
AD
6938void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6939 struct amdgpu_dm_connector *aconnector,
6940 int connector_type,
6941 struct dc_link *link,
6942 int link_index)
e7b07cee 6943{
1348969a 6944 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
e7b07cee 6945
f04bee34
NK
6946 /*
6947 * Some of the properties below require access to state, like bpc.
6948 * Allocate some default initial connector state with our reset helper.
6949 */
6950 if (aconnector->base.funcs->reset)
6951 aconnector->base.funcs->reset(&aconnector->base);
6952
e7b07cee
HW
6953 aconnector->connector_id = link_index;
6954 aconnector->dc_link = link;
6955 aconnector->base.interlace_allowed = false;
6956 aconnector->base.doublescan_allowed = false;
6957 aconnector->base.stereo_allowed = false;
6958 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6959 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6ce8f316 6960 aconnector->audio_inst = -1;
e7b07cee
HW
6961 mutex_init(&aconnector->hpd_lock);
6962
1f6010a9
DF
6963 /*
6964 * Configure HPD hot plug support: connector->polled defaults to 0,
b830ebc9
HW
6965 * which means HPD hot plug is not supported.
6966 */
e7b07cee
HW
6967 switch (connector_type) {
6968 case DRM_MODE_CONNECTOR_HDMIA:
6969 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
e7baae1c 6970 aconnector->base.ycbcr_420_allowed =
9ea59d5a 6971 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
e7b07cee
HW
6972 break;
6973 case DRM_MODE_CONNECTOR_DisplayPort:
6974 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
e7baae1c 6975 aconnector->base.ycbcr_420_allowed =
9ea59d5a 6976 link->link_enc->features.dp_ycbcr420_supported ? true : false;
e7b07cee
HW
6977 break;
6978 case DRM_MODE_CONNECTOR_DVID:
6979 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6980 break;
6981 default:
6982 break;
6983 }
6984
6985 drm_object_attach_property(&aconnector->base.base,
6986 dm->ddev->mode_config.scaling_mode_property,
6987 DRM_MODE_SCALE_NONE);
6988
6989 drm_object_attach_property(&aconnector->base.base,
6990 adev->mode_info.underscan_property,
6991 UNDERSCAN_OFF);
6992 drm_object_attach_property(&aconnector->base.base,
6993 adev->mode_info.underscan_hborder_property,
6994 0);
6995 drm_object_attach_property(&aconnector->base.base,
6996 adev->mode_info.underscan_vborder_property,
6997 0);
1825fd34 6998
8c61b31e
JFZ
6999 if (!aconnector->mst_port)
7000 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
1825fd34 7001
4a8ca46b
RL
7002 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
7003 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7004 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
e7b07cee 7005
c1ee92f9 7006 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
5cb32419 7007 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
c1ee92f9
DF
7008 drm_object_attach_property(&aconnector->base.base,
7009 adev->mode_info.abm_level_property, 0);
7010 }
bb47de73
NK
7011
7012 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7fad8da1
NK
7013 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7014 connector_type == DRM_MODE_CONNECTOR_eDP) {
88694af9
NK
7015 drm_object_attach_property(
7016 &aconnector->base.base,
7017 dm->ddev->mode_config.hdr_output_metadata_property, 0);
7018
8c61b31e
JFZ
7019 if (!aconnector->mst_port)
7020 drm_connector_attach_vrr_capable_property(&aconnector->base);
7021
0c8620d6 7022#ifdef CONFIG_DRM_AMD_DC_HDCP
e22bb562 7023 if (adev->dm.hdcp_workqueue)
53e108aa 7024 drm_connector_attach_content_protection_property(&aconnector->base, true);
0c8620d6 7025#endif
bb47de73 7026 }
e7b07cee
HW
7027}
7028
7578ecda
AD
7029static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7030 struct i2c_msg *msgs, int num)
e7b07cee
HW
7031{
7032 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7033 struct ddc_service *ddc_service = i2c->ddc_service;
7034 struct i2c_command cmd;
7035 int i;
7036 int result = -EIO;
7037
b830ebc9 7038 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
e7b07cee
HW
7039
7040 if (!cmd.payloads)
7041 return result;
7042
7043 cmd.number_of_payloads = num;
7044 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7045 cmd.speed = 100;
7046
7047 for (i = 0; i < num; i++) {
7048 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7049 cmd.payloads[i].address = msgs[i].addr;
7050 cmd.payloads[i].length = msgs[i].len;
7051 cmd.payloads[i].data = msgs[i].buf;
7052 }
7053
c85e6e54
DF
7054 if (dc_submit_i2c(
7055 ddc_service->ctx->dc,
7056 ddc_service->ddc_pin->hw_info.ddc_channel,
e7b07cee
HW
7057 &cmd))
7058 result = num;
7059
7060 kfree(cmd.payloads);
7061 return result;
7062}
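/*
 * Illustrative transfer (message contents assumed): a typical EDID
 * read arrives here as two i2c_msgs and leaves as one i2c_command
 * with two payloads, executed as a single transaction at the fixed
 * 100 kHz speed set above:
 *
 *   msgs[0]: addr 0x50, write, 1 byte   -> payloads[0] (EDID offset)
 *   msgs[1]: addr 0x50, read, 128 bytes -> payloads[1] (EDID block)
 */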
7063
7578ecda 7064static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
e7b07cee
HW
7065{
7066 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7067}
7068
7069static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7070 .master_xfer = amdgpu_dm_i2c_xfer,
7071 .functionality = amdgpu_dm_i2c_func,
7072};
7073
3ee6b26b
AD
7074static struct amdgpu_i2c_adapter *
7075create_i2c(struct ddc_service *ddc_service,
7076 int link_index,
7077 int *res)
e7b07cee
HW
7078{
7079 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7080 struct amdgpu_i2c_adapter *i2c;
7081
b830ebc9 7082 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
2a55f096
ES
7083 if (!i2c)
7084 return NULL;
e7b07cee
HW
7085 i2c->base.owner = THIS_MODULE;
7086 i2c->base.class = I2C_CLASS_DDC;
7087 i2c->base.dev.parent = &adev->pdev->dev;
7088 i2c->base.algo = &amdgpu_dm_i2c_algo;
b830ebc9 7089 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
e7b07cee
HW
7090 i2c_set_adapdata(&i2c->base, i2c);
7091 i2c->ddc_service = ddc_service;
c85e6e54 7092 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
e7b07cee
HW
7093
7094 return i2c;
7095}
7096
89fc8d4e 7097
1f6010a9
DF
7098/*
7099 * Note: this function assumes that dc_link_detect() was called for the
b830ebc9
HW
7100 * dc_link which will be represented by this aconnector.
7101 */
7578ecda
AD
7102static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7103 struct amdgpu_dm_connector *aconnector,
7104 uint32_t link_index,
7105 struct amdgpu_encoder *aencoder)
e7b07cee
HW
7106{
7107 int res = 0;
7108 int connector_type;
7109 struct dc *dc = dm->dc;
7110 struct dc_link *link = dc_get_link_at_index(dc, link_index);
7111 struct amdgpu_i2c_adapter *i2c;
9a227d26
TSD
7112
7113 link->priv = aconnector;
e7b07cee 7114
f1ad2f5e 7115 DRM_DEBUG_DRIVER("%s()\n", __func__);
e7b07cee
HW
7116
7117 i2c = create_i2c(link->ddc, link->link_index, &res);
2a55f096
ES
7118 if (!i2c) {
7119 DRM_ERROR("Failed to create i2c adapter data\n");
7120 return -ENOMEM;
7121 }
7122
e7b07cee
HW
7123 aconnector->i2c = i2c;
7124 res = i2c_add_adapter(&i2c->base);
7125
7126 if (res) {
7127 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7128 goto out_free;
7129 }
7130
7131 connector_type = to_drm_connector_type(link->connector_signal);
7132
17165de2 7133 res = drm_connector_init_with_ddc(
e7b07cee
HW
7134 dm->ddev,
7135 &aconnector->base,
7136 &amdgpu_dm_connector_funcs,
17165de2
AP
7137 connector_type,
7138 &i2c->base);
e7b07cee
HW
7139
7140 if (res) {
7141 DRM_ERROR("connector_init failed\n");
7142 aconnector->connector_id = -1;
7143 goto out_free;
7144 }
7145
7146 drm_connector_helper_add(
7147 &aconnector->base,
7148 &amdgpu_dm_connector_helper_funcs);
7149
7150 amdgpu_dm_connector_init_helper(
7151 dm,
7152 aconnector,
7153 connector_type,
7154 link,
7155 link_index);
7156
cde4c44d 7157 drm_connector_attach_encoder(
e7b07cee
HW
7158 &aconnector->base, &aencoder->base);
7159
e7b07cee
HW
7160 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7161 || connector_type == DRM_MODE_CONNECTOR_eDP)
7daec99f 7162 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
e7b07cee 7163
e7b07cee
HW
7164out_free:
7165 if (res) {
7166 kfree(i2c);
7167 aconnector->i2c = NULL;
7168 }
7169 return res;
7170}
7171
7172int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7173{
7174 switch (adev->mode_info.num_crtc) {
7175 case 1:
7176 return 0x1;
7177 case 2:
7178 return 0x3;
7179 case 3:
7180 return 0x7;
7181 case 4:
7182 return 0xf;
7183 case 5:
7184 return 0x1f;
7185 case 6:
7186 default:
7187 return 0x3f;
7188 }
7189}
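#if 0	/* Illustrative sketch only: closed form of the switch above. */
static u32 encoder_crtc_mask(int num_crtc)
{
	/* One bit per CRTC, clamped to at most the 6 CRTCs exposed. */
	return (1u << min(num_crtc, 6)) - 1; /* e.g. 4 -> 0xf, >=6 -> 0x3f */
}
#endif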
7190
7578ecda
AD
7191static int amdgpu_dm_encoder_init(struct drm_device *dev,
7192 struct amdgpu_encoder *aencoder,
7193 uint32_t link_index)
e7b07cee 7194{
1348969a 7195 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
7196
7197 int res = drm_encoder_init(dev,
7198 &aencoder->base,
7199 &amdgpu_dm_encoder_funcs,
7200 DRM_MODE_ENCODER_TMDS,
7201 NULL);
7202
7203 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7204
7205 if (!res)
7206 aencoder->encoder_id = link_index;
7207 else
7208 aencoder->encoder_id = -1;
7209
7210 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7211
7212 return res;
7213}
7214
3ee6b26b
AD
7215static void manage_dm_interrupts(struct amdgpu_device *adev,
7216 struct amdgpu_crtc *acrtc,
7217 bool enable)
e7b07cee
HW
7218{
7219 /*
8fe684e9
NK
7220 * We have no guarantee that the frontend index maps to the same
7221 * backend index - some even map to more than one.
7222 *
7223 * TODO: Use a different interrupt or check DC itself for the mapping.
e7b07cee
HW
7224 */
7225 int irq_type =
734dd01d 7226 amdgpu_display_crtc_idx_to_irq_type(
e7b07cee
HW
7227 adev,
7228 acrtc->crtc_id);
7229
7230 if (enable) {
7231 drm_crtc_vblank_on(&acrtc->base);
7232 amdgpu_irq_get(
7233 adev,
7234 &adev->pageflip_irq,
7235 irq_type);
7236 } else {
7237
7238 amdgpu_irq_put(
7239 adev,
7240 &adev->pageflip_irq,
7241 irq_type);
7242 drm_crtc_vblank_off(&acrtc->base);
7243 }
7244}
7245
8fe684e9
NK
7246static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7247 struct amdgpu_crtc *acrtc)
7248{
7249 int irq_type =
7250 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7251
7252 /*
7253 * This reads the current state for the IRQ and force-reapplies
7254 * the setting to hardware.
7255 */
7256 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7257}
7258
3ee6b26b
AD
7259static bool
7260is_scaling_state_different(const struct dm_connector_state *dm_state,
7261 const struct dm_connector_state *old_dm_state)
e7b07cee
HW
7262{
7263 if (dm_state->scaling != old_dm_state->scaling)
7264 return true;
7265 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7266 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7267 return true;
7268 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7269 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7270 return true;
b830ebc9
HW
7271 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7272 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7273 return true;
e7b07cee
HW
7274 return false;
7275}
7276
0c8620d6
BL
7277#ifdef CONFIG_DRM_AMD_DC_HDCP
7278static bool is_content_protection_different(struct drm_connector_state *state,
7279 const struct drm_connector_state *old_state,
7280 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7281{
7282 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
97f6c917 7283 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
0c8620d6 7284
31c0ed90 7285 /* Handle: Type0/1 change */
53e108aa
BL
7286 if (old_state->hdcp_content_type != state->hdcp_content_type &&
7287 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7288 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7289 return true;
7290 }
7291
31c0ed90
BL
7292 /* CP is being re-enabled; ignore this.
7293 *
7294 * Handles: ENABLED -> DESIRED
7295 */
0c8620d6
BL
7296 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7297 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7298 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7299 return false;
7300 }
7301
31c0ed90
BL
7302 /* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7303 *
7304 * Handles: UNDESIRED -> ENABLED
7305 */
0c8620d6
BL
7306 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7307 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7308 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7309
7310 /* Check if something is connected/enabled; otherwise we would start hdcp with nothing connected/enabled
7311 * (hot-plug, headless S3, dpms)
31c0ed90
BL
7312 *
7313 * Handles: DESIRED -> DESIRED (Special case)
0c8620d6 7314 */
97f6c917
BL
7315 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7316 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7317 dm_con_state->update_hdcp = false;
0c8620d6 7318 return true;
97f6c917 7319 }
0c8620d6 7320
31c0ed90
BL
7321 /*
7322 * Handles: UNDESIRED -> UNDESIRED
7323 * DESIRED -> DESIRED
7324 * ENABLED -> ENABLED
7325 */
0c8620d6
BL
7326 if (old_state->content_protection == state->content_protection)
7327 return false;
7328
31c0ed90
BL
7329 /*
7330 * Handles: UNDESIRED -> DESIRED
7331 * DESIRED -> UNDESIRED
7332 * ENABLED -> UNDESIRED
7333 */
97f6c917 7334 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
0c8620d6
BL
7335 return true;
7336
31c0ed90
BL
7337 /*
7338 * Handles: DESIRED -> ENABLED
7339 */
0c8620d6
BL
7340 return false;
7341}
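/*
 * Summary of the transitions handled above (derived from the checks in
 * this function, not from the HDCP spec):
 *
 *   ENABLED   -> DESIRED           no update (re-enable in progress)
 *   UNDESIRED -> ENABLED           update (S3 restore, demoted to DESIRED)
 *   DESIRED   -> DESIRED           update only while update_hdcp is set
 *   unchanged                      no update
 *   changed, new != ENABLED        update
 *   DESIRED   -> ENABLED           no update (HDCP module owns this step)
 */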
7342
0c8620d6 7343#endif
3ee6b26b
AD
7344static void remove_stream(struct amdgpu_device *adev,
7345 struct amdgpu_crtc *acrtc,
7346 struct dc_stream_state *stream)
e7b07cee
HW
7347{
7348 /* this is the update mode case */
e7b07cee
HW
7349
7350 acrtc->otg_inst = -1;
7351 acrtc->enabled = false;
7352}
7353
7578ecda
AD
7354static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7355 struct dc_cursor_position *position)
2a8f6ccb 7356{
f4c2cc43 7357 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2a8f6ccb
HW
7358 int x, y;
7359 int xorigin = 0, yorigin = 0;
7360
e371e19c
NK
7361 position->enable = false;
7362 position->x = 0;
7363 position->y = 0;
7364
7365 if (!crtc || !plane->state->fb)
2a8f6ccb 7366 return 0;
2a8f6ccb
HW
7367
7368 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7369 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7370 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7371 __func__,
7372 plane->state->crtc_w,
7373 plane->state->crtc_h);
7374 return -EINVAL;
7375 }
7376
7377 x = plane->state->crtc_x;
7378 y = plane->state->crtc_y;
c14a005c 7379
e371e19c
NK
7380 if (x <= -amdgpu_crtc->max_cursor_width ||
7381 y <= -amdgpu_crtc->max_cursor_height)
7382 return 0;
7383
2a8f6ccb
HW
7384 if (x < 0) {
7385 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7386 x = 0;
7387 }
7388 if (y < 0) {
7389 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
7390 y = 0;
7391 }
7392 position->enable = true;
d243b6ff 7393 position->translate_by_source = true;
2a8f6ccb
HW
7394 position->x = x;
7395 position->y = y;
7396 position->x_hotspot = xorigin;
7397 position->y_hotspot = yorigin;
7398
7399 return 0;
7400}
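/*
 * Worked example (values assumed): a 64x64 cursor at crtc_x = -10 is
 * clamped above to x = 0 with x_hotspot = 10, so DC draws it flush
 * with the screen edge, cropping its leftmost 10 columns. Only a
 * cursor that is entirely off screen (x <= -max_cursor_width or
 * y <= -max_cursor_height) is left disabled.
 */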
7401
3ee6b26b
AD
7402static void handle_cursor_update(struct drm_plane *plane,
7403 struct drm_plane_state *old_plane_state)
e7b07cee 7404{
1348969a 7405 struct amdgpu_device *adev = drm_to_adev(plane->dev);
2a8f6ccb
HW
7406 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
7407 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
7408 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
7409 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7410 uint64_t address = afb ? afb->address : 0;
7411 struct dc_cursor_position position;
7412 struct dc_cursor_attributes attributes;
7413 int ret;
7414
e7b07cee
HW
7415 if (!plane->state->fb && !old_plane_state->fb)
7416 return;
7417
f1ad2f5e 7418 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
c12a7ba5
HW
7419 __func__,
7420 amdgpu_crtc->crtc_id,
7421 plane->state->crtc_w,
7422 plane->state->crtc_h);
2a8f6ccb
HW
7423
7424 ret = get_cursor_position(plane, crtc, &position);
7425 if (ret)
7426 return;
7427
7428 if (!position.enable) {
7429 /* turn off cursor */
674e78ac
NK
7430 if (crtc_state && crtc_state->stream) {
7431 mutex_lock(&adev->dm.dc_lock);
2a8f6ccb
HW
7432 dc_stream_set_cursor_position(crtc_state->stream,
7433 &position);
674e78ac
NK
7434 mutex_unlock(&adev->dm.dc_lock);
7435 }
2a8f6ccb 7436 return;
e7b07cee 7437 }
e7b07cee 7438
2a8f6ccb
HW
7439 amdgpu_crtc->cursor_width = plane->state->crtc_w;
7440 amdgpu_crtc->cursor_height = plane->state->crtc_h;
7441
c1cefe11 7442 memset(&attributes, 0, sizeof(attributes));
2a8f6ccb
HW
7443 attributes.address.high_part = upper_32_bits(address);
7444 attributes.address.low_part = lower_32_bits(address);
7445 attributes.width = plane->state->crtc_w;
7446 attributes.height = plane->state->crtc_h;
7447 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
7448 attributes.rotation_angle = 0;
7449 attributes.attribute_flags.value = 0;
7450
03a66367 7451 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
2a8f6ccb 7452
886daac9 7453 if (crtc_state->stream) {
674e78ac 7454 mutex_lock(&adev->dm.dc_lock);
886daac9
JZ
7455 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
7456 &attributes))
7457 DRM_ERROR("DC failed to set cursor attributes\n");
2a8f6ccb 7458
2a8f6ccb
HW
7459 if (!dc_stream_set_cursor_position(crtc_state->stream,
7460 &position))
7461 DRM_ERROR("DC failed to set cursor position\n");
674e78ac 7462 mutex_unlock(&adev->dm.dc_lock);
886daac9 7463 }
2a8f6ccb 7464}
e7b07cee
HW
7465
7466static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7467{
7468
7469 assert_spin_locked(&acrtc->base.dev->event_lock);
7470 WARN_ON(acrtc->event);
7471
7472 acrtc->event = acrtc->base.state->event;
7473
7474 /* Set the flip status */
7475 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7476
7477 /* Mark this event as consumed */
7478 acrtc->base.state->event = NULL;
7479
7480 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7481 acrtc->crtc_id);
7482}
7483
bb47de73
NK
7484static void update_freesync_state_on_stream(
7485 struct amdgpu_display_manager *dm,
7486 struct dm_crtc_state *new_crtc_state,
180db303
NK
7487 struct dc_stream_state *new_stream,
7488 struct dc_plane_state *surface,
7489 u32 flip_timestamp_in_us)
bb47de73 7490{
09aef2c4 7491 struct mod_vrr_params vrr_params;
bb47de73 7492 struct dc_info_packet vrr_infopacket = {0};
09aef2c4 7493 struct amdgpu_device *adev = dm->adev;
585d450c 7494 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
09aef2c4 7495 unsigned long flags;
bb47de73
NK
7496
7497 if (!new_stream)
7498 return;
7499
7500 /*
7501 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7502 * For now it's sufficient to just guard against these conditions.
7503 */
7504
7505 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7506 return;
7507
4a580877 7508 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
585d450c 7509 vrr_params = acrtc->dm_irq_params.vrr_params;
09aef2c4 7510
180db303
NK
7511 if (surface) {
7512 mod_freesync_handle_preflip(
7513 dm->freesync_module,
7514 surface,
7515 new_stream,
7516 flip_timestamp_in_us,
7517 &vrr_params);
09aef2c4
MK
7518
7519 if (adev->family < AMDGPU_FAMILY_AI &&
7520 amdgpu_dm_vrr_active(new_crtc_state)) {
7521 mod_freesync_handle_v_update(dm->freesync_module,
7522 new_stream, &vrr_params);
e63e2491
EB
7523
7524 /* Need to call this before the frame ends. */
7525 dc_stream_adjust_vmin_vmax(dm->dc,
7526 new_crtc_state->stream,
7527 &vrr_params.adjust);
09aef2c4 7528 }
180db303 7529 }
bb47de73
NK
7530
7531 mod_freesync_build_vrr_infopacket(
7532 dm->freesync_module,
7533 new_stream,
180db303 7534 &vrr_params,
ecd0136b
HT
7535 PACKET_TYPE_VRR,
7536 TRANSFER_FUNC_UNKNOWN,
bb47de73
NK
7537 &vrr_infopacket);
7538
8a48b44c 7539 new_crtc_state->freesync_timing_changed |=
585d450c 7540 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
180db303
NK
7541 &vrr_params.adjust,
7542 sizeof(vrr_params.adjust)) != 0);
bb47de73 7543
8a48b44c 7544 new_crtc_state->freesync_vrr_info_changed |=
bb47de73
NK
7545 (memcmp(&new_crtc_state->vrr_infopacket,
7546 &vrr_infopacket,
7547 sizeof(vrr_infopacket)) != 0);
7548
585d450c 7549 acrtc->dm_irq_params.vrr_params = vrr_params;
bb47de73
NK
7550 new_crtc_state->vrr_infopacket = vrr_infopacket;
7551
585d450c 7552 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
bb47de73
NK
7553 new_stream->vrr_infopacket = vrr_infopacket;
7554
7555 if (new_crtc_state->freesync_vrr_info_changed)
7556 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
7557 new_crtc_state->base.crtc->base.id,
7558 (int)new_crtc_state->base.vrr_enabled,
180db303 7559 (int)vrr_params.state);
09aef2c4 7560
4a580877 7561 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
bb47de73
NK
7562}
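/*
 * Note: the change detection above uses memcmp() on the adjust/packet
 * structs, and the result is OR-ed into the freesync_*_changed flags
 * rather than assigned, so a change detected for an earlier plane in
 * the same commit is not lost when a later comparison sees no
 * difference.
 */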
7563
585d450c 7564static void update_stream_irq_parameters(
e854194c
MK
7565 struct amdgpu_display_manager *dm,
7566 struct dm_crtc_state *new_crtc_state)
7567{
7568 struct dc_stream_state *new_stream = new_crtc_state->stream;
09aef2c4 7569 struct mod_vrr_params vrr_params;
e854194c 7570 struct mod_freesync_config config = new_crtc_state->freesync_config;
09aef2c4 7571 struct amdgpu_device *adev = dm->adev;
585d450c 7572 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
09aef2c4 7573 unsigned long flags;
e854194c
MK
7574
7575 if (!new_stream)
7576 return;
7577
7578 /*
7579 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7580 * For now it's sufficient to just guard against these conditions.
7581 */
7582 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7583 return;
7584
4a580877 7585 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
585d450c 7586 vrr_params = acrtc->dm_irq_params.vrr_params;
09aef2c4 7587
e854194c
MK
7588 if (new_crtc_state->vrr_supported &&
7589 config.min_refresh_in_uhz &&
7590 config.max_refresh_in_uhz) {
7591 config.state = new_crtc_state->base.vrr_enabled ?
7592 VRR_STATE_ACTIVE_VARIABLE :
7593 VRR_STATE_INACTIVE;
7594 } else {
7595 config.state = VRR_STATE_UNSUPPORTED;
7596 }
7597
7598 mod_freesync_build_vrr_params(dm->freesync_module,
7599 new_stream,
7600 &config, &vrr_params);
7601
7602 new_crtc_state->freesync_timing_changed |=
585d450c
AP
7603 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7604 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
e854194c 7605
585d450c
AP
7606 new_crtc_state->freesync_config = config;
7607 /* Copy state for access from DM IRQ handler */
7608 acrtc->dm_irq_params.freesync_config = config;
7609 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7610 acrtc->dm_irq_params.vrr_params = vrr_params;
4a580877 7611 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
e854194c
MK
7612}
7613
66b0c973
MK
7614static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7615 struct dm_crtc_state *new_state)
7616{
7617 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7618 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7619
7620 if (!old_vrr_active && new_vrr_active) {
7621 /* Transition VRR inactive -> active:
7622 * While VRR is active, we must not disable vblank irq, as a
7623 * re-enable after a disable would compute bogus vblank/pflip
7624 * timestamps if it happened inside the display front-porch.
d2574c33
MK
7625 *
7626 * We also need vupdate irq for the actual core vblank handling
7627 * at end of vblank.
66b0c973 7628 */
d2574c33 7629 dm_set_vupdate_irq(new_state->base.crtc, true);
66b0c973
MK
7630 drm_crtc_vblank_get(new_state->base.crtc);
7631 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7632 __func__, new_state->base.crtc->base.id);
7633 } else if (old_vrr_active && !new_vrr_active) {
7634 /* Transition VRR active -> inactive:
7635 * Allow vblank irq disable again for fixed refresh rate.
7636 */
d2574c33 7637 dm_set_vupdate_irq(new_state->base.crtc, false);
66b0c973
MK
7638 drm_crtc_vblank_put(new_state->base.crtc);
7639 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7640 __func__, new_state->base.crtc->base.id);
7641 }
7642}
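/*
 * Note: drm_crtc_vblank_get()/drm_crtc_vblank_put() are reference
 * counted, so the reference taken on the off->on edge above is
 * balanced by exactly one put on the later on->off edge, and
 * dm_set_vupdate_irq() mirrors the same pairing for the vupdate
 * interrupt.
 */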
7643
8ad27806
NK
7644static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7645{
7646 struct drm_plane *plane;
7647 struct drm_plane_state *old_plane_state, *new_plane_state;
7648 int i;
7649
7650 /*
7651 * TODO: Make this per-stream so we don't issue redundant updates for
7652 * commits with multiple streams.
7653 */
7654 for_each_oldnew_plane_in_state(state, plane, old_plane_state,
7655 new_plane_state, i)
7656 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7657 handle_cursor_update(plane, old_plane_state);
7658}
7659
3be5262e 7660static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
eb3dc897 7661 struct dc_state *dc_state,
3ee6b26b
AD
7662 struct drm_device *dev,
7663 struct amdgpu_display_manager *dm,
7664 struct drm_crtc *pcrtc,
420cd472 7665 bool wait_for_vblank)
e7b07cee 7666{
efc8278e 7667 uint32_t i;
8a48b44c 7668 uint64_t timestamp_ns;
e7b07cee 7669 struct drm_plane *plane;
0bc9706d 7670 struct drm_plane_state *old_plane_state, *new_plane_state;
e7b07cee 7671 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
0bc9706d
LSL
7672 struct drm_crtc_state *new_pcrtc_state =
7673 drm_atomic_get_new_crtc_state(state, pcrtc);
7674 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
44d09c6a
HW
7675 struct dm_crtc_state *dm_old_crtc_state =
7676 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
74aa7bd4 7677 int planes_count = 0, vpos, hpos;
570c91d5 7678 long r;
e7b07cee 7679 unsigned long flags;
8a48b44c 7680 struct amdgpu_bo *abo;
fdd1fe57
MK
7681 uint32_t target_vblank, last_flip_vblank;
7682 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
74aa7bd4 7683 bool pflip_present = false;
bc7f670e
DF
7684 struct {
7685 struct dc_surface_update surface_updates[MAX_SURFACES];
7686 struct dc_plane_info plane_infos[MAX_SURFACES];
7687 struct dc_scaling_info scaling_infos[MAX_SURFACES];
74aa7bd4 7688 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
bc7f670e 7689 struct dc_stream_update stream_update;
74aa7bd4 7690 } *bundle;
bc7f670e 7691
74aa7bd4 7692 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8a48b44c 7693
74aa7bd4
DF
7694 if (!bundle) {
7695 dm_error("Failed to allocate update bundle\n");
4b510503
NK
7696 goto cleanup;
7697 }
e7b07cee 7698
8ad27806
NK
7699 /*
7700 * Disable the cursor first if we're disabling all the planes.
7701 * It'll remain on the screen after the planes are re-enabled
7702 * if we don't.
7703 */
7704 if (acrtc_state->active_planes == 0)
7705 amdgpu_dm_commit_cursors(state);
7706
e7b07cee 7707 /* update planes when needed */
efc8278e 7708 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
0bc9706d 7709 struct drm_crtc *crtc = new_plane_state->crtc;
f5ba60fe 7710 struct drm_crtc_state *new_crtc_state;
0bc9706d 7711 struct drm_framebuffer *fb = new_plane_state->fb;
6eed95b0 7712 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
34bafd27 7713 bool plane_needs_flip;
c7af5f77 7714 struct dc_plane_state *dc_plane;
54d76575 7715 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
e7b07cee 7716
80c218d5
NK
7717 /* Cursor plane is handled after stream updates */
7718 if (plane->type == DRM_PLANE_TYPE_CURSOR)
e7b07cee 7719 continue;
e7b07cee 7720
f5ba60fe
DD
7721 if (!fb || !crtc || pcrtc != crtc)
7722 continue;
7723
7724 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7725 if (!new_crtc_state->active)
e7b07cee
HW
7726 continue;
7727
bc7f670e 7728 dc_plane = dm_new_plane_state->dc_state;
e7b07cee 7729
74aa7bd4 7730 bundle->surface_updates[planes_count].surface = dc_plane;
bc7f670e 7731 if (new_pcrtc_state->color_mgmt_changed) {
74aa7bd4
DF
7732 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7733 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
44efb784 7734 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
bc7f670e 7735 }
8a48b44c 7736
695af5f9
NK
7737 fill_dc_scaling_info(new_plane_state,
7738 &bundle->scaling_infos[planes_count]);
8a48b44c 7739
695af5f9
NK
7740 bundle->surface_updates[planes_count].scaling_info =
7741 &bundle->scaling_infos[planes_count];
8a48b44c 7742
f5031000 7743 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8a48b44c 7744
f5031000 7745 pflip_present = pflip_present || plane_needs_flip;
8a48b44c 7746
f5031000
DF
7747 if (!plane_needs_flip) {
7748 planes_count += 1;
7749 continue;
7750 }
8a48b44c 7751
2fac0f53
CK
7752 abo = gem_to_amdgpu_bo(fb->obj[0]);
7753
f8308898
AG
7754 /*
7755 * Wait for all fences on this FB. Do limited wait to avoid
7756 * deadlock during GPU reset when this fence will not signal
7757 * but we hold reservation lock for the BO.
7758 */
52791eee 7759 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
2fac0f53 7760 false,
f8308898
AG
7761 msecs_to_jiffies(5000));
7762 if (unlikely(r <= 0))
ed8a5fb2 7763 DRM_ERROR("Waiting for fences timed out!");
2fac0f53 7764
695af5f9 7765 fill_dc_plane_info_and_addr(
8ce5d842 7766 dm->adev, new_plane_state,
6eed95b0 7767 afb->tiling_flags,
695af5f9 7768 &bundle->plane_infos[planes_count],
87b7ebc2 7769 &bundle->flip_addrs[planes_count].address,
6eed95b0 7770 afb->tmz_surface, false);
87b7ebc2
RS
7771
7772 DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7773 new_plane_state->plane->index,
7774 bundle->plane_infos[planes_count].dcc.enable);
695af5f9
NK
7775
7776 bundle->surface_updates[planes_count].plane_info =
7777 &bundle->plane_infos[planes_count];
8a48b44c 7778
caff0e66
NK
7779 /*
7780 * Only allow immediate flips for fast updates that don't
7781 * change FB pitch, DCC state, rotation or mirroring.
7782 */
f5031000 7783 bundle->flip_addrs[planes_count].flip_immediate =
4d85f45c 7784 crtc->state->async_flip &&
caff0e66 7785 acrtc_state->update_type == UPDATE_TYPE_FAST;
8a48b44c 7786
f5031000
DF
7787 timestamp_ns = ktime_get_ns();
7788 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7789 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7790 bundle->surface_updates[planes_count].surface = dc_plane;
8a48b44c 7791
f5031000
DF
7792 if (!bundle->surface_updates[planes_count].surface) {
7793 DRM_ERROR("No surface for CRTC: id=%d\n",
7794 acrtc_attach->crtc_id);
7795 continue;
bc7f670e
DF
7796 }
7797
f5031000
DF
7798 if (plane == pcrtc->primary)
7799 update_freesync_state_on_stream(
7800 dm,
7801 acrtc_state,
7802 acrtc_state->stream,
7803 dc_plane,
7804 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
bc7f670e 7805
f5031000
DF
7806 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7807 __func__,
7808 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7809 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
bc7f670e
DF
7810
7811 planes_count += 1;
7812
8a48b44c
DF
7813 }
7814
74aa7bd4 7815 if (pflip_present) {
634092b1
MK
7816 if (!vrr_active) {
7817 /* Use old throttling in non-vrr fixed refresh rate mode
7818 * to keep flip scheduling based on target vblank counts
7819 * working in a backwards compatible way, e.g., for
7820 * clients using the GLX_OML_sync_control extension or
7821 * DRI3/Present extension with defined target_msc.
7822 */
e3eff4b5 7823 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
634092b1
MK
7824 }
7825 else {
7826 /* For variable refresh rate mode only:
7827 * Get vblank of last completed flip to avoid > 1 vrr
7828 * flips per video frame by use of throttling, but allow
7829 * flip programming anywhere in the possibly large
7830 * variable vrr vblank interval for fine-grained flip
7831 * timing control and more opportunity to avoid stutter
7832 * on late submission of flips.
7833 */
7834 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
5d1c59c4 7835 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
634092b1
MK
7836 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7837 }
7838
fdd1fe57 7839 target_vblank = last_flip_vblank + wait_for_vblank;
8a48b44c
DF
7840
7841 /*
7842 * Wait until we're out of the vertical blank period before the one
7843 * targeted by the flip
7844 */
7845 while ((acrtc_attach->enabled &&
7846 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7847 0, &vpos, &hpos, NULL,
7848 NULL, &pcrtc->hwmode)
7849 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7850 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7851 (int)(target_vblank -
e3eff4b5 7852 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8a48b44c
DF
7853 usleep_range(1000, 1100);
7854 }
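/*
 * Note on the wrap-safe test above: vblank counters are unsigned and
 * wrap, so the loop spins while the signed distance
 * (int)(target_vblank - current) is positive. For example, target = 3
 * with current = 0xfffffffe yields (int)(3 - 0xfffffffe) = +5, so a
 * target just past the wrap is still correctly treated as ahead.
 */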
7855
8fe684e9
NK
7856 /*
7857 * Prepare the flip event for the pageflip interrupt to handle.
7858 *
7859 * This only works in the case where we've already turned on the
7860 * appropriate hardware blocks (e.g. HUBP), so in the transition case
7861 * from 0 -> n planes we have to skip a hardware generated event
7862 * and rely on sending it from software.
7863 */
7864 if (acrtc_attach->base.state->event &&
7865 acrtc_state->active_planes > 0) {
8a48b44c
DF
7866 drm_crtc_vblank_get(pcrtc);
7867
7868 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7869
7870 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7871 prepare_flip_isr(acrtc_attach);
7872
7873 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7874 }
7875
7876 if (acrtc_state->stream) {
8a48b44c 7877 if (acrtc_state->freesync_vrr_info_changed)
74aa7bd4 7878 bundle->stream_update.vrr_infopacket =
8a48b44c 7879 &acrtc_state->stream->vrr_infopacket;
e7b07cee 7880 }
e7b07cee
HW
7881 }
7882
bc92c065 7883 /* Update the planes if changed or disable if we don't have any. */
ed9656fb
ES
7884 if ((planes_count || acrtc_state->active_planes == 0) &&
7885 acrtc_state->stream) {
b6e881c9 7886 bundle->stream_update.stream = acrtc_state->stream;
bc7f670e 7887 if (new_pcrtc_state->mode_changed) {
74aa7bd4
DF
7888 bundle->stream_update.src = acrtc_state->stream->src;
7889 bundle->stream_update.dst = acrtc_state->stream->dst;
e7b07cee
HW
7890 }
7891
cf020d49
NK
7892 if (new_pcrtc_state->color_mgmt_changed) {
7893 /*
7894 * TODO: This isn't fully correct since we've actually
7895 * already modified the stream in place.
7896 */
7897 bundle->stream_update.gamut_remap =
7898 &acrtc_state->stream->gamut_remap_matrix;
7899 bundle->stream_update.output_csc_transform =
7900 &acrtc_state->stream->csc_color_matrix;
7901 bundle->stream_update.out_transfer_func =
7902 acrtc_state->stream->out_transfer_func;
7903 }
bc7f670e 7904
8a48b44c 7905 acrtc_state->stream->abm_level = acrtc_state->abm_level;
bc7f670e 7906 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
74aa7bd4 7907 bundle->stream_update.abm_level = &acrtc_state->abm_level;
44d09c6a 7908
e63e2491
EB
7909 /*
7910 * If FreeSync state on the stream has changed then we need to
7911 * re-adjust the min/max bounds now that DC doesn't handle this
7912 * as part of commit.
7913 */
7914 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7915 amdgpu_dm_vrr_active(acrtc_state)) {
7916 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7917 dc_stream_adjust_vmin_vmax(
7918 dm->dc, acrtc_state->stream,
585d450c 7919 &acrtc_attach->dm_irq_params.vrr_params.adjust);
e63e2491
EB
7920 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7921 }
bc7f670e 7922 mutex_lock(&dm->dc_lock);
8c322309 7923 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
d1ebfdd8 7924 acrtc_state->stream->link->psr_settings.psr_allow_active)
8c322309
RL
7925 amdgpu_dm_psr_disable(acrtc_state->stream);
7926
bc7f670e 7927 dc_commit_updates_for_stream(dm->dc,
74aa7bd4 7928 bundle->surface_updates,
bc7f670e
DF
7929 planes_count,
7930 acrtc_state->stream,
efc8278e
AJ
7931 &bundle->stream_update,
7932 dc_state);
8c322309 7933
8fe684e9
NK
7934 /*
7935 * Enable or disable the interrupts on the backend.
7936 *
7937 * Most pipes are put into power gating when unused.
7938 *
7939 * When power gating is enabled on a pipe we lose the
7940 * interrupt enablement state when power gating is disabled.
7941 *
7942 * So we need to update the IRQ control state in hardware
7943 * whenever the pipe turns on (since it could be previously
7944 * power gated) or off (since some pipes can't be power gated
7945 * on some ASICs).
7946 */
7947 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
1348969a
LT
7948 dm_update_pflip_irq_state(drm_to_adev(dev),
7949 acrtc_attach);
8fe684e9 7950
8c322309 7951 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
1cfbbdde 7952 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
d1ebfdd8 7953 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8c322309
RL
7954 amdgpu_dm_link_setup_psr(acrtc_state->stream);
7955 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
d1ebfdd8
WW
7956 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7957 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
8c322309
RL
7958 amdgpu_dm_psr_enable(acrtc_state->stream);
7959 }
7960
bc7f670e 7961 mutex_unlock(&dm->dc_lock);
e7b07cee 7962 }
4b510503 7963
8ad27806
NK
7964 /*
7965 * Update cursor state *after* programming all the planes.
7966 * This avoids redundant programming in the case where we're going
7967 * to be disabling a single plane - those pipes are being disabled.
7968 */
7969 if (acrtc_state->active_planes)
7970 amdgpu_dm_commit_cursors(state);
80c218d5 7971
4b510503 7972cleanup:
74aa7bd4 7973 kfree(bundle);
e7b07cee
HW
7974}
7975
6ce8f316
NK
7976static void amdgpu_dm_commit_audio(struct drm_device *dev,
7977 struct drm_atomic_state *state)
7978{
1348969a 7979 struct amdgpu_device *adev = drm_to_adev(dev);
6ce8f316
NK
7980 struct amdgpu_dm_connector *aconnector;
7981 struct drm_connector *connector;
7982 struct drm_connector_state *old_con_state, *new_con_state;
7983 struct drm_crtc_state *new_crtc_state;
7984 struct dm_crtc_state *new_dm_crtc_state;
7985 const struct dc_stream_status *status;
7986 int i, inst;
7987
7988 /* Notify device removals. */
7989 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7990 if (old_con_state->crtc != new_con_state->crtc) {
7991 /* CRTC changes require notification. */
7992 goto notify;
7993 }
7994
7995 if (!new_con_state->crtc)
7996 continue;
7997
7998 new_crtc_state = drm_atomic_get_new_crtc_state(
7999 state, new_con_state->crtc);
8000
8001 if (!new_crtc_state)
8002 continue;
8003
8004 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8005 continue;
8006
8007 notify:
8008 aconnector = to_amdgpu_dm_connector(connector);
8009
8010 mutex_lock(&adev->dm.audio_lock);
8011 inst = aconnector->audio_inst;
8012 aconnector->audio_inst = -1;
8013 mutex_unlock(&adev->dm.audio_lock);
8014
8015 amdgpu_dm_audio_eld_notify(adev, inst);
8016 }
8017
8018 /* Notify audio device additions. */
8019 for_each_new_connector_in_state(state, connector, new_con_state, i) {
8020 if (!new_con_state->crtc)
8021 continue;
8022
8023 new_crtc_state = drm_atomic_get_new_crtc_state(
8024 state, new_con_state->crtc);
8025
8026 if (!new_crtc_state)
8027 continue;
8028
8029 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8030 continue;
8031
8032 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8033 if (!new_dm_crtc_state->stream)
8034 continue;
8035
8036 status = dc_stream_get_status(new_dm_crtc_state->stream);
8037 if (!status)
8038 continue;
8039
8040 aconnector = to_amdgpu_dm_connector(connector);
8041
8042 mutex_lock(&adev->dm.audio_lock);
8043 inst = status->audio_inst;
8044 aconnector->audio_inst = inst;
8045 mutex_unlock(&adev->dm.audio_lock);
8046
8047 amdgpu_dm_audio_eld_notify(adev, inst);
8048 }
8049}
8050
1f6010a9 8051/*
27b3f4fc
LSL
8052 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8053 * @crtc_state: the DRM CRTC state
8054 * @stream_state: the DC stream state.
8055 *
8056 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
8057 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8058 */
8059static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8060 struct dc_stream_state *stream_state)
8061{
b9952f93 8062 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
27b3f4fc 8063}
e7b07cee 8064
b8592b48
LL
8065/**
8066 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8067 * @state: The atomic state to commit
8068 *
8069 * This will tell DC to commit the constructed DC state from atomic_check,
8070 * programming the hardware. Any failures here implies a hardware failure, since
8071 * atomic check should have filtered anything non-kosher.
8072 */
7578ecda 8073static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
e7b07cee
HW
8074{
8075 struct drm_device *dev = state->dev;
1348969a 8076 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
8077 struct amdgpu_display_manager *dm = &adev->dm;
8078 struct dm_atomic_state *dm_state;
eb3dc897 8079 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
e7b07cee 8080 uint32_t i, j;
5cc6dcbd 8081 struct drm_crtc *crtc;
0bc9706d 8082 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
e7b07cee
HW
8083 unsigned long flags;
8084 bool wait_for_vblank = true;
8085 struct drm_connector *connector;
c2cea706 8086 struct drm_connector_state *old_con_state, *new_con_state;
54d76575 8087 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
fe2a1965 8088 int crtc_disable_count = 0;
6ee90e88 8089 bool mode_set_reset_required = false;
e7b07cee 8090
e8a98235
RS
8091 trace_amdgpu_dm_atomic_commit_tail_begin(state);
8092
e7b07cee
HW
8093 drm_atomic_helper_update_legacy_modeset_state(dev, state);
8094
eb3dc897
NK
8095 dm_state = dm_atomic_get_new_state(state);
8096 if (dm_state && dm_state->context) {
8097 dc_state = dm_state->context;
8098 } else {
8099 /* No state changes, retain current state. */
813d20dc 8100 dc_state_temp = dc_create_state(dm->dc);
eb3dc897
NK
8101 ASSERT(dc_state_temp);
8102 dc_state = dc_state_temp;
8103 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8104 }
e7b07cee 8105
6d90a208
AP
8106 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8107 new_crtc_state, i) {
8108 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8109
8110 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8111
8112 if (old_crtc_state->active &&
8113 (!new_crtc_state->active ||
8114 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8115 manage_dm_interrupts(adev, acrtc, false);
8116 dc_stream_release(dm_old_crtc_state->stream);
8117 }
8118 }
8119
8976f73b
RS
8120 drm_atomic_helper_calc_timestamping_constants(state);
8121
e7b07cee 8122 /* update changed items */
0bc9706d 8123 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
e7b07cee 8124 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 8125
54d76575
LSL
8126 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8127 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
e7b07cee 8128
f1ad2f5e 8129 DRM_DEBUG_DRIVER(
e7b07cee
HW
8130 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8131 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8132 "connectors_changed:%d\n",
8133 acrtc->crtc_id,
0bc9706d
LSL
8134 new_crtc_state->enable,
8135 new_crtc_state->active,
8136 new_crtc_state->planes_changed,
8137 new_crtc_state->mode_changed,
8138 new_crtc_state->active_changed,
8139 new_crtc_state->connectors_changed);
e7b07cee 8140
5c68c652
VL
8141 /* Disable cursor if disabling crtc */
8142 if (old_crtc_state->active && !new_crtc_state->active) {
8143 struct dc_cursor_position position;
8144
8145 memset(&position, 0, sizeof(position));
8146 mutex_lock(&dm->dc_lock);
8147 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8148 mutex_unlock(&dm->dc_lock);
8149 }
8150
27b3f4fc
LSL
8151 /* Copy all transient state flags into dc state */
8152 if (dm_new_crtc_state->stream) {
8153 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8154 dm_new_crtc_state->stream);
8155 }
8156
e7b07cee
HW
8157 /* Handle the headless hotplug case, updating new_state and
8158 * aconnector as needed.
8159 */
8160
54d76575 8161 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
e7b07cee 8162
f1ad2f5e 8163 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 8164
54d76575 8165 if (!dm_new_crtc_state->stream) {
e7b07cee 8166 /*
8167 * This could happen because of issues with
8168 * userspace notification delivery.
8169 * In this case userspace tries to set a mode on a
8170 * display which is in fact disconnected.
8171 * dc_sink is NULL in this case on aconnector.
8172 * We expect a mode-reset to come soon.
8173 *
8174 * This can also happen when an unplug is done
8175 * during the resume sequence.
8176 *
8177 * In this case, we want to pretend we still
8178 * have a sink to keep the pipe running so that
8179 * hw state is consistent with the sw state.
8180 */
f1ad2f5e 8181 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8182 __func__, acrtc->base.base.id);
8183 continue;
8184 }
8185
54d76575
LSL
8186 if (dm_old_crtc_state->stream)
8187 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
e7b07cee 8188
97028037
LP
8189 pm_runtime_get_noresume(dev->dev);
8190
e7b07cee 8191 acrtc->enabled = true;
0bc9706d
LSL
8192 acrtc->hw_mode = new_crtc_state->mode;
8193 crtc->hwmode = new_crtc_state->mode;
6ee90e88 8194 mode_set_reset_required = true;
0bc9706d 8195 } else if (modereset_required(new_crtc_state)) {
f1ad2f5e 8196 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 8197 /* i.e. reset mode */
6ee90e88 8198 if (dm_old_crtc_state->stream)
54d76575 8199 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
6ee90e88 8200 mode_set_reset_required = true;
e7b07cee
HW
8201 }
8202 } /* for_each_crtc_in_state() */
8203
eb3dc897 8204 if (dc_state) {
6ee90e88 8205 /* If there was a mode set or reset, disable eDP PSR */
8206 if (mode_set_reset_required)
8207 amdgpu_dm_psr_disable_all(dm);
8208
eb3dc897 8209 dm_enable_per_frame_crtc_master_sync(dc_state);
674e78ac 8210 mutex_lock(&dm->dc_lock);
eb3dc897 8211 WARN_ON(!dc_commit_state(dm->dc, dc_state));
674e78ac 8212 mutex_unlock(&dm->dc_lock);
fa2123db 8213 }
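	/*
	 * Note: dc_commit_state() above is the point where the constructed
	 * DC context is actually programmed to hardware; per-frame CRTC
	 * master sync is applied first so streams needing it are flagged
	 * before the commit, and the WARN_ON fires if DC rejects a state
	 * that already passed atomic check.
	 */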
e7b07cee 8214
0bc9706d 8215 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 8216 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 8217
54d76575 8218 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 8219
54d76575 8220 if (dm_new_crtc_state->stream != NULL) {
e7b07cee 8221 const struct dc_stream_status *status =
54d76575 8222 dc_stream_get_status(dm_new_crtc_state->stream);
e7b07cee 8223
eb3dc897 8224 if (!status)
8225 status = dc_stream_get_status_from_state(dc_state,
8226 dm_new_crtc_state->stream);
e7b07cee 8227 if (!status)
54d76575 8228 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8229 else
8230 acrtc->otg_inst = status->primary_otg_inst;
8231 }
8232 }
0c8620d6
BL
8233#ifdef CONFIG_DRM_AMD_DC_HDCP
8234 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8235 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8236 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8237 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8238
8239 new_crtc_state = NULL;
8240
8241 if (acrtc)
8242 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8243
8244 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8245
8246 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8247 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8248 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8249 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
97f6c917 8250 dm_new_con_state->update_hdcp = true;
0c8620d6
BL
8251 continue;
8252 }
8253
8254 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8255 hdcp_update_display(
8256 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
23eb4191 8257 new_con_state->hdcp_content_type,
8258 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
8259 : false);
0c8620d6
BL
8260 }
8261#endif
e7b07cee 8262
02d6a6fc 8263 /* Handle connector state changes */
c2cea706 8264 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
54d76575
LSL
8265 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8266 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8267 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
efc8278e 8268 struct dc_surface_update dummy_updates[MAX_SURFACES];
19afd799 8269 struct dc_stream_update stream_update;
b232d4ed 8270 struct dc_info_packet hdr_packet;
e7b07cee 8271 struct dc_stream_status *status = NULL;
b232d4ed 8272 bool abm_changed, hdr_changed, scaling_changed;
e7b07cee 8273
efc8278e 8274 memset(&dummy_updates, 0, sizeof(dummy_updates));
19afd799
NC
8275 memset(&stream_update, 0, sizeof(stream_update));
8276
44d09c6a 8277 if (acrtc) {
0bc9706d 8278 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
44d09c6a
HW
8279 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8280 }
0bc9706d 8281
e7b07cee 8282 /* Skip any modesets/resets */
0bc9706d 8283 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8284 continue;
8285
54d76575 8286 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
c1ee92f9
DF
8287 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8288
b232d4ed
NK
8289 scaling_changed = is_scaling_state_different(dm_new_con_state,
8290 dm_old_con_state);
8291
8292 abm_changed = dm_new_crtc_state->abm_level !=
8293 dm_old_crtc_state->abm_level;
8294
8295 hdr_changed =
8296 is_hdr_metadata_different(old_con_state, new_con_state);
8297
8298 if (!scaling_changed && !abm_changed && !hdr_changed)
c1ee92f9 8299 continue;
e7b07cee 8300
b6e881c9 8301 stream_update.stream = dm_new_crtc_state->stream;
b232d4ed 8302 if (scaling_changed) {
02d6a6fc 8303 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
b6e881c9 8304 dm_new_con_state, dm_new_crtc_state->stream);
e7b07cee 8305
02d6a6fc
DF
8306 stream_update.src = dm_new_crtc_state->stream->src;
8307 stream_update.dst = dm_new_crtc_state->stream->dst;
8308 }
8309
b232d4ed 8310 if (abm_changed) {
02d6a6fc
DF
8311 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8312
8313 stream_update.abm_level = &dm_new_crtc_state->abm_level;
8314 }
70e8ffc5 8315
b232d4ed
NK
8316 if (hdr_changed) {
8317 fill_hdr_info_packet(new_con_state, &hdr_packet);
8318 stream_update.hdr_static_metadata = &hdr_packet;
8319 }
8320
54d76575 8321 status = dc_stream_get_status(dm_new_crtc_state->stream);
e7b07cee 8322 WARN_ON(!status);
3be5262e 8323 WARN_ON(!status->plane_count);
e7b07cee 8324
02d6a6fc
DF
8325 /*
8326 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8327 * Here we create an empty update on each plane.
8328 * To fix this, DC should permit updating only stream properties.
8329 */
8330 for (j = 0; j < status->plane_count; j++)
efc8278e 8331 dummy_updates[j].surface = status->plane_states[0];
02d6a6fc
DF
8332
8333
8334 mutex_lock(&dm->dc_lock);
8335 dc_commit_updates_for_stream(dm->dc,
efc8278e 8336 dummy_updates,
8337 status->plane_count,
8338 dm_new_crtc_state->stream,
8339 &stream_update,
8340 dc_state);
02d6a6fc 8341 mutex_unlock(&dm->dc_lock);
e7b07cee
HW
8342 }
8343
b5e83f6f 8344 /* Count number of newly disabled CRTCs for dropping PM refs later. */
e1fc2dca 8345 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
057be086 8346 new_crtc_state, i) {
fe2a1965
LP
8347 if (old_crtc_state->active && !new_crtc_state->active)
8348 crtc_disable_count++;
8349
54d76575 8350 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e1fc2dca 8351 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
66b0c973 8352
585d450c
AP
8353 /* For freesync config update on crtc state and params for irq */
8354 update_stream_irq_parameters(dm, dm_new_crtc_state);
057be086 8355
66b0c973
MK
8356 /* Handle vrr on->off / off->on transitions */
8357 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8358 dm_new_crtc_state);
e7b07cee
HW
8359 }
8360
8fe684e9
NK
8361 /*
8362 * Enable interrupts for CRTCs that are newly enabled or went through
8363 * a modeset. This is intentionally deferred until after the front end
8364 * state is modified, so that the OTG is already on by the time the IRQ
8365 * handlers run and they never access stale or invalid state.
8366 */
8367 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8368 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8369
585d450c
AP
8370 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8371
8fe684e9
NK
8372 if (new_crtc_state->active &&
8373 (!old_crtc_state->active ||
8374 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
585d450c
AP
8375 dc_stream_retain(dm_new_crtc_state->stream);
8376 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8fe684e9 8377 manage_dm_interrupts(adev, acrtc, true);
e2881d6d 8378
24eb9374 8379#ifdef CONFIG_DEBUG_FS
8fe684e9
NK
8380 /**
8381 * Frontend may have changed so reapply the CRC capture
8382 * settings for the stream.
8383 */
8384 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
c920888c 8385
e2881d6d 8386 if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
c920888c 8387 amdgpu_dm_crtc_configure_crc_source(
8388 crtc, dm_new_crtc_state,
8389 dm_new_crtc_state->crc_src);
8390 }
24eb9374 8391#endif
8fe684e9
NK
8392 }
8393 }
e7b07cee 8394
420cd472 8395 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
4d85f45c 8396 if (new_crtc_state->async_flip)
8397 wait_for_vblank = false;
8398
e7b07cee 8399 /* update planes when needed per crtc*/
5cc6dcbd 8400 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
54d76575 8401 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 8402
54d76575 8403 if (dm_new_crtc_state->stream)
eb3dc897 8404 amdgpu_dm_commit_planes(state, dc_state, dev,
420cd472 8405 dm, crtc, wait_for_vblank);
e7b07cee
HW
8406 }
8407
6ce8f316
NK
8408 /* Update audio instances for each connector. */
8409 amdgpu_dm_commit_audio(dev, state);
8410
e7b07cee
HW
8411 /*
8412 * send vblank event on all events not handled in flip and
8413 * mark consumed event for drm_atomic_helper_commit_hw_done
8414 */
4a580877 8415 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
0bc9706d 8416 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 8417
0bc9706d
LSL
8418 if (new_crtc_state->event)
8419 drm_send_event_locked(dev, &new_crtc_state->event->base);
e7b07cee 8420
0bc9706d 8421 new_crtc_state->event = NULL;
e7b07cee 8422 }
4a580877 8423 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
e7b07cee 8424
29c8f234
LL
8425 /* Signal HW programming completion */
8426 drm_atomic_helper_commit_hw_done(state);
e7b07cee
HW
8427
8428 if (wait_for_vblank)
320a1274 8429 drm_atomic_helper_wait_for_flip_done(dev, state);
e7b07cee
HW
8430
8431 drm_atomic_helper_cleanup_planes(dev, state);
97028037 8432
5f6fab24
AD
8433 /* return the stolen vga memory back to VRAM */
8434 if (!adev->mman.keep_stolen_vga_memory)
8435 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8436 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8437
1f6010a9
DF
8438 /*
8439 * Finally, drop a runtime PM reference for each newly disabled CRTC,
8440 * so we can put the GPU into runtime suspend if we're not driving any
8441 * displays anymore.
8442 */
fe2a1965
LP
8443 for (i = 0; i < crtc_disable_count; i++)
8444 pm_runtime_put_autosuspend(dev->dev);
97028037 8445 pm_runtime_mark_last_busy(dev->dev);
eb3dc897
NK
8446
8447 if (dc_state_temp)
8448 dc_release_state(dc_state_temp);
e7b07cee
HW
8449}
8450
8451
8452static int dm_force_atomic_commit(struct drm_connector *connector)
8453{
8454 int ret = 0;
8455 struct drm_device *ddev = connector->dev;
8456 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8457 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8458 struct drm_plane *plane = disconnected_acrtc->base.primary;
8459 struct drm_connector_state *conn_state;
8460 struct drm_crtc_state *crtc_state;
8461 struct drm_plane_state *plane_state;
8462
8463 if (!state)
8464 return -ENOMEM;
8465
8466 state->acquire_ctx = ddev->mode_config.acquire_ctx;
8467
8468 /* Construct an atomic state to restore the previous display settings */
8469
8470 /*
8471 * Attach connectors to drm_atomic_state
8472 */
8473 conn_state = drm_atomic_get_connector_state(state, connector);
8474
8475 ret = PTR_ERR_OR_ZERO(conn_state);
8476 if (ret)
2dc39051 8477 goto out;
e7b07cee
HW
8478
8479 /* Attach crtc to drm_atomic_state*/
8480 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8481
8482 ret = PTR_ERR_OR_ZERO(crtc_state);
8483 if (ret)
2dc39051 8484 goto out;
e7b07cee
HW
8485
8486 /* force a restore */
8487 crtc_state->mode_changed = true;
8488
8489 /* Attach plane to drm_atomic_state */
8490 plane_state = drm_atomic_get_plane_state(state, plane);
8491
8492 ret = PTR_ERR_OR_ZERO(plane_state);
8493 if (ret)
2dc39051 8494 goto out;
e7b07cee
HW
8495
8496 /* Call commit internally with the state we just constructed */
8497 ret = drm_atomic_commit(state);
e7b07cee 8498
2dc39051 8499out:
e7b07cee 8500 drm_atomic_state_put(state);
2dc39051
VL
8501 if (ret)
8502 DRM_ERROR("Restoring old state failed with %i\n", ret);
e7b07cee
HW
8503
8504 return ret;
8505}
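/*
 * Usage sketch (illustrative, not additional driver code): the restore
 * path below is effectively the only caller, and reduces to
 *
 *	if (acrtc_state->stream->sink != aconnector->dc_sink)
 *		dm_force_atomic_commit(&aconnector->base);
 *
 * i.e. force a modeset whenever the sink changed behind userspace's back.
 */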
8506
8507/*
1f6010a9
DF
8508 * This function handles all cases when a set mode does not come upon hotplug.
8509 * This includes when a display is unplugged then plugged back into the
8510 * same port, and when running without usermode desktop manager support.
e7b07cee 8511 */
3ee6b26b
AD
8512void dm_restore_drm_connector_state(struct drm_device *dev,
8513 struct drm_connector *connector)
e7b07cee 8514{
c84dec2f 8515 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
8516 struct amdgpu_crtc *disconnected_acrtc;
8517 struct dm_crtc_state *acrtc_state;
8518
8519 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8520 return;
8521
8522 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
70e8ffc5
HW
8523 if (!disconnected_acrtc)
8524 return;
e7b07cee 8525
70e8ffc5
HW
8526 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8527 if (!acrtc_state->stream)
8528 return;
8529
8530 /*
8531 * If the previous sink is not released and different from the current,
8532 * we deduce we are in a state where we can not rely on usermode call
8533 * to turn on the display, so we do it here
8534 */
8535 if (acrtc_state->stream->sink != aconnector->dc_sink)
8536 dm_force_atomic_commit(&aconnector->base);
8537}
8538
1f6010a9 8539/*
e7b07cee
HW
8540 * Grabs all modesetting locks to serialize against any blocking commits,
8541 * then waits for completion of all non-blocking commits.
8542 */
3ee6b26b
AD
8543static int do_aquire_global_lock(struct drm_device *dev,
8544 struct drm_atomic_state *state)
e7b07cee
HW
8545{
8546 struct drm_crtc *crtc;
8547 struct drm_crtc_commit *commit;
8548 long ret;
8549
1f6010a9
DF
8550 /*
8551 * Adding all modeset locks to acquire_ctx will
8552 * ensure that when the framework releases it, the
8553 * extra locks we are taking here will get released too.
8554 */
8555 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8556 if (ret)
8557 return ret;
8558
8559 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8560 spin_lock(&crtc->commit_lock);
8561 commit = list_first_entry_or_null(&crtc->commit_list,
8562 struct drm_crtc_commit, commit_entry);
8563 if (commit)
8564 drm_crtc_commit_get(commit);
8565 spin_unlock(&crtc->commit_lock);
8566
8567 if (!commit)
8568 continue;
8569
1f6010a9
DF
8570 /*
8571 * Make sure all pending HW programming has completed and
8572 * all page flips are done.
8573 */
8574 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8575
8576 if (ret > 0)
8577 ret = wait_for_completion_interruptible_timeout(
8578 &commit->flip_done, 10*HZ);
8579
8580 if (ret == 0)
8581 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
b830ebc9 8582 "timed out\n", crtc->base.id, crtc->name);
e7b07cee
HW
8583
8584 drm_crtc_commit_put(commit);
8585 }
8586
8587 return ret < 0 ? ret : 0;
8588}
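/*
 * Note: the 10*HZ timeouts above are a defensive upper bound; a healthy
 * commit signals hw_done and flip_done within a few frames, so expiry
 * almost certainly indicates a hung pipe rather than a slow one.
 */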
8589
bb47de73
NK
8590static void get_freesync_config_for_crtc(
8591 struct dm_crtc_state *new_crtc_state,
8592 struct dm_connector_state *new_con_state)
98e6436d
AK
8593{
8594 struct mod_freesync_config config = {0};
98e6436d
AK
8595 struct amdgpu_dm_connector *aconnector =
8596 to_amdgpu_dm_connector(new_con_state->base.connector);
a057ec46 8597 struct drm_display_mode *mode = &new_crtc_state->base.mode;
0ab925d3 8598 int vrefresh = drm_mode_vrefresh(mode);
98e6436d 8599
a057ec46 8600 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
0ab925d3
NK
8601 vrefresh >= aconnector->min_vfreq &&
8602 vrefresh <= aconnector->max_vfreq;
bb47de73 8603
a057ec46
IB
8604 if (new_crtc_state->vrr_supported) {
8605 new_crtc_state->stream->ignore_msa_timing_param = true;
bb47de73 8606 config.state = new_crtc_state->base.vrr_enabled ?
98e6436d
AK
8607 VRR_STATE_ACTIVE_VARIABLE :
8608 VRR_STATE_INACTIVE;
8609 config.min_refresh_in_uhz =
8610 aconnector->min_vfreq * 1000000;
8611 config.max_refresh_in_uhz =
8612 aconnector->max_vfreq * 1000000;
69ff8845 8613 config.vsif_supported = true;
180db303 8614 config.btr = true;
98e6436d
AK
8615 }
8616
bb47de73
NK
8617 new_crtc_state->freesync_config = config;
8618}
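/*
 * Worked example (illustrative values): a panel advertising a 48-144 Hz
 * FreeSync range with VRR enabled yields
 *
 *	config.min_refresh_in_uhz = 48 * 1000000;	/* 48,000,000 uHz */
 *	config.max_refresh_in_uhz = 144 * 1000000;	/* 144,000,000 uHz */
 *
 * and any mode whose vrefresh falls within 48..144 Hz sets
 * vrr_supported on the CRTC state.
 */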
98e6436d 8619
bb47de73
NK
8620static void reset_freesync_config_for_crtc(
8621 struct dm_crtc_state *new_crtc_state)
8622{
8623 new_crtc_state->vrr_supported = false;
98e6436d 8624
bb47de73
NK
8625 memset(&new_crtc_state->vrr_infopacket, 0,
8626 sizeof(new_crtc_state->vrr_infopacket));
98e6436d
AK
8627}
8628
4b9674e5
LL
8629static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8630 struct drm_atomic_state *state,
8631 struct drm_crtc *crtc,
8632 struct drm_crtc_state *old_crtc_state,
8633 struct drm_crtc_state *new_crtc_state,
8634 bool enable,
8635 bool *lock_and_validation_needed)
e7b07cee 8636{
eb3dc897 8637 struct dm_atomic_state *dm_state = NULL;
54d76575 8638 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9635b754 8639 struct dc_stream_state *new_stream;
62f55537 8640 int ret = 0;
d4d4a645 8641
1f6010a9
DF
8642 /*
8643 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
8644 * update changed items
8645 */
4b9674e5
LL
8646 struct amdgpu_crtc *acrtc = NULL;
8647 struct amdgpu_dm_connector *aconnector = NULL;
8648 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8649 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
e7b07cee 8650
4b9674e5 8651 new_stream = NULL;
9635b754 8652
4b9674e5
LL
8653 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8654 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8655 acrtc = to_amdgpu_crtc(crtc);
4b9674e5 8656 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
19f89e23 8657
4b9674e5
LL
8658 /* TODO This hack should go away */
8659 if (aconnector && enable) {
8660 /* Make sure fake sink is created in plug-in scenario */
8661 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8662 &aconnector->base);
8663 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8664 &aconnector->base);
19f89e23 8665
4b9674e5
LL
8666 if (IS_ERR(drm_new_conn_state)) {
8667 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8668 goto fail;
8669 }
19f89e23 8670
4b9674e5
LL
8671 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8672 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
19f89e23 8673
02d35a67
JFZ
8674 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8675 goto skip_modeset;
8676
cbd14ae7
SW
8677 new_stream = create_validate_stream_for_sink(aconnector,
8678 &new_crtc_state->mode,
8679 dm_new_conn_state,
8680 dm_old_crtc_state->stream);
19f89e23 8681
4b9674e5
LL
8682 /*
8683 * We can have no stream on ACTION_SET if a display
8684 * was disconnected during S3; in this case it is not an
8685 * error. The OS will be updated after detection and
8686 * will do the right thing on the next atomic commit.
8687 */
19f89e23 8688
4b9674e5
LL
8689 if (!new_stream) {
8690 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8691 __func__, acrtc->base.base.id);
8692 ret = -ENOMEM;
8693 goto fail;
8694 }
e7b07cee 8695
3d4e52d0
VL
8696 /*
8697 * TODO: Check VSDB bits to decide whether this should
8698 * be enabled or not.
8699 */
8700 new_stream->triggered_crtc_reset.enabled =
8701 dm->force_timing_sync;
8702
4b9674e5 8703 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
98e6436d 8704
88694af9
NK
8705 ret = fill_hdr_info_packet(drm_new_conn_state,
8706 &new_stream->hdr_static_metadata);
8707 if (ret)
8708 goto fail;
8709
7e930949
NK
8710 /*
8711 * If we already removed the old stream from the context
8712 * (and set the new stream to NULL) then we can't reuse
8713 * the old stream even if the stream and scaling are unchanged.
8714 * We would hit the BUG_ON and get a black screen.
8715 *
8716 * TODO: Refactor this function to allow this check to work
8717 * in all conditions.
8718 */
8719 if (dm_new_crtc_state->stream &&
8720 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8721 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8722 new_crtc_state->mode_changed = false;
8723 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
8724 new_crtc_state->mode_changed);
62f55537 8725 }
4b9674e5 8726 }
b830ebc9 8727
02d35a67 8728 /* mode_changed flag may get updated above, need to check again */
4b9674e5
LL
8729 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8730 goto skip_modeset;
e7b07cee 8731
4b9674e5
LL
8732 DRM_DEBUG_DRIVER(
8733 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8734 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8735 "connectors_changed:%d\n",
8736 acrtc->crtc_id,
8737 new_crtc_state->enable,
8738 new_crtc_state->active,
8739 new_crtc_state->planes_changed,
8740 new_crtc_state->mode_changed,
8741 new_crtc_state->active_changed,
8742 new_crtc_state->connectors_changed);
62f55537 8743
4b9674e5
LL
8744 /* Remove stream for any changed/disabled CRTC */
8745 if (!enable) {
62f55537 8746
4b9674e5
LL
8747 if (!dm_old_crtc_state->stream)
8748 goto skip_modeset;
eb3dc897 8749
4b9674e5
LL
8750 ret = dm_atomic_get_state(state, &dm_state);
8751 if (ret)
8752 goto fail;
e7b07cee 8753
4b9674e5
LL
8754 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8755 crtc->base.id);
62f55537 8756
4b9674e5
LL
8757 /* i.e. reset mode */
8758 if (dc_remove_stream_from_ctx(
8759 dm->dc,
8760 dm_state->context,
8761 dm_old_crtc_state->stream) != DC_OK) {
8762 ret = -EINVAL;
8763 goto fail;
8764 }
62f55537 8765
4b9674e5
LL
8766 dc_stream_release(dm_old_crtc_state->stream);
8767 dm_new_crtc_state->stream = NULL;
bb47de73 8768
4b9674e5 8769 reset_freesync_config_for_crtc(dm_new_crtc_state);
62f55537 8770
4b9674e5 8771 *lock_and_validation_needed = true;
62f55537 8772
4b9674e5
LL
8773 } else { /* Add stream for any updated/enabled CRTC */
8774 /*
8775 * Quick fix to prevent a NULL pointer dereference on new_stream when
8776 * newly added MST connectors are not found in the existing crtc_state in chained mode.
8777 * TODO: need to dig out the root cause of this.
8778 */
8779 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8780 goto skip_modeset;
62f55537 8781
4b9674e5
LL
8782 if (modereset_required(new_crtc_state))
8783 goto skip_modeset;
62f55537 8784
4b9674e5
LL
8785 if (modeset_required(new_crtc_state, new_stream,
8786 dm_old_crtc_state->stream)) {
62f55537 8787
4b9674e5 8788 WARN_ON(dm_new_crtc_state->stream);
eb3dc897 8789
4b9674e5
LL
8790 ret = dm_atomic_get_state(state, &dm_state);
8791 if (ret)
8792 goto fail;
27b3f4fc 8793
4b9674e5 8794 dm_new_crtc_state->stream = new_stream;
62f55537 8795
4b9674e5 8796 dc_stream_retain(new_stream);
1dc90497 8797
4b9674e5
LL
8798 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8799 crtc->base.id);
1dc90497 8800
4b9674e5
LL
8801 if (dc_add_stream_to_ctx(
8802 dm->dc,
8803 dm_state->context,
8804 dm_new_crtc_state->stream) != DC_OK) {
8805 ret = -EINVAL;
8806 goto fail;
9b690ef3
BL
8807 }
8808
4b9674e5
LL
8809 *lock_and_validation_needed = true;
8810 }
8811 }
e277adc5 8812
4b9674e5
LL
8813skip_modeset:
8814 /* Release extra reference */
8815 if (new_stream)
8816 dc_stream_release(new_stream);
e277adc5 8817
4b9674e5
LL
8818 /*
8819 * We want to do dc stream updates that do not require a
8820 * full modeset below.
8821 */
2afda735 8822 if (!(enable && aconnector && new_crtc_state->active))
4b9674e5
LL
8823 return 0;
8824 /*
8825 * Given the above conditions, the dc state cannot be NULL because:
8826 * 1. We're in the process of enabling CRTCs (the stream has just
8827 * been added to the dc context, or is already in it),
8828 * 2. it has a valid connector attached, and
8829 * 3. it is currently active and enabled.
8830 * => The dc stream state currently exists.
8831 */
8832 BUG_ON(dm_new_crtc_state->stream == NULL);
a9e8d275 8833
4b9674e5
LL
8834 /* Scaling or underscan settings */
8835 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8836 update_stream_scaling_settings(
8837 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
98e6436d 8838
b05e2c5e
DF
8839 /* ABM settings */
8840 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8841
4b9674e5
LL
8842 /*
8843 * Color management settings. We also update color properties
8844 * when a modeset is needed, to ensure it gets reprogrammed.
8845 */
8846 if (dm_new_crtc_state->base.color_mgmt_changed ||
8847 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
cf020d49 8848 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
4b9674e5
LL
8849 if (ret)
8850 goto fail;
62f55537 8851 }
e7b07cee 8852
4b9674e5
LL
8853 /* Update Freesync settings. */
8854 get_freesync_config_for_crtc(dm_new_crtc_state,
8855 dm_new_conn_state);
8856
62f55537 8857 return ret;
9635b754
DS
8858
8859fail:
8860 if (new_stream)
8861 dc_stream_release(new_stream);
8862 return ret;
62f55537 8863}
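/*
 * Note: atomic check calls dm_update_crtc_state() twice per CRTC --
 * first with enable == false to remove streams, then with enable == true
 * to add them -- so a modeset is expressed as a remove/add pair on the
 * DC context.
 */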
9b690ef3 8864
f6ff2a08
NK
8865static bool should_reset_plane(struct drm_atomic_state *state,
8866 struct drm_plane *plane,
8867 struct drm_plane_state *old_plane_state,
8868 struct drm_plane_state *new_plane_state)
8869{
8870 struct drm_plane *other;
8871 struct drm_plane_state *old_other_state, *new_other_state;
8872 struct drm_crtc_state *new_crtc_state;
8873 int i;
8874
70a1efac
NK
8875 /*
8876 * TODO: Remove this hack once the checks below are sufficient
8877 * to determine when we need to reset all the planes on
8878 * the stream.
8879 */
8880 if (state->allow_modeset)
8881 return true;
8882
f6ff2a08
NK
8883 /* Exit early if we know that we're adding or removing the plane. */
8884 if (old_plane_state->crtc != new_plane_state->crtc)
8885 return true;
8886
8887 /* old crtc == new_crtc == NULL, plane not in context. */
8888 if (!new_plane_state->crtc)
8889 return false;
8890
8891 new_crtc_state =
8892 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8893
8894 if (!new_crtc_state)
8895 return true;
8896
7316c4ad
NK
8897 /* CRTC Degamma changes currently require us to recreate planes. */
8898 if (new_crtc_state->color_mgmt_changed)
8899 return true;
8900
f6ff2a08
NK
8901 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8902 return true;
8903
8904 /*
8905 * If there are any new primary or overlay planes being added or
8906 * removed then the z-order can potentially change. To ensure
8907 * correct z-order and pipe acquisition the current DC architecture
8908 * requires us to remove and recreate all existing planes.
8909 *
8910 * TODO: Come up with a more elegant solution for this.
8911 */
8912 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
6eed95b0 8913 struct amdgpu_framebuffer *old_afb, *new_afb;
f6ff2a08
NK
8914 if (other->type == DRM_PLANE_TYPE_CURSOR)
8915 continue;
8916
8917 if (old_other_state->crtc != new_plane_state->crtc &&
8918 new_other_state->crtc != new_plane_state->crtc)
8919 continue;
8920
8921 if (old_other_state->crtc != new_other_state->crtc)
8922 return true;
8923
dc4cb30d
NK
8924 /* Src/dst size and scaling updates. */
8925 if (old_other_state->src_w != new_other_state->src_w ||
8926 old_other_state->src_h != new_other_state->src_h ||
8927 old_other_state->crtc_w != new_other_state->crtc_w ||
8928 old_other_state->crtc_h != new_other_state->crtc_h)
8929 return true;
8930
8931 /* Rotation / mirroring updates. */
8932 if (old_other_state->rotation != new_other_state->rotation)
8933 return true;
8934
8935 /* Blending updates. */
8936 if (old_other_state->pixel_blend_mode !=
8937 new_other_state->pixel_blend_mode)
8938 return true;
8939
8940 /* Alpha updates. */
8941 if (old_other_state->alpha != new_other_state->alpha)
8942 return true;
8943
8944 /* Colorspace changes. */
8945 if (old_other_state->color_range != new_other_state->color_range ||
8946 old_other_state->color_encoding != new_other_state->color_encoding)
8947 return true;
8948
9a81cc60
NK
8949 /* Framebuffer checks fall at the end. */
8950 if (!old_other_state->fb || !new_other_state->fb)
8951 continue;
8952
8953 /* Pixel format changes can require bandwidth updates. */
8954 if (old_other_state->fb->format != new_other_state->fb->format)
8955 return true;
8956
6eed95b0
BN
8957 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
8958 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9a81cc60
NK
8959
8960 /* Tiling and DCC changes also require bandwidth updates. */
37384b3f
BN
8961 if (old_afb->tiling_flags != new_afb->tiling_flags ||
8962 old_afb->base.modifier != new_afb->base.modifier)
8963 return true;
8964 }
8965
8966 return false;
8967}
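/*
 * Summary (informal): moving a plane between CRTCs, any modeset, color
 * management changes, or src/dst, rotation, blending, alpha, colorspace,
 * pixel format, tiling or DCC changes on a plane sharing the CRTC all
 * force a plane reset; pure surface flips fall through and return false.
 */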
8968
b0455fda
SS
8969static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
8970 struct drm_plane_state *new_plane_state,
8971 struct drm_framebuffer *fb)
8972{
e72868c4
SS
8973 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
8974 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
b0455fda 8975 unsigned int pitch;
e72868c4 8976 bool linear;
b0455fda
SS
8977
8978 if (fb->width > new_acrtc->max_cursor_width ||
8979 fb->height > new_acrtc->max_cursor_height) {
8980 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
8981 new_plane_state->fb->width,
8982 new_plane_state->fb->height);
8983 return -EINVAL;
8984 }
8985 if (new_plane_state->src_w != fb->width << 16 ||
8986 new_plane_state->src_h != fb->height << 16) {
8987 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
8988 return -EINVAL;
8989 }
8990
8991 /* Pitch in pixels */
8992 pitch = fb->pitches[0] / fb->format->cpp[0];
8993
8994 if (fb->width != pitch) {
8995 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
8996 fb->width, pitch);
8997 return -EINVAL;
8998 }
8999
9000 switch (pitch) {
9001 case 64:
9002 case 128:
9003 case 256:
9004 /* FB pitch is supported by cursor plane */
9005 break;
9006 default:
9007 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9008 return -EINVAL;
9009 }
9010
e72868c4
SS
9011 /* Core DRM takes care of checking FB modifiers, so we only need to
9012 * check tiling flags when the FB doesn't have a modifier. */
9013 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9014 if (adev->family < AMDGPU_FAMILY_AI) {
9015 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9016 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9017 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9018 } else {
9019 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9020 }
9021 if (!linear) {
9022 DRM_DEBUG_ATOMIC("Cursor FB not linear");
9023 return -EINVAL;
9024 }
9025 }
9026
b0455fda
SS
9027 return 0;
9028}
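/*
 * Worked example (illustrative): a 64x64 ARGB8888 cursor FB has
 * pitches[0] = 256 bytes and cpp[0] = 4, so pitch = 64 pixels. That
 * matches fb->width and is one of the supported pitches (64/128/256),
 * so the checks above pass.
 */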
9029
9e869063
LL
9030static int dm_update_plane_state(struct dc *dc,
9031 struct drm_atomic_state *state,
9032 struct drm_plane *plane,
9033 struct drm_plane_state *old_plane_state,
9034 struct drm_plane_state *new_plane_state,
9035 bool enable,
9036 bool *lock_and_validation_needed)
62f55537 9037{
eb3dc897
NK
9038
9039 struct dm_atomic_state *dm_state = NULL;
62f55537 9040 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
0bc9706d 9041 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
54d76575 9042 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
54d76575 9043 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
626bf90f 9044 struct amdgpu_crtc *new_acrtc;
f6ff2a08 9045 bool needs_reset;
62f55537 9046 int ret = 0;
e7b07cee 9047
9b690ef3 9048
9e869063
LL
9049 new_plane_crtc = new_plane_state->crtc;
9050 old_plane_crtc = old_plane_state->crtc;
9051 dm_new_plane_state = to_dm_plane_state(new_plane_state);
9052 dm_old_plane_state = to_dm_plane_state(old_plane_state);
62f55537 9053
626bf90f
SS
9054 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9055 if (!enable || !new_plane_crtc ||
9056 drm_atomic_plane_disabling(plane->state, new_plane_state))
9057 return 0;
9058
9059 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9060
5f581248
SS
9061 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9062 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9063 return -EINVAL;
9064 }
9065
24f99d2b 9066 if (new_plane_state->fb) {
b0455fda
SS
9067 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9068 new_plane_state->fb);
9069 if (ret)
9070 return ret;
24f99d2b
SS
9071 }
9072
9e869063 9073 return 0;
626bf90f 9074 }
9b690ef3 9075
f6ff2a08
NK
9076 needs_reset = should_reset_plane(state, plane, old_plane_state,
9077 new_plane_state);
9078
9e869063
LL
9079 /* Remove any changed/removed planes */
9080 if (!enable) {
f6ff2a08 9081 if (!needs_reset)
9e869063 9082 return 0;
a7b06724 9083
9e869063
LL
9084 if (!old_plane_crtc)
9085 return 0;
62f55537 9086
9e869063
LL
9087 old_crtc_state = drm_atomic_get_old_crtc_state(
9088 state, old_plane_crtc);
9089 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9b690ef3 9090
9e869063
LL
9091 if (!dm_old_crtc_state->stream)
9092 return 0;
62f55537 9093
9e869063
LL
9094 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9095 plane->base.id, old_plane_crtc->base.id);
9b690ef3 9096
9e869063
LL
9097 ret = dm_atomic_get_state(state, &dm_state);
9098 if (ret)
9099 return ret;
eb3dc897 9100
9e869063
LL
9101 if (!dc_remove_plane_from_context(
9102 dc,
9103 dm_old_crtc_state->stream,
9104 dm_old_plane_state->dc_state,
9105 dm_state->context)) {
62f55537 9106
c3537613 9107 return -EINVAL;
9e869063 9108 }
e7b07cee 9109
9b690ef3 9110
9e869063
LL
9111 dc_plane_state_release(dm_old_plane_state->dc_state);
9112 dm_new_plane_state->dc_state = NULL;
1dc90497 9113
9e869063 9114 *lock_and_validation_needed = true;
1dc90497 9115
9e869063
LL
9116 } else { /* Add new planes */
9117 struct dc_plane_state *dc_new_plane_state;
1dc90497 9118
9e869063
LL
9119 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9120 return 0;
e7b07cee 9121
9e869063
LL
9122 if (!new_plane_crtc)
9123 return 0;
e7b07cee 9124
9e869063
LL
9125 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9126 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1dc90497 9127
9e869063
LL
9128 if (!dm_new_crtc_state->stream)
9129 return 0;
62f55537 9130
f6ff2a08 9131 if (!needs_reset)
9e869063 9132 return 0;
62f55537 9133
8c44515b
AP
9134 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9135 if (ret)
9136 return ret;
9137
9e869063 9138 WARN_ON(dm_new_plane_state->dc_state);
9b690ef3 9139
9e869063
LL
9140 dc_new_plane_state = dc_create_plane_state(dc);
9141 if (!dc_new_plane_state)
9142 return -ENOMEM;
62f55537 9143
9e869063
LL
9144 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
9145 plane->base.id, new_plane_crtc->base.id);
8c45c5db 9146
695af5f9 9147 ret = fill_dc_plane_attributes(
1348969a 9148 drm_to_adev(new_plane_crtc->dev),
9149 dc_new_plane_state,
9150 new_plane_state,
9151 new_crtc_state);
9152 if (ret) {
9153 dc_plane_state_release(dc_new_plane_state);
9154 return ret;
9155 }
62f55537 9156
9e869063
LL
9157 ret = dm_atomic_get_state(state, &dm_state);
9158 if (ret) {
9159 dc_plane_state_release(dc_new_plane_state);
9160 return ret;
9161 }
eb3dc897 9162
9e869063
LL
9163 /*
9164 * Any atomic check errors that occur after this will
9165 * not need a release. The plane state will be attached
9166 * to the stream, and therefore part of the atomic
9167 * state. It'll be released when the atomic state is
9168 * cleaned.
9169 */
9170 if (!dc_add_plane_to_context(
9171 dc,
9172 dm_new_crtc_state->stream,
9173 dc_new_plane_state,
9174 dm_state->context)) {
62f55537 9175
9e869063
LL
9176 dc_plane_state_release(dc_new_plane_state);
9177 return -EINVAL;
9178 }
8c45c5db 9179
9e869063 9180 dm_new_plane_state->dc_state = dc_new_plane_state;
000b59ea 9181
9e869063
LL
9182 /* Tell DC to do a full surface update every time there
9183 * is a plane change. Inefficient, but works for now.
9184 */
9185 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9186
9187 *lock_and_validation_needed = true;
62f55537 9188 }
e7b07cee
HW
9189
9190
62f55537
AG
9191 return ret;
9192}
a87fa993 9193
12f4849a
SS
9194static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9195 struct drm_crtc *crtc,
9196 struct drm_crtc_state *new_crtc_state)
9197{
9198 struct drm_plane_state *new_cursor_state, *new_primary_state;
9199 int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9200
9201 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9202 * cursor per pipe but it's going to inherit the scaling and
9203 * positioning from the underlying pipe. Check that the cursor plane's
9204 * scaling matches the primary plane's. */
9205
9206 new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9207 new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9208 if (!new_cursor_state || !new_primary_state || !new_cursor_state->fb) {
9209 return 0;
9210 }
9211
9212 cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9213 (new_cursor_state->src_w >> 16);
9214 cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9215 (new_cursor_state->src_h >> 16);
9216
9217 primary_scale_w = new_primary_state->crtc_w * 1000 /
9218 (new_primary_state->src_w >> 16);
9219 primary_scale_h = new_primary_state->crtc_h * 1000 /
9220 (new_primary_state->src_h >> 16);
9221
9222 if (cursor_scale_w != primary_scale_w ||
9223 cursor_scale_h != primary_scale_h) {
9224 DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9225 return -EINVAL;
9226 }
9227
9228 return 0;
9229}
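/*
 * Worked example (illustrative): a 64x64 cursor shown at 64x64
 * (src_w = 64 << 16, crtc_w = 64) has a scale of 64 * 1000 / 64 = 1000.
 * A 1920-wide primary scanned out at 1920 also gives 1000, so the check
 * passes; downscaling the primary to 960 (scale 500) would instead
 * reject the commit with -EINVAL.
 */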
9230
e10517b3 9231#if defined(CONFIG_DRM_AMD_DC_DCN)
44be939f
ML
9232static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9233{
9234 struct drm_connector *connector;
9235 struct drm_connector_state *conn_state;
9236 struct amdgpu_dm_connector *aconnector = NULL;
9237 int i;
9238 for_each_new_connector_in_state(state, connector, conn_state, i) {
9239 if (conn_state->crtc != crtc)
9240 continue;
9241
9242 aconnector = to_amdgpu_dm_connector(connector);
9243 if (!aconnector->port || !aconnector->mst_port)
9244 aconnector = NULL;
9245 else
9246 break;
9247 }
9248
9249 if (!aconnector)
9250 return 0;
9251
9252 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9253}
e10517b3 9254#endif
44be939f 9255
b8592b48
LL
9256/**
9257 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9258 * @dev: The DRM device
9259 * @state: The atomic state to commit
9260 *
9261 * Validate that the given atomic state is programmable by DC into hardware.
9262 * This involves constructing a &struct dc_state reflecting the new hardware
9263 * state we wish to commit, then querying DC to see if it is programmable. It's
9264 * important not to modify the existing DC state. Otherwise, atomic_check
9265 * may unexpectedly commit hardware changes.
9266 *
9267 * When validating the DC state, it's important that the right locks are
9268 * acquired. For full updates case which removes/adds/updates streams on one
9269 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
9270 * that any such full update commit will wait for completion of any outstanding
f6d7c7fa 9271 * flip using DRM's synchronization events.
b8592b48
LL
9272 *
9273 * Note that DM adds the affected connectors for all CRTCs in state, when that
9274 * might not seem necessary. This is because DC stream creation requires the
9275 * DC sink, which is tied to the DRM connector state. Cleaning this up should
9276 * be possible but non-trivial - a possible TODO item.
9277 *
9278 * Return: -Error code if validation failed.
9279 */
7578ecda
AD
9280static int amdgpu_dm_atomic_check(struct drm_device *dev,
9281 struct drm_atomic_state *state)
62f55537 9282{
1348969a 9283 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897 9284 struct dm_atomic_state *dm_state = NULL;
62f55537 9285 struct dc *dc = adev->dm.dc;
62f55537 9286 struct drm_connector *connector;
c2cea706 9287 struct drm_connector_state *old_con_state, *new_con_state;
62f55537 9288 struct drm_crtc *crtc;
fc9e9920 9289 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9e869063
LL
9290 struct drm_plane *plane;
9291 struct drm_plane_state *old_plane_state, *new_plane_state;
74a16675 9292 enum dc_status status;
1e88ad0a 9293 int ret, i;
62f55537 9294 bool lock_and_validation_needed = false;
886876ec 9295 struct dm_crtc_state *dm_old_crtc_state;
62f55537 9296
e8a98235 9297 trace_amdgpu_dm_atomic_check_begin(state);
c44a22b3 9298
62f55537 9299 ret = drm_atomic_helper_check_modeset(dev, state);
01e28f9c
MD
9300 if (ret)
9301 goto fail;
62f55537 9302
c5892a10
SW
9303 /* Check connector changes */
9304 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9305 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9306 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9307
9308 /* Skip connectors that are disabled or part of modeset already. */
9309 if (!old_con_state->crtc && !new_con_state->crtc)
9310 continue;
9311
9312 if (!new_con_state->crtc)
9313 continue;
9314
9315 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9316 if (IS_ERR(new_crtc_state)) {
9317 ret = PTR_ERR(new_crtc_state);
9318 goto fail;
9319 }
9320
9321 if (dm_old_con_state->abm_level !=
9322 dm_new_con_state->abm_level)
9323 new_crtc_state->connectors_changed = true;
9324 }
9325
e10517b3 9326#if defined(CONFIG_DRM_AMD_DC_DCN)
44be939f
ML
9327 if (adev->asic_type >= CHIP_NAVI10) {
9328 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9329 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9330 ret = add_affected_mst_dsc_crtcs(state, crtc);
9331 if (ret)
9332 goto fail;
9333 }
9334 }
9335 }
e10517b3 9336#endif
1e88ad0a 9337 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
886876ec
EB
9338 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9339
1e88ad0a 9340 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
98e6436d 9341 !new_crtc_state->color_mgmt_changed &&
9342 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
9343 dm_old_crtc_state->dsc_force_changed == false)
1e88ad0a 9344 continue;
7bef1af3 9345
1e88ad0a
S
9346 if (!new_crtc_state->enable)
9347 continue;
fc9e9920 9348
1e88ad0a
S
9349 ret = drm_atomic_add_affected_connectors(state, crtc);
9350 if (ret)
9351 return ret;
fc9e9920 9352
1e88ad0a
S
9353 ret = drm_atomic_add_affected_planes(state, crtc);
9354 if (ret)
9355 goto fail;
115a385c 9356
cbac53f7 9357 if (dm_old_crtc_state->dsc_force_changed)
115a385c 9358 new_crtc_state->mode_changed = true;
e7b07cee
HW
9359 }
9360
2d9e6431
NK
9361 /*
9362 * Add all primary and overlay planes on the CRTC to the state
9363 * whenever a plane is enabled to maintain correct z-ordering
9364 * and to enable fast surface updates.
9365 */
9366 drm_for_each_crtc(crtc, dev) {
9367 bool modified = false;
9368
9369 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9370 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9371 continue;
9372
9373 if (new_plane_state->crtc == crtc ||
9374 old_plane_state->crtc == crtc) {
9375 modified = true;
9376 break;
9377 }
9378 }
9379
9380 if (!modified)
9381 continue;
9382
9383 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9384 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9385 continue;
9386
9387 new_plane_state =
9388 drm_atomic_get_plane_state(state, plane);
9389
9390 if (IS_ERR(new_plane_state)) {
9391 ret = PTR_ERR(new_plane_state);
9392 goto fail;
9393 }
9394 }
9395 }
9396
62f55537 9397 /* Remove existing planes if they are modified */
9e869063
LL
9398 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9399 ret = dm_update_plane_state(dc, state, plane,
9400 old_plane_state,
9401 new_plane_state,
9402 false,
9403 &lock_and_validation_needed);
9404 if (ret)
9405 goto fail;
62f55537
AG
9406 }
9407
9408 /* Disable all crtcs which require disable */
4b9674e5
LL
9409 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9410 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9411 old_crtc_state,
9412 new_crtc_state,
9413 false,
9414 &lock_and_validation_needed);
9415 if (ret)
9416 goto fail;
62f55537
AG
9417 }
9418
9419 /* Enable all crtcs which require enable */
4b9674e5
LL
9420 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9421 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9422 old_crtc_state,
9423 new_crtc_state,
9424 true,
9425 &lock_and_validation_needed);
9426 if (ret)
9427 goto fail;
62f55537
AG
9428 }
9429
9430 /* Add new/modified planes */
9e869063
LL
9431 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9432 ret = dm_update_plane_state(dc, state, plane,
9433 old_plane_state,
9434 new_plane_state,
9435 true,
9436 &lock_and_validation_needed);
9437 if (ret)
9438 goto fail;
62f55537
AG
9439 }
9440
b349f76e
ES
9441 /* Run this here since we want to validate the streams we created */
9442 ret = drm_atomic_helper_check_planes(dev, state);
9443 if (ret)
9444 goto fail;
62f55537 9445
12f4849a
SS
9446 /* Check cursor plane scaling */
9447 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9448 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
9449 if (ret)
9450 goto fail;
9451 }
9452
43d10d30
NK
9453 if (state->legacy_cursor_update) {
9454 /*
9455 * This is a fast cursor update coming from the plane update
9456 * helper, check if it can be done asynchronously for better
9457 * performance.
9458 */
9459 state->async_update =
9460 !drm_atomic_helper_async_check(dev, state);
9461
9462 /*
9463 * Skip the remaining global validation if this is an async
9464 * update. Cursor updates can be done without affecting
9465 * state or bandwidth calcs and this avoids the performance
9466 * penalty of locking the private state object and
9467 * allocating a new dc_state.
9468 */
9469 if (state->async_update)
9470 return 0;
9471 }
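	/*
	 * Note: a legacy cursor ioctl that passes
	 * drm_atomic_helper_async_check() returns here and skips
	 * dm_atomic_get_state() and dc_validate_global_state() entirely,
	 * which is what keeps cursor motion cheap.
	 */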
9472
ebdd27e1 9473 /* Check scaling and underscan changes */
1f6010a9 9474 /* TODO: Removed scaling changes validation due to inability to commit
9475 * a new stream into the context w/o causing a full reset. Need to
9476 * decide how to handle this.
9477 */
c2cea706 9478 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
54d76575
LSL
9479 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9480 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9481 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
e7b07cee
HW
9482
9483 /* Skip any modesets/resets */
0bc9706d
LSL
9484 if (!acrtc || drm_atomic_crtc_needs_modeset(
9485 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
9486 continue;
9487
b830ebc9 9488 /* Skip anything that is not a scale or underscan change */
54d76575 9489 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
e7b07cee
HW
9490 continue;
9491
9492 lock_and_validation_needed = true;
9493 }
9494
f6d7c7fa
NK
9495 /**
9496 * Streams and planes are reset when there are changes that affect
9497 * bandwidth. Anything that affects bandwidth needs to go through
9498 * DC global validation to ensure that the configuration can be applied
9499 * to hardware.
9500 *
9501 * We have to currently stall out here in atomic_check for outstanding
9502 * commits to finish in this case because our IRQ handlers reference
9503 * DRM state directly - we can end up disabling interrupts too early
9504 * if we don't.
9505 *
9506 * TODO: Remove this stall and drop DM state private objects.
a87fa993 9507 */
f6d7c7fa 9508 if (lock_and_validation_needed) {
eb3dc897
NK
9509 ret = dm_atomic_get_state(state, &dm_state);
9510 if (ret)
9511 goto fail;
e7b07cee
HW
9512
9513 ret = do_aquire_global_lock(dev, state);
9514 if (ret)
9515 goto fail;
1dc90497 9516
d9fe1a4c 9517#if defined(CONFIG_DRM_AMD_DC_DCN)
8c20a1ed
DF
9518 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
9519 goto fail;
9520
29b9ba74
ML
9521 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
9522 if (ret)
9523 goto fail;
d9fe1a4c 9524#endif
29b9ba74 9525
ded58c7b
ZL
9526 /*
9527 * Perform validation of MST topology in the state:
9528 * We need to perform an MST atomic check before calling
9529 * dc_validate_global_state(), or there is a chance
9530 * of getting stuck in an infinite loop and hanging eventually.
9531 */
9532 ret = drm_dp_mst_atomic_check(state);
9533 if (ret)
9534 goto fail;
74a16675
RS
9535 status = dc_validate_global_state(dc, dm_state->context, false);
9536 if (status != DC_OK) {
9537 DC_LOG_WARNING("DC global validation failure: %s (%d)",
9538 dc_status_to_str(status), status);
e7b07cee
HW
9539 ret = -EINVAL;
9540 goto fail;
9541 }
bd200d19 9542 } else {
674e78ac 9543 /*
bd200d19
NK
9544 * The commit is a fast update. Fast updates shouldn't change
9545 * the DC context or affect global validation, and they can have
9546 * their commit work done in parallel with other commits not
9547 * touching the same resource. If we have a new DC context as part
9548 * of the DM atomic state from validation we need to free it and
9549 * retain the existing one instead.
fde9f39a
MR
9550 *
9551 * Furthermore, since the DM atomic state only contains the DC
9552 * context and can safely be annulled, we can free the state
9553 * and clear the associated private object now to free
9554 * some memory and avoid a possible use-after-free later.
674e78ac 9555 */
bd200d19 9556
fde9f39a
MR
9557 for (i = 0; i < state->num_private_objs; i++) {
9558 struct drm_private_obj *obj = state->private_objs[i].ptr;
bd200d19 9559
fde9f39a
MR
9560 if (obj->funcs == adev->dm.atomic_obj.funcs) {
9561 int j = state->num_private_objs-1;
bd200d19 9562
fde9f39a
MR
9563 dm_atomic_destroy_state(obj,
9564 state->private_objs[i].state);
9565
9566 /* If i is not at the end of the array then the
9567 * last element needs to be moved to where i was
9568 * before the array can safely be truncated.
9569 */
9570 if (i != j)
9571 state->private_objs[i] =
9572 state->private_objs[j];
bd200d19 9573
fde9f39a
MR
9574 state->private_objs[j].ptr = NULL;
9575 state->private_objs[j].state = NULL;
9576 state->private_objs[j].old_state = NULL;
9577 state->private_objs[j].new_state = NULL;
9578
9579 state->num_private_objs = j;
9580 break;
9581 }
bd200d19 9582 }
e7b07cee
HW
9583 }
9584
caff0e66
NK
9585 /* Store the overall update type for use later in atomic check. */
9586 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
9587 struct dm_crtc_state *dm_new_crtc_state =
9588 to_dm_crtc_state(new_crtc_state);
9589
f6d7c7fa
NK
9590 dm_new_crtc_state->update_type = lock_and_validation_needed ?
9591 UPDATE_TYPE_FULL :
9592 UPDATE_TYPE_FAST;
e7b07cee
HW
9593 }
9594
9595 /* Must be success */
9596 WARN_ON(ret);
e8a98235
RS
9597
9598 trace_amdgpu_dm_atomic_check_finish(state, ret);
9599
e7b07cee
HW
9600 return ret;
9601
9602fail:
9603 if (ret == -EDEADLK)
01e28f9c 9604 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
e7b07cee 9605 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
01e28f9c 9606 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
e7b07cee 9607 else
01e28f9c 9608 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
e7b07cee 9609
e8a98235
RS
9610 trace_amdgpu_dm_atomic_check_finish(state, ret);
9611
e7b07cee
HW
9612 return ret;
9613}
9614
3ee6b26b
AD
9615static bool is_dp_capable_without_timing_msa(struct dc *dc,
9616 struct amdgpu_dm_connector *amdgpu_dm_connector)
e7b07cee
HW
9617{
9618 uint8_t dpcd_data;
9619 bool capable = false;
9620
c84dec2f 9621 if (amdgpu_dm_connector->dc_link &&
e7b07cee
HW
9622 dm_helpers_dp_read_dpcd(
9623 NULL,
c84dec2f 9624 amdgpu_dm_connector->dc_link,
e7b07cee
HW
9625 DP_DOWN_STREAM_PORT_COUNT,
9626 &dpcd_data,
9627 sizeof(dpcd_data))) {
9628 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
9629 }
9630
9631 return capable;
9632}
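/*
 * Note: per the DP DPCD layout, DP_DOWN_STREAM_PORT_COUNT is register
 * 0x0007 and DP_MSA_TIMING_PAR_IGNORED is bit 6 of that byte; a sink
 * setting it can regenerate its timing without the MSA, which is a
 * prerequisite for variable refresh over DP.
 */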
98e6436d
AK
9633void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
9634 struct edid *edid)
e7b07cee
HW
9635{
9636 int i;
e7b07cee
HW
9637 bool edid_check_required;
9638 struct detailed_timing *timing;
9639 struct detailed_non_pixel *data;
9640 struct detailed_data_monitor_range *range;
c84dec2f
HW
9641 struct amdgpu_dm_connector *amdgpu_dm_connector =
9642 to_amdgpu_dm_connector(connector);
bb47de73 9643 struct dm_connector_state *dm_con_state = NULL;
e7b07cee
HW
9644
9645 struct drm_device *dev = connector->dev;
1348969a 9646 struct amdgpu_device *adev = drm_to_adev(dev);
bb47de73 9647 bool freesync_capable = false;
b830ebc9 9648
8218d7f1
HW
9649 if (!connector->state) {
9650 DRM_ERROR("%s - Connector has no state", __func__);
bb47de73 9651 goto update;
8218d7f1
HW
9652 }
9653
98e6436d
AK
9654 if (!edid) {
9655 dm_con_state = to_dm_connector_state(connector->state);
9656
9657 amdgpu_dm_connector->min_vfreq = 0;
9658 amdgpu_dm_connector->max_vfreq = 0;
9659 amdgpu_dm_connector->pixel_clock_mhz = 0;
9660
bb47de73 9661 goto update;
98e6436d
AK
9662 }
9663
8218d7f1
HW
9664 dm_con_state = to_dm_connector_state(connector->state);
9665
e7b07cee 9666 edid_check_required = false;
c84dec2f 9667 if (!amdgpu_dm_connector->dc_sink) {
e7b07cee 9668 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
bb47de73 9669 goto update;
e7b07cee
HW
9670 }
9671 if (!adev->dm.freesync_module)
bb47de73 9672 goto update;
e7b07cee
HW
9673 /*
9674 * If the EDID is non-zero, restrict freesync only to DP and eDP.
9675 */
9676 if (edid) {
c84dec2f
HW
9677 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
9678 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
e7b07cee
HW
9679 edid_check_required = is_dp_capable_without_timing_msa(
9680 adev->dm.dc,
c84dec2f 9681 amdgpu_dm_connector);
e7b07cee
HW
9682 }
9683 }
e7b07cee
HW
9684 if (edid_check_required && (edid->version > 1 ||
9685 (edid->version == 1 && edid->revision > 1))) {
9686 for (i = 0; i < 4; i++) {
9687
9688 timing = &edid->detailed_timings[i];
9689 data = &timing->data.other_data;
9690 range = &data->data.range;
9691 /*
9692 * Check if monitor has continuous frequency mode
9693 */
9694 if (data->type != EDID_DETAIL_MONITOR_RANGE)
9695 continue;
9696 /*
9697 * Check for flag range limits only. If flags == 1 then
9698 * no additional timing information is provided.
9699 * Default GTF, GTF secondary curve and CVT are not
9700 * supported.
9701 */
9702 if (range->flags != 1)
9703 continue;
9704
c84dec2f
HW
9705 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
9706 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
9707 amdgpu_dm_connector->pixel_clock_mhz =
e7b07cee 9708 range->pixel_clock_mhz * 10;
a0ffc3fd
SW
9709
9710 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
9711 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
9712
e7b07cee
HW
9713 break;
9714 }
9715
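		/*
		 * Heuristic: a vertical refresh span of 10 Hz or less (e.g.
		 * 58-62 Hz) gives variable refresh too little room to work
		 * with, so such sinks are not marked FreeSync capable.
		 */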
		if (amdgpu_dm_connector->max_vfreq -
		    amdgpu_dm_connector->min_vfreq > 10) {

			freesync_capable = true;
		}
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}

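/*
 * Panel Self Refresh (PSR) support. The helpers below read the sink's PSR
 * capability over DPCD, program the PSR configuration into DC and toggle the
 * feature at runtime.
 */
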
static void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];

	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
		return;
	if (link->type == dc_connection_none)
		return;
	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
				    dpcd_data, sizeof(dpcd_data))) {
		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];

		if (dpcd_data[0] == 0) {
			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
			link->psr_settings.psr_feature_enabled = false;
		} else {
			link->psr_settings.psr_version = DC_PSR_VERSION_1;
			link->psr_settings.psr_feature_enabled = true;
		}

		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
	}
}

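/*
 * Note: the first byte of the DP_PSR_SUPPORT DPCD field read above is the
 * sink's PSR version (0 means no support). Every non-zero value is folded to
 * DC_PSR_VERSION_1, i.e. only PSR1 is enabled through this path.
 */
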
/*
 * amdgpu_dm_link_setup_psr() - configure the psr link
 * @stream: stream state
 *
 * Return: true on success
 */
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
{
	struct dc_link *link = NULL;
	struct psr_config psr_config = {0};
	struct psr_context psr_context = {0};
	bool ret = false;

	if (stream == NULL)
		return false;

	link = stream->link;

	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;

	if (psr_config.psr_version > 0) {
		psr_config.psr_exit_link_training_required = 0x1;
		psr_config.psr_frame_capture_indication_req = 0;
		psr_config.psr_rfb_setup_time = 0x37;
		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
		psr_config.allow_smu_optimizations = 0x0;

		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
	}
	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);

	return ret;
}

/*
 * amdgpu_dm_psr_enable() - enable psr f/w
 * @stream: stream state
 *
 * Return: true on success
 */
bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
	struct dc_link *link = stream->link;
	unsigned int vsync_rate_hz = 0;
	struct dc_static_screen_params params = {0};
	/* Calculate the number of static frames before generating an
	 * interrupt to enter PSR.
	 */
	/* Fail-safe default of 2 static frames */
	unsigned int num_frames_static = 2;

	DRM_DEBUG_DRIVER("Enabling psr...\n");

	vsync_rate_hz = div64_u64(div64_u64((
			stream->timing.pix_clk_100hz * 100),
			stream->timing.v_total),
			stream->timing.h_total);

	/* Round up.
	 * Calculate the number of frames such that at least 30 ms of time has
	 * passed.
	 */
	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

		num_frames_static = (30000 / frame_time_microsec) + 1;
	}
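	/*
	 * Worked example: at 60 Hz, frame_time_microsec = 1000000 / 60 =
	 * 16666, so num_frames_static = 30000 / 16666 + 1 = 2 (~33 ms of
	 * static screen); at 144 Hz it is 30000 / 6944 + 1 = 5 (~35 ms).
	 * Integer division plus one always covers a little more than 30 ms.
	 */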

	params.triggers.cursor_update = true;
	params.triggers.overlay_update = true;
	params.triggers.surface_update = true;
	params.num_frames = num_frames_static;

	dc_stream_set_static_screen_params(link->ctx->dc,
					   &stream, 1,
					   &params);

	return dc_link_set_psr_allow_active(link, true, false, false);
}
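
/*
 * Note: the cursor/overlay/surface triggers programmed above are the events
 * that reset the hardware's static-frame counter, so any screen activity
 * defers PSR entry until the image has again been static for
 * params.num_frames consecutive frames.
 */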

/*
 * amdgpu_dm_psr_disable() - disable psr f/w
 * @stream: stream state
 *
 * Return: true on success
 */
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{
	DRM_DEBUG_DRIVER("Disabling psr...\n");

	return dc_link_set_psr_allow_active(stream->link, false, true, false);
}

/*
 * amdgpu_dm_psr_disable_all() - disable psr f/w
 * if psr is enabled on any stream
 *
 * Return: true on success
 */
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
{
	DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
	return dc_set_psr_allow_active(dm->dc, false);
}

void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	int i;

	mutex_lock(&adev->dm.dc_lock);
	if (dc->current_state) {
		for (i = 0; i < dc->current_state->stream_count; ++i)
			dc->current_state->streams[i]
				->triggered_crtc_reset.enabled =
				adev->dm.force_timing_sync;

		dm_enable_per_frame_crtc_master_sync(dc->current_state);
		dc_trigger_sync(dc, dc->current_state);
	}
	mutex_unlock(&adev->dm.dc_lock);
}
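
/*
 * Note: dm.dc_lock must be held while walking dc->current_state; the same
 * mutex serializes this helper against atomic commits that rebuild that
 * state.
 */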

void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
		       uint32_t value, const char *func_name)
{
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register write. address = 0\n");
		return;
	}
#endif
	cgs_write_register(ctx->cgs_device, address, value);
	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
}

uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
			  const char *func_name)
{
	uint32_t value;
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register read; address = 0\n");
		return 0;
	}
#endif

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
		ASSERT(false);
		return 0;
	}

	value = cgs_read_register(ctx->cgs_device, address);

	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);

	return value;
}
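
/*
 * Note: DM_CHECK_ADDR_0 is an opt-in compile-time guard that catches stray
 * accesses to register offset 0. The dmub_srv check above rejects reads while
 * a DMUB register-write gather is in progress, since reads cannot be queued
 * into that offload stream.
 */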