drm/amdgpu/display: use GFP_ATOMIC in dcn21_validate_bandwidth_fp()
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

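/*
 * A rough sketch of that layering (simplified; the functions named here are
 * the real entry points defined later in this file):
 *
 *   DRM atomic ioctl -> amdgpu_dm_atomic_check() /
 *                       amdgpu_dm_atomic_commit_tail() -> DC programming
 *   hardware IRQ     -> dm_crtc_high_irq() / dm_pflip_high_irq()
 *                       -> DRM vblank and page-flip events back to userspace
 */
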
/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
	}
}

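/*
 * Note: the scanout position below is handed back in the packed register
 * format the base driver still parses: bits 15:0 carry the vertical value
 * and bits 31:16 the horizontal value, for both *position and *vbl.
 */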
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

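/*
 * VRR counts as active in both the variable and the fixed-rate states. The
 * two helpers below only differ in which copy of the freesync config they
 * read: the IRQ-side dm_irq_params or the atomic CRTC state.
 */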
static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	if (!e)
		WARN_ON(1);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int)!e);
}

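/*
 * dm_vupdate_high_irq() runs after the end of front-porch. In VRR mode the
 * core vblank handling that dm_crtc_high_irq() skips (vblank timestamps are
 * not yet valid while inside front-porch) is performed here instead.
 */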
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
					adev->dm.freesync_module,
					acrtc->dm_irq_params.stream,
					&acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
					adev->dm.dc,
					acrtc->dm_irq_params.stream,
					&acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
		}
	}
}

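/*
 * Audio component glue: the drm_audio_component interface below exposes ELD
 * data to the HDA sound driver and notifies it of hotplug changes via
 * pin_eld_notify(), so display audio endpoints show up on the sound side.
 */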
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind = amdgpu_dm_audio_component_bind,
	.unbind = amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

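/*
 * dm_dmub_hw_init() stages the DMCUB microcode: instruction constants go to
 * window 0 (unless the PSP already front-door loaded them), BSS/data to
 * window 2 and the VBIOS image to window 3, while the mailbox, tracebuffer
 * and firmware-state windows are zeroed before dmub_srv_hw_init() starts
 * the controller.
 */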
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

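/*
 * The shifts below repack GMC state at the granularities the MMHUB
 * registers use: 256KB units (>> 18) for the system aperture, 16MB units
 * (>> 24) for the AGP aperture, and 4KB pages (>> 12) for the GART page
 * table addresses.
 */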
#if defined(CONFIG_DRM_AMD_DC_DCN)
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increase system aperture high address (add 1)
		 * to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;
}
#endif
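/*
 * MALL stutter bookkeeping: event_mall_stutter() adjusts
 * active_vblank_irq_count as CRTCs enable or disable vblank IRQs, and DC
 * idle optimizations (MALL) are only allowed while the count is zero.
 */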
#if defined(CONFIG_DRM_AMD_DC_DCN)
static void event_mall_stutter(struct work_struct *work)
{
	struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
	struct amdgpu_display_manager *dm = vblank_work->dm;

	mutex_lock(&dm->dc_lock);

	if (vblank_work->enable)
		dm->active_vblank_irq_count++;
	else
		dm->active_vblank_irq_count--;

	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

	DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

	mutex_unlock(&dm->dc_lock);
}

static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
{
	int max_caps = dc->caps.max_links;
	struct vblank_workqueue *vblank_work;
	int i = 0;

	vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(vblank_work)) {
		kfree(vblank_work);
		return NULL;
	}

	for (i = 0; i < max_caps; i++)
		INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);

	return vblank_work;
}
#endif
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);
#if defined(CONFIG_DRM_AMD_DC_DCN)
	spin_lock_init(&adev->dm.vblank_lock);
#endif

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			init_data.flags.disable_dmcu = true;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_VANGOGH:
		init_data.flags.gpu_vm_support = true;
		break;
#endif
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->apu_flags) {
		struct dc_phy_addr_space_config pa_config;

		mmhub_read_system_context(adev, &pa_config);

		// Call the DC init_memory func
		dc_setup_system_context(adev->dm.dc, &pa_config);
	}
#endif

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->dm.dc->caps.max_links > 0) {
		adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);

		if (!adev->dm.vblank_workqueue)
			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
	}
#endif

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif
	if (adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);

	return;
}

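/*
 * Most ASICs return 0 from load_dmcu_fw() without requesting firmware: they
 * either have no DMCU or run the relevant features elsewhere (ABM 2.4 and
 * up live on DMCUB), so only Raven-class parts and Navi12 actually load a
 * DMCU image here.
 */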
static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_VANGOGH:
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}

static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
		break;
	case CHIP_SIENNA_CICHLID:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
		break;
	case CHIP_NAVY_FLOUNDER:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
		break;
	case CHIP_VANGOGH:
		dmub_asic = DMUB_ASIC_DCN301;
		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
		break;
	case CHIP_DIMGREY_CAVEFISH:
		dmub_asic = DMUB_ASIC_DCN302;
		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
		break;

	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data = region_params.bss_data_size ?
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes) : NULL;
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}

static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	release_firmware(adev->dm.dmub_fw);
	adev->dm.dmub_fw = NULL;

	release_firmware(adev->dm.fw_dmcu);
	adev->dm.fw_dmcu = NULL;

	return 0;
}

static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}

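/*
 * dm_late_init() seeds the ABM/backlight IRAM with a 16-entry identity
 * lookup table: entry i is 0xFFFF * i / 15, i.e. 0x0000, 0x1111, ...,
 * 0xFFFF in steps of 0x1111.
 */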
static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;
	bool ret = true;

	dmcu = adev->dm.dc->res_pool->dmcu;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction, Don't allow below 1%
	 * 0xFFFF x 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;

	/* In the case where abm is implemented on dmcub,
	 * dmcu object will be null.
	 * ABM 2.4 and up are implemented on dmcub.
	 */
	if (dmcu)
		ret = dmcu_load_iram(dmcu, params);
	else if (adev->dm.dc->ctx->dmub_srv)
		ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);

	if (!ret)
		return -EINVAL;

	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
}

static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}

9340dfd3
HW
1631static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1632{
1633 struct smu_context *smu = &adev->smu;
1634 int ret = 0;
1635
1636 if (!is_support_sw_smu(adev))
1637 return 0;
1638
 1639 /* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
 1640 * on the Windows driver dc implementation.
 1641 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
 1642 * should be passed to smu during boot up and resume from s3.
 1643 * Boot up: dc calculates dcn watermark clock settings within dc_create,
 1644 * dcn20_resource_construct
 1645 * then calls the pplib functions below to pass the settings to smu:
 1646 * smu_set_watermarks_for_clock_ranges
 1647 * smu_set_watermarks_table
 1648 * navi10_set_watermarks_table
 1649 * smu_write_watermarks_table
 1650 *
 1651 * For Renoir, clock settings of dcn watermarks are also fixed values.
 1652 * dc has implemented a different flow for the Windows driver:
 1653 * dc_hardware_init / dc_set_power_state
 1654 * dcn10_init_hw
 1655 * notify_wm_ranges
 1656 * set_wm_ranges
 1657 * -- Linux
 1658 * smu_set_watermarks_for_clock_ranges
 1659 * renoir_set_watermarks_table
 1660 * smu_write_watermarks_table
 1661 *
 1662 * For Linux,
 1663 * dc_hardware_init -> amdgpu_dm_init
 1664 * dc_set_power_state --> dm_resume
 1665 *
 1666 * Therefore, this function applies to Navi10/12/14 but not to Renoir.
 1667 *
 1668 */
 1669 switch (adev->asic_type) {
1670 case CHIP_NAVI10:
1671 case CHIP_NAVI14:
1672 case CHIP_NAVI12:
1673 break;
1674 default:
1675 return 0;
1676 }
1677
e7a95eea
EQ
1678 ret = smu_write_watermarks_table(smu);
1679 if (ret) {
1680 DRM_ERROR("Failed to update WMTABLE!\n");
1681 return ret;
9340dfd3
HW
1682 }
1683
9340dfd3
HW
1684 return 0;
1685}
1686
b8592b48
LL
1687/**
1688 * dm_hw_init() - Initialize DC device
28d687ea 1689 * @handle: The base driver device containing the amdgpu_dm device.
b8592b48
LL
1690 *
1691 * Initialize the &struct amdgpu_display_manager device. This involves calling
1692 * the initializers of each DM component, then populating the struct with them.
1693 *
1694 * Although the function implies hardware initialization, both hardware and
1695 * software are initialized here. Splitting them out to their relevant init
1696 * hooks is a future TODO item.
1697 *
1698 * Some notable things that are initialized here:
1699 *
1700 * - Display Core, both software and hardware
1701 * - DC modules that we need (freesync and color management)
1702 * - DRM software states
1703 * - Interrupt sources and handlers
1704 * - Vblank support
1705 * - Debug FS entries, if enabled
1706 */
4562236b
HW
1707static int dm_hw_init(void *handle)
1708{
1709 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1710 /* Create DAL display manager */
1711 amdgpu_dm_init(adev);
4562236b
HW
1712 amdgpu_dm_hpd_init(adev);
1713
4562236b
HW
1714 return 0;
1715}
1716
b8592b48
LL
1717/**
1718 * dm_hw_fini() - Teardown DC device
28d687ea 1719 * @handle: The base driver device containing the amdgpu_dm device.
b8592b48
LL
1720 *
1721 * Teardown components within &struct amdgpu_display_manager that require
1722 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1723 * were loaded. Also flush IRQ workqueues and disable them.
1724 */
4562236b
HW
1725static int dm_hw_fini(void *handle)
1726{
1727 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1728
1729 amdgpu_dm_hpd_fini(adev);
1730
1731 amdgpu_dm_irq_fini(adev);
21de3396 1732 amdgpu_dm_fini(adev);
4562236b
HW
1733 return 0;
1734}
1735
cdaae837
BL
1736
1737static int dm_enable_vblank(struct drm_crtc *crtc);
1738static void dm_disable_vblank(struct drm_crtc *crtc);
1739
1740static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1741 struct dc_state *state, bool enable)
1742{
1743 enum dc_irq_source irq_source;
1744 struct amdgpu_crtc *acrtc;
1745 int rc = -EBUSY;
1746 int i = 0;
1747
1748 for (i = 0; i < state->stream_count; i++) {
1749 acrtc = get_crtc_by_otg_inst(
1750 adev, state->stream_status[i].primary_otg_inst);
1751
1752 if (acrtc && state->stream_status[i].plane_count != 0) {
1753 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1754 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
 1755 DRM_DEBUG("crtc %d - pflip irq %sabling: r=%d\n",
1756 acrtc->crtc_id, enable ? "en" : "dis", rc);
1757 if (rc)
1758 DRM_WARN("Failed to %s pflip interrupts\n",
1759 enable ? "enable" : "disable");
1760
1761 if (enable) {
1762 rc = dm_enable_vblank(&acrtc->base);
1763 if (rc)
1764 DRM_WARN("Failed to enable vblank interrupts\n");
1765 } else {
1766 dm_disable_vblank(&acrtc->base);
1767 }
1768
1769 }
1770 }
1771
1772}
1773
dfd84d90 1774static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
cdaae837
BL
1775{
1776 struct dc_state *context = NULL;
1777 enum dc_status res = DC_ERROR_UNEXPECTED;
1778 int i;
1779 struct dc_stream_state *del_streams[MAX_PIPES];
1780 int del_streams_count = 0;
1781
1782 memset(del_streams, 0, sizeof(del_streams));
1783
1784 context = dc_create_state(dc);
1785 if (context == NULL)
1786 goto context_alloc_fail;
1787
1788 dc_resource_state_copy_construct_current(dc, context);
1789
 1790 /* First, remove all streams from the context */
1791 for (i = 0; i < context->stream_count; i++) {
1792 struct dc_stream_state *stream = context->streams[i];
1793
1794 del_streams[del_streams_count++] = stream;
1795 }
1796
1797 /* Remove all planes for removed streams and then remove the streams */
1798 for (i = 0; i < del_streams_count; i++) {
1799 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1800 res = DC_FAIL_DETACH_SURFACES;
1801 goto fail;
1802 }
1803
1804 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1805 if (res != DC_OK)
1806 goto fail;
1807 }
1808
1809
1810 res = dc_validate_global_state(dc, context, false);
1811
1812 if (res != DC_OK) {
1813 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1814 goto fail;
1815 }
1816
1817 res = dc_commit_state(dc, context);
1818
1819fail:
1820 dc_release_state(context);
1821
1822context_alloc_fail:
1823 return res;
1824}
1825
4562236b
HW
1826static int dm_suspend(void *handle)
1827{
1828 struct amdgpu_device *adev = handle;
1829 struct amdgpu_display_manager *dm = &adev->dm;
1830 int ret = 0;
4562236b 1831
53b3f8f4 1832 if (amdgpu_in_reset(adev)) {
cdaae837 1833 mutex_lock(&dm->dc_lock);
98ab5f35
BL
1834
1835#if defined(CONFIG_DRM_AMD_DC_DCN)
1836 dc_allow_idle_optimizations(adev->dm.dc, false);
1837#endif
1838
cdaae837
BL
1839 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1840
1841 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1842
1843 amdgpu_dm_commit_zero_streams(dm->dc);
1844
1845 amdgpu_dm_irq_suspend(adev);
1846
1847 return ret;
1848 }
4562236b 1849
d2f0b53b 1850 WARN_ON(adev->dm.cached_state);
4a580877 1851 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
d2f0b53b 1852
4a580877 1853 s3_handle_mst(adev_to_drm(adev), true);
4562236b 1854
4562236b
HW
1855 amdgpu_dm_irq_suspend(adev);
1856
a3621485 1857
32f5062d 1858 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
4562236b 1859
1c2075d4 1860 return 0;
4562236b
HW
1861}
1862
1daf8c63
AD
1863static struct amdgpu_dm_connector *
1864amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1865 struct drm_crtc *crtc)
4562236b
HW
1866{
1867 uint32_t i;
c2cea706 1868 struct drm_connector_state *new_con_state;
4562236b
HW
1869 struct drm_connector *connector;
1870 struct drm_crtc *crtc_from_state;
1871
c2cea706
LSL
1872 for_each_new_connector_in_state(state, connector, new_con_state, i) {
1873 crtc_from_state = new_con_state->crtc;
4562236b
HW
1874
1875 if (crtc_from_state == crtc)
c84dec2f 1876 return to_amdgpu_dm_connector(connector);
4562236b
HW
1877 }
1878
1879 return NULL;
1880}
1881
fbbdadf2
BL
1882static void emulated_link_detect(struct dc_link *link)
1883{
1884 struct dc_sink_init_data sink_init_data = { 0 };
1885 struct display_sink_capability sink_caps = { 0 };
1886 enum dc_edid_status edid_status;
1887 struct dc_context *dc_ctx = link->ctx;
1888 struct dc_sink *sink = NULL;
1889 struct dc_sink *prev_sink = NULL;
1890
1891 link->type = dc_connection_none;
1892 prev_sink = link->local_sink;
1893
30164a16
VL
1894 if (prev_sink)
1895 dc_sink_release(prev_sink);
fbbdadf2
BL
1896
1897 switch (link->connector_signal) {
1898 case SIGNAL_TYPE_HDMI_TYPE_A: {
1899 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1900 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1901 break;
1902 }
1903
1904 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1905 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1906 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1907 break;
1908 }
1909
1910 case SIGNAL_TYPE_DVI_DUAL_LINK: {
1911 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1912 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1913 break;
1914 }
1915
1916 case SIGNAL_TYPE_LVDS: {
1917 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1918 sink_caps.signal = SIGNAL_TYPE_LVDS;
1919 break;
1920 }
1921
1922 case SIGNAL_TYPE_EDP: {
1923 sink_caps.transaction_type =
1924 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1925 sink_caps.signal = SIGNAL_TYPE_EDP;
1926 break;
1927 }
1928
1929 case SIGNAL_TYPE_DISPLAY_PORT: {
1930 sink_caps.transaction_type =
1931 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1932 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1933 break;
1934 }
1935
1936 default:
1937 DC_ERROR("Invalid connector type! signal:%d\n",
1938 link->connector_signal);
1939 return;
1940 }
1941
1942 sink_init_data.link = link;
1943 sink_init_data.sink_signal = sink_caps.signal;
1944
1945 sink = dc_sink_create(&sink_init_data);
1946 if (!sink) {
1947 DC_ERROR("Failed to create sink!\n");
1948 return;
1949 }
1950
dcd5fb82 1951 /* dc_sink_create returns a new reference */
fbbdadf2
BL
1952 link->local_sink = sink;
1953
1954 edid_status = dm_helpers_read_local_edid(
1955 link->ctx,
1956 link,
1957 sink);
1958
1959 if (edid_status != EDID_OK)
 1960 DC_ERROR("Failed to read EDID\n");
1961
1962}
1963
cdaae837
BL
1964static void dm_gpureset_commit_state(struct dc_state *dc_state,
1965 struct amdgpu_display_manager *dm)
1966{
1967 struct {
1968 struct dc_surface_update surface_updates[MAX_SURFACES];
1969 struct dc_plane_info plane_infos[MAX_SURFACES];
1970 struct dc_scaling_info scaling_infos[MAX_SURFACES];
1971 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1972 struct dc_stream_update stream_update;
 1973 } *bundle;
1974 int k, m;
1975
1976 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
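	/* Note: the bundle is heap-allocated because its four MAX_SURFACES-sized
	 * arrays would be too large for the kernel stack.
	 */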
1977
1978 if (!bundle) {
1979 dm_error("Failed to allocate update bundle\n");
1980 goto cleanup;
1981 }
1982
1983 for (k = 0; k < dc_state->stream_count; k++) {
1984 bundle->stream_update.stream = dc_state->streams[k];
1985
1986 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
1987 bundle->surface_updates[m].surface =
1988 dc_state->stream_status->plane_states[m];
1989 bundle->surface_updates[m].surface->force_full_update =
1990 true;
1991 }
1992 dc_commit_updates_for_stream(
1993 dm->dc, bundle->surface_updates,
1994 dc_state->stream_status->plane_count,
efc8278e 1995 dc_state->streams[k], &bundle->stream_update, dc_state);
cdaae837
BL
1996 }
1997
1998cleanup:
1999 kfree(bundle);
2000
2001 return;
2002}
2003
3c4d55c9
AP
2004static void dm_set_dpms_off(struct dc_link *link)
2005{
2006 struct dc_stream_state *stream_state;
2007 struct amdgpu_dm_connector *aconnector = link->priv;
2008 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2009 struct dc_stream_update stream_update;
2010 bool dpms_off = true;
2011
2012 memset(&stream_update, 0, sizeof(stream_update));
2013 stream_update.dpms_off = &dpms_off;
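	/* dc stream updates are pointer-based: only fields left non-NULL are
	 * applied, so this commit changes nothing but the dpms_off flag.
	 */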
2014
2015 mutex_lock(&adev->dm.dc_lock);
2016 stream_state = dc_stream_find_from_link(link);
2017
2018 if (stream_state == NULL) {
2019 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2020 mutex_unlock(&adev->dm.dc_lock);
2021 return;
2022 }
2023
2024 stream_update.stream = stream_state;
2025 dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
efc8278e
AJ
2026 stream_state, &stream_update,
2027 stream_state->ctx->dc->current_state);
3c4d55c9
AP
2028 mutex_unlock(&adev->dm.dc_lock);
2029}
2030
4562236b
HW
2031static int dm_resume(void *handle)
2032{
2033 struct amdgpu_device *adev = handle;
4a580877 2034 struct drm_device *ddev = adev_to_drm(adev);
4562236b 2035 struct amdgpu_display_manager *dm = &adev->dm;
c84dec2f 2036 struct amdgpu_dm_connector *aconnector;
4562236b 2037 struct drm_connector *connector;
f8d2d39e 2038 struct drm_connector_list_iter iter;
4562236b 2039 struct drm_crtc *crtc;
c2cea706 2040 struct drm_crtc_state *new_crtc_state;
fcb4019e
LSL
2041 struct dm_crtc_state *dm_new_crtc_state;
2042 struct drm_plane *plane;
2043 struct drm_plane_state *new_plane_state;
2044 struct dm_plane_state *dm_new_plane_state;
113b7a01 2045 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
fbbdadf2 2046 enum dc_connection_type new_connection_type = dc_connection_none;
cdaae837
BL
2047 struct dc_state *dc_state;
2048 int i, r, j;
4562236b 2049
53b3f8f4 2050 if (amdgpu_in_reset(adev)) {
cdaae837
BL
2051 dc_state = dm->cached_dc_state;
2052
2053 r = dm_dmub_hw_init(adev);
2054 if (r)
2055 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2056
2057 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2058 dc_resume(dm->dc);
2059
2060 amdgpu_dm_irq_resume_early(adev);
2061
2062 for (i = 0; i < dc_state->stream_count; i++) {
2063 dc_state->streams[i]->mode_changed = true;
2064 for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2065 dc_state->stream_status->plane_states[j]->update_flags.raw
2066 = 0xffffffff;
2067 }
2068 }
2069
2070 WARN_ON(!dc_commit_state(dm->dc, dc_state));
4562236b 2071
cdaae837
BL
2072 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2073
2074 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2075
2076 dc_release_state(dm->cached_dc_state);
2077 dm->cached_dc_state = NULL;
2078
2079 amdgpu_dm_irq_resume_late(adev);
2080
2081 mutex_unlock(&dm->dc_lock);
2082
2083 return 0;
2084 }
113b7a01
LL
2085 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2086 dc_release_state(dm_state->context);
2087 dm_state->context = dc_create_state(dm->dc);
2088 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2089 dc_resource_state_construct(dm->dc, dm_state->context);
2090
8c7aea40
NK
2091 /* Before powering on DC we need to re-initialize DMUB. */
2092 r = dm_dmub_hw_init(adev);
2093 if (r)
2094 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2095
a80aa93d
ML
2096 /* power on hardware */
2097 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2098
4562236b
HW
2099 /* program HPD filter */
2100 dc_resume(dm->dc);
2101
4562236b
HW
2102 /*
2103 * early enable HPD Rx IRQ, should be done before set mode as short
2104 * pulse interrupts are used for MST
2105 */
2106 amdgpu_dm_irq_resume_early(adev);
2107
d20ebea8 2108 /* On resume we need to rewrite the MSTM control bits to enable MST */
684cd480
LP
2109 s3_handle_mst(ddev, false);
2110
4562236b 2111 /* Do detection */
f8d2d39e
LP
2112 drm_connector_list_iter_begin(ddev, &iter);
2113 drm_for_each_connector_iter(connector, &iter) {
c84dec2f 2114 aconnector = to_amdgpu_dm_connector(connector);
4562236b
HW
2115
2116 /*
 2117 * This is the case when traversing through already-created
 2118 * MST connectors; they should be skipped.
2119 */
2120 if (aconnector->mst_port)
2121 continue;
2122
03ea364c 2123 mutex_lock(&aconnector->hpd_lock);
fbbdadf2
BL
2124 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2125 DRM_ERROR("KMS: Failed to detect connector\n");
2126
2127 if (aconnector->base.force && new_connection_type == dc_connection_none)
2128 emulated_link_detect(aconnector->dc_link);
2129 else
2130 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
3eb4eba4
RL
2131
2132 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2133 aconnector->fake_enable = false;
2134
dcd5fb82
MF
2135 if (aconnector->dc_sink)
2136 dc_sink_release(aconnector->dc_sink);
4562236b
HW
2137 aconnector->dc_sink = NULL;
2138 amdgpu_dm_update_connector_after_detect(aconnector);
03ea364c 2139 mutex_unlock(&aconnector->hpd_lock);
4562236b 2140 }
f8d2d39e 2141 drm_connector_list_iter_end(&iter);
4562236b 2142
1f6010a9 2143 /* Force mode set in atomic commit */
a80aa93d 2144 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
c2cea706 2145 new_crtc_state->active_changed = true;
4f346e65 2146
fcb4019e
LSL
2147 /*
2148 * atomic_check is expected to create the dc states. We need to release
2149 * them here, since they were duplicated as part of the suspend
2150 * procedure.
2151 */
a80aa93d 2152 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
fcb4019e
LSL
2153 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2154 if (dm_new_crtc_state->stream) {
2155 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2156 dc_stream_release(dm_new_crtc_state->stream);
2157 dm_new_crtc_state->stream = NULL;
2158 }
2159 }
2160
a80aa93d 2161 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
fcb4019e
LSL
2162 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2163 if (dm_new_plane_state->dc_state) {
2164 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2165 dc_plane_state_release(dm_new_plane_state->dc_state);
2166 dm_new_plane_state->dc_state = NULL;
2167 }
2168 }
2169
2d1af6a1 2170 drm_atomic_helper_resume(ddev, dm->cached_state);
4562236b 2171
a80aa93d 2172 dm->cached_state = NULL;
0a214e2f 2173
9faa4237 2174 amdgpu_dm_irq_resume_late(adev);
4562236b 2175
9340dfd3
HW
2176 amdgpu_dm_smu_write_watermarks_table(adev);
2177
2d1af6a1 2178 return 0;
4562236b
HW
2179}
2180
b8592b48
LL
2181/**
2182 * DOC: DM Lifecycle
2183 *
2184 * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2185 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2186 * the base driver's device list to be initialized and torn down accordingly.
2187 *
2188 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2189 */
2190
4562236b
HW
2191static const struct amd_ip_funcs amdgpu_dm_funcs = {
2192 .name = "dm",
2193 .early_init = dm_early_init,
7abcf6b5 2194 .late_init = dm_late_init,
4562236b
HW
2195 .sw_init = dm_sw_init,
2196 .sw_fini = dm_sw_fini,
2197 .hw_init = dm_hw_init,
2198 .hw_fini = dm_hw_fini,
2199 .suspend = dm_suspend,
2200 .resume = dm_resume,
2201 .is_idle = dm_is_idle,
2202 .wait_for_idle = dm_wait_for_idle,
2203 .check_soft_reset = dm_check_soft_reset,
2204 .soft_reset = dm_soft_reset,
2205 .set_clockgating_state = dm_set_clockgating_state,
2206 .set_powergating_state = dm_set_powergating_state,
2207};
2208
2209const struct amdgpu_ip_block_version dm_ip_block =
2210{
2211 .type = AMD_IP_BLOCK_TYPE_DCE,
2212 .major = 1,
2213 .minor = 0,
2214 .rev = 0,
2215 .funcs = &amdgpu_dm_funcs,
2216};
2217
ca3268c4 2218
b8592b48
LL
2219/**
2220 * DOC: atomic
2221 *
2222 * *WIP*
2223 */
0a323b84 2224
b3663f70 2225static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
4d4772f6 2226 .fb_create = amdgpu_display_user_framebuffer_create,
dfbbfe3c 2227 .get_format_info = amd_get_format_info,
366c1baa 2228 .output_poll_changed = drm_fb_helper_output_poll_changed,
4562236b 2229 .atomic_check = amdgpu_dm_atomic_check,
0269764a 2230 .atomic_commit = drm_atomic_helper_commit,
54f5499a
AG
2231};
2232
2233static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2234 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
4562236b
HW
2235};
2236
94562810
RS
2237static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2238{
2239 u32 max_cll, min_cll, max, min, q, r;
2240 struct amdgpu_dm_backlight_caps *caps;
2241 struct amdgpu_display_manager *dm;
2242 struct drm_connector *conn_base;
2243 struct amdgpu_device *adev;
ec11fe37 2244 struct dc_link *link = NULL;
94562810
RS
2245 static const u8 pre_computed_values[] = {
2246 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2247 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
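	/* pre_computed_values[r] = round(50 * 2^(r/32)) for r = 0..31; see the
	 * derivation in the comment below.
	 */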
2248
2249 if (!aconnector || !aconnector->dc_link)
2250 return;
2251
ec11fe37 2252 link = aconnector->dc_link;
2253 if (link->connector_signal != SIGNAL_TYPE_EDP)
2254 return;
2255
94562810 2256 conn_base = &aconnector->base;
1348969a 2257 adev = drm_to_adev(conn_base->dev);
94562810
RS
2258 dm = &adev->dm;
2259 caps = &dm->backlight_caps;
2260 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2261 caps->aux_support = false;
2262 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2263 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2264
2265 if (caps->ext_caps->bits.oled == 1 ||
2266 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2267 caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2268 caps->aux_support = true;
2269
7a46f05e
TI
2270 if (amdgpu_backlight == 0)
2271 caps->aux_support = false;
2272 else if (amdgpu_backlight == 1)
2273 caps->aux_support = true;
2274
94562810
RS
 2275 /* From the specification (CTA-861-G), for calculating the maximum
 2276 * luminance we need to use:
 2277 * Luminance = 50*2**(CV/32)
 2278 * where CV is a one-byte value.
 2279 * Calculating this expression would need floating point precision;
 2280 * to avoid that complexity, we take advantage of the fact that CV is
 2281 * divided by a constant. From Euclid's division algorithm, we know that
 2282 * CV can be written as: CV = 32*q + r. Next, we replace CV in the
 2283 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
 2284 * need to pre-compute the values of 50*(2**(r/32)). For pre-computing
 2285 * them we used the following Ruby one-liner:
 2286 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
 2287 * The results of the above expression can be verified against
 2288 * pre_computed_values.
 2289 */
2290 q = max_cll >> 5;
2291 r = max_cll % 32;
2292 max = (1 << q) * pre_computed_values[r];
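	/* Worked example (illustrative): max_cll = 100 gives q = 3, r = 4, so
	 * max = (1 << 3) * pre_computed_values[4] = 8 * 55 = 440 nits, close to
	 * the exact 50 * 2^(100/32) ~= 436 nits.
	 */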
2293
2294 // min luminance: maxLum * (CV/255)^2 / 100
2295 q = DIV_ROUND_CLOSEST(min_cll, 255);
2296 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2297
2298 caps->aux_max_input_signal = max;
2299 caps->aux_min_input_signal = min;
2300}
2301
97e51c16
HW
2302void amdgpu_dm_update_connector_after_detect(
2303 struct amdgpu_dm_connector *aconnector)
4562236b
HW
2304{
2305 struct drm_connector *connector = &aconnector->base;
2306 struct drm_device *dev = connector->dev;
b73a22d3 2307 struct dc_sink *sink;
4562236b
HW
2308
2309 /* MST handled by drm_mst framework */
 2310 if (aconnector->mst_mgr.mst_state)
2311 return;
2312
4562236b 2313 sink = aconnector->dc_link->local_sink;
dcd5fb82
MF
2314 if (sink)
2315 dc_sink_retain(sink);
4562236b 2316
1f6010a9
DF
2317 /*
 2318 * An EDID-managed connector gets its first update only in the mode_valid hook;
4562236b 2319 * the connector sink is then set to either the fake or the physical sink, depending on link status.
1f6010a9 2320 * Skip if this was already done during boot.
4562236b
HW
2321 */
2322 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2323 && aconnector->dc_em_sink) {
2324
1f6010a9
DF
2325 /*
2326 * For S3 resume with headless use eml_sink to fake stream
2327 * because on resume connector->sink is set to NULL
4562236b
HW
2328 */
2329 mutex_lock(&dev->mode_config.mutex);
2330
2331 if (sink) {
922aa1e1 2332 if (aconnector->dc_sink) {
98e6436d 2333 amdgpu_dm_update_freesync_caps(connector, NULL);
1f6010a9
DF
2334 /*
2335 * retain and release below are used to
2336 * bump up refcount for sink because the link doesn't point
2337 * to it anymore after disconnect, so on next crtc to connector
922aa1e1
AG
2338 * reshuffle by UMD we will get into unwanted dc_sink release
2339 */
dcd5fb82 2340 dc_sink_release(aconnector->dc_sink);
922aa1e1 2341 }
4562236b 2342 aconnector->dc_sink = sink;
dcd5fb82 2343 dc_sink_retain(aconnector->dc_sink);
98e6436d
AK
2344 amdgpu_dm_update_freesync_caps(connector,
2345 aconnector->edid);
4562236b 2346 } else {
98e6436d 2347 amdgpu_dm_update_freesync_caps(connector, NULL);
dcd5fb82 2348 if (!aconnector->dc_sink) {
4562236b 2349 aconnector->dc_sink = aconnector->dc_em_sink;
922aa1e1 2350 dc_sink_retain(aconnector->dc_sink);
dcd5fb82 2351 }
4562236b
HW
2352 }
2353
2354 mutex_unlock(&dev->mode_config.mutex);
dcd5fb82
MF
2355
2356 if (sink)
2357 dc_sink_release(sink);
4562236b
HW
2358 return;
2359 }
2360
2361 /*
2362 * TODO: temporary guard to look for proper fix
 2363 * if this sink is an MST sink, we should not do anything
2364 */
dcd5fb82
MF
2365 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2366 dc_sink_release(sink);
4562236b 2367 return;
dcd5fb82 2368 }
4562236b
HW
2369
2370 if (aconnector->dc_sink == sink) {
1f6010a9
DF
2371 /*
2372 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2373 * Do nothing!!
2374 */
f1ad2f5e 2375 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
4562236b 2376 aconnector->connector_id);
dcd5fb82
MF
2377 if (sink)
2378 dc_sink_release(sink);
4562236b
HW
2379 return;
2380 }
2381
f1ad2f5e 2382 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
4562236b
HW
2383 aconnector->connector_id, aconnector->dc_sink, sink);
2384
2385 mutex_lock(&dev->mode_config.mutex);
2386
1f6010a9
DF
2387 /*
2388 * 1. Update status of the drm connector
2389 * 2. Send an event and let userspace tell us what to do
2390 */
4562236b 2391 if (sink) {
1f6010a9
DF
2392 /*
2393 * TODO: check if we still need the S3 mode update workaround.
2394 * If yes, put it here.
2395 */
c64b0d6b 2396 if (aconnector->dc_sink) {
98e6436d 2397 amdgpu_dm_update_freesync_caps(connector, NULL);
c64b0d6b
VL
2398 dc_sink_release(aconnector->dc_sink);
2399 }
4562236b
HW
2400
2401 aconnector->dc_sink = sink;
dcd5fb82 2402 dc_sink_retain(aconnector->dc_sink);
900b3cb1 2403 if (sink->dc_edid.length == 0) {
4562236b 2404 aconnector->edid = NULL;
e6142dd5
AP
2405 if (aconnector->dc_link->aux_mode) {
2406 drm_dp_cec_unset_edid(
2407 &aconnector->dm_dp_aux.aux);
2408 }
900b3cb1 2409 } else {
4562236b 2410 aconnector->edid =
e6142dd5 2411 (struct edid *)sink->dc_edid.raw_edid;
4562236b 2412
c555f023 2413 drm_connector_update_edid_property(connector,
e6142dd5 2414 aconnector->edid);
e6142dd5
AP
2415 if (aconnector->dc_link->aux_mode)
2416 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2417 aconnector->edid);
4562236b 2418 }
e6142dd5 2419
98e6436d 2420 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
94562810 2421 update_connector_ext_caps(aconnector);
4562236b 2422 } else {
e86e8947 2423 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
98e6436d 2424 amdgpu_dm_update_freesync_caps(connector, NULL);
c555f023 2425 drm_connector_update_edid_property(connector, NULL);
4562236b 2426 aconnector->num_modes = 0;
dcd5fb82 2427 dc_sink_release(aconnector->dc_sink);
4562236b 2428 aconnector->dc_sink = NULL;
5326c452 2429 aconnector->edid = NULL;
0c8620d6
BL
2430#ifdef CONFIG_DRM_AMD_DC_HDCP
2431 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2432 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2433 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2434#endif
4562236b
HW
2435 }
2436
2437 mutex_unlock(&dev->mode_config.mutex);
dcd5fb82 2438
0f877894
OV
2439 update_subconnector_property(aconnector);
2440
dcd5fb82
MF
2441 if (sink)
2442 dc_sink_release(sink);
4562236b
HW
2443}
2444
2445static void handle_hpd_irq(void *param)
2446{
c84dec2f 2447 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
4562236b
HW
2448 struct drm_connector *connector = &aconnector->base;
2449 struct drm_device *dev = connector->dev;
fbbdadf2 2450 enum dc_connection_type new_connection_type = dc_connection_none;
0c8620d6 2451#ifdef CONFIG_DRM_AMD_DC_HDCP
1348969a 2452 struct amdgpu_device *adev = drm_to_adev(dev);
97f6c917 2453 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
0c8620d6 2454#endif
4562236b 2455
1f6010a9
DF
2456 /*
 2457 * In case of failure or MST there is no need to update the connector status
 2458 * or notify the OS, since (in the MST case) MST does this in its own context.
4562236b
HW
2459 */
2460 mutex_lock(&aconnector->hpd_lock);
2e0ac3d6 2461
0c8620d6 2462#ifdef CONFIG_DRM_AMD_DC_HDCP
97f6c917 2463 if (adev->dm.hdcp_workqueue) {
96a3b32e 2464 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
97f6c917
BL
2465 dm_con_state->update_hdcp = true;
2466 }
0c8620d6 2467#endif
2e0ac3d6
HW
2468 if (aconnector->fake_enable)
2469 aconnector->fake_enable = false;
2470
fbbdadf2
BL
2471 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2472 DRM_ERROR("KMS: Failed to detect connector\n");
2473
2474 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2475 emulated_link_detect(aconnector->dc_link);
2476
2477
2478 drm_modeset_lock_all(dev);
2479 dm_restore_drm_connector_state(dev, connector);
2480 drm_modeset_unlock_all(dev);
2481
2482 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2483 drm_kms_helper_hotplug_event(dev);
2484
2485 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3c4d55c9
AP
2486 if (new_connection_type == dc_connection_none &&
2487 aconnector->dc_link->type == dc_connection_none)
2488 dm_set_dpms_off(aconnector->dc_link);
4562236b 2489
3c4d55c9 2490 amdgpu_dm_update_connector_after_detect(aconnector);
4562236b
HW
2491
2492 drm_modeset_lock_all(dev);
2493 dm_restore_drm_connector_state(dev, connector);
2494 drm_modeset_unlock_all(dev);
2495
2496 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2497 drm_kms_helper_hotplug_event(dev);
2498 }
2499 mutex_unlock(&aconnector->hpd_lock);
2500
2501}
2502
c84dec2f 2503static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
4562236b
HW
2504{
2505 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2506 uint8_t dret;
2507 bool new_irq_handled = false;
2508 int dpcd_addr;
2509 int dpcd_bytes_to_read;
2510
2511 const int max_process_count = 30;
2512 int process_count = 0;
2513
2514 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2515
2516 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2517 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2518 /* DPCD 0x200 - 0x201 for downstream IRQ */
2519 dpcd_addr = DP_SINK_COUNT;
2520 } else {
2521 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2522 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2523 dpcd_addr = DP_SINK_COUNT_ESI;
2524 }
2525
2526 dret = drm_dp_dpcd_read(
2527 &aconnector->dm_dp_aux.aux,
2528 dpcd_addr,
2529 esi,
2530 dpcd_bytes_to_read);
2531
2532 while (dret == dpcd_bytes_to_read &&
2533 process_count < max_process_count) {
2534 uint8_t retry;
2535 dret = 0;
2536
2537 process_count++;
2538
f1ad2f5e 2539 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
4562236b
HW
2540 /* handle HPD short pulse irq */
2541 if (aconnector->mst_mgr.mst_state)
2542 drm_dp_mst_hpd_irq(
2543 &aconnector->mst_mgr,
2544 esi,
2545 &new_irq_handled);
4562236b
HW
2546
2547 if (new_irq_handled) {
2548 /* ACK at DPCD to notify down stream */
2549 const int ack_dpcd_bytes_to_write =
2550 dpcd_bytes_to_read - 1;
2551
2552 for (retry = 0; retry < 3; retry++) {
2553 uint8_t wret;
2554
2555 wret = drm_dp_dpcd_write(
2556 &aconnector->dm_dp_aux.aux,
2557 dpcd_addr + 1,
2558 &esi[1],
2559 ack_dpcd_bytes_to_write);
2560 if (wret == ack_dpcd_bytes_to_write)
2561 break;
2562 }
2563
1f6010a9 2564 /* check if there is new irq to be handled */
4562236b
HW
2565 dret = drm_dp_dpcd_read(
2566 &aconnector->dm_dp_aux.aux,
2567 dpcd_addr,
2568 esi,
2569 dpcd_bytes_to_read);
2570
2571 new_irq_handled = false;
d4a6e8a9 2572 } else {
4562236b 2573 break;
d4a6e8a9 2574 }
4562236b
HW
2575 }
2576
2577 if (process_count == max_process_count)
f1ad2f5e 2578 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
4562236b
HW
2579}
2580
2581static void handle_hpd_rx_irq(void *param)
2582{
c84dec2f 2583 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
4562236b
HW
2584 struct drm_connector *connector = &aconnector->base;
2585 struct drm_device *dev = connector->dev;
53cbf65c 2586 struct dc_link *dc_link = aconnector->dc_link;
4562236b 2587 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
c8ea79a8 2588 bool result = false;
fbbdadf2 2589 enum dc_connection_type new_connection_type = dc_connection_none;
c8ea79a8 2590 struct amdgpu_device *adev = drm_to_adev(dev);
2a0f9270 2591 union hpd_irq_data hpd_irq_data;
2a0f9270
BL
2592
2593 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
4562236b 2594
1f6010a9
DF
2595 /*
 2596 * TODO: Temporarily add a mutex to protect the hpd interrupt from a gpio
4562236b
HW
 2597 * conflict; after the i2c helper is implemented, this mutex should be
 2598 * retired.
2599 */
53cbf65c 2600 if (dc_link->type != dc_connection_mst_branch)
4562236b
HW
2601 mutex_lock(&aconnector->hpd_lock);
2602
3083a984
QZ
2603 read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2604
2605 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2606 (dc_link->type == dc_connection_mst_branch)) {
2607 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2608 result = true;
2609 dm_handle_hpd_rx_irq(aconnector);
2610 goto out;
2611 } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2612 result = false;
2613 dm_handle_hpd_rx_irq(aconnector);
2614 goto out;
2615 }
2616 }
2617
c8ea79a8 2618 mutex_lock(&adev->dm.dc_lock);
2a0f9270 2619#ifdef CONFIG_DRM_AMD_DC_HDCP
c8ea79a8 2620 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2a0f9270 2621#else
c8ea79a8 2622 result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2a0f9270 2623#endif
c8ea79a8
QZ
2624 mutex_unlock(&adev->dm.dc_lock);
2625
3083a984 2626out:
c8ea79a8 2627 if (result && !is_mst_root_connector) {
4562236b 2628 /* Downstream Port status changed. */
fbbdadf2
BL
2629 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2630 DRM_ERROR("KMS: Failed to detect connector\n");
2631
2632 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2633 emulated_link_detect(dc_link);
2634
2635 if (aconnector->fake_enable)
2636 aconnector->fake_enable = false;
2637
2638 amdgpu_dm_update_connector_after_detect(aconnector);
2639
2640
2641 drm_modeset_lock_all(dev);
2642 dm_restore_drm_connector_state(dev, connector);
2643 drm_modeset_unlock_all(dev);
2644
2645 drm_kms_helper_hotplug_event(dev);
2646 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
88ac3dda
RL
2647
2648 if (aconnector->fake_enable)
2649 aconnector->fake_enable = false;
2650
4562236b
HW
2651 amdgpu_dm_update_connector_after_detect(aconnector);
2652
2653
2654 drm_modeset_lock_all(dev);
2655 dm_restore_drm_connector_state(dev, connector);
2656 drm_modeset_unlock_all(dev);
2657
2658 drm_kms_helper_hotplug_event(dev);
2659 }
2660 }
2a0f9270 2661#ifdef CONFIG_DRM_AMD_DC_HDCP
95f247e7
DC
2662 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2663 if (adev->dm.hdcp_workqueue)
2664 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2665 }
2a0f9270 2666#endif
4562236b 2667
e86e8947
HV
2668 if (dc_link->type != dc_connection_mst_branch) {
2669 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
4562236b 2670 mutex_unlock(&aconnector->hpd_lock);
e86e8947 2671 }
4562236b
HW
2672}
2673
2674static void register_hpd_handlers(struct amdgpu_device *adev)
2675{
4a580877 2676 struct drm_device *dev = adev_to_drm(adev);
4562236b 2677 struct drm_connector *connector;
c84dec2f 2678 struct amdgpu_dm_connector *aconnector;
4562236b
HW
2679 const struct dc_link *dc_link;
2680 struct dc_interrupt_params int_params = {0};
2681
2682 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2683 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2684
2685 list_for_each_entry(connector,
2686 &dev->mode_config.connector_list, head) {
2687
c84dec2f 2688 aconnector = to_amdgpu_dm_connector(connector);
4562236b
HW
2689 dc_link = aconnector->dc_link;
2690
2691 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2692 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2693 int_params.irq_source = dc_link->irq_source_hpd;
2694
2695 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2696 handle_hpd_irq,
2697 (void *) aconnector);
2698 }
2699
2700 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2701
2702 /* Also register for DP short pulse (hpd_rx). */
2703 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2704 int_params.irq_source = dc_link->irq_source_hpd_rx;
2705
2706 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2707 handle_hpd_rx_irq,
2708 (void *) aconnector);
2709 }
2710 }
2711}
2712
55e56389
MR
2713#if defined(CONFIG_DRM_AMD_DC_SI)
2714/* Register IRQ sources and initialize IRQ callbacks */
2715static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2716{
2717 struct dc *dc = adev->dm.dc;
2718 struct common_irq_params *c_irq_params;
2719 struct dc_interrupt_params int_params = {0};
2720 int r;
2721 int i;
2722 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2723
2724 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2725 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2726
2727 /*
2728 * Actions of amdgpu_irq_add_id():
2729 * 1. Register a set() function with base driver.
2730 * Base driver will call set() function to enable/disable an
2731 * interrupt in DC hardware.
2732 * 2. Register amdgpu_dm_irq_handler().
2733 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2734 * coming from DC hardware.
2735 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2736 * for acknowledging and handling. */
2737
2738 /* Use VBLANK interrupt */
2739 for (i = 0; i < adev->mode_info.num_crtc; i++) {
 2740 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2741 if (r) {
2742 DRM_ERROR("Failed to add crtc irq id!\n");
2743 return r;
2744 }
2745
2746 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2747 int_params.irq_source =
 2748 dc_interrupt_to_irq_source(dc, i + 1, 0);
2749
2750 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2751
2752 c_irq_params->adev = adev;
2753 c_irq_params->irq_src = int_params.irq_source;
2754
2755 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2756 dm_crtc_high_irq, c_irq_params);
2757 }
2758
2759 /* Use GRPH_PFLIP interrupt */
2760 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2761 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2762 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2763 if (r) {
2764 DRM_ERROR("Failed to add page flip irq id!\n");
2765 return r;
2766 }
2767
2768 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2769 int_params.irq_source =
2770 dc_interrupt_to_irq_source(dc, i, 0);
2771
2772 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2773
2774 c_irq_params->adev = adev;
2775 c_irq_params->irq_src = int_params.irq_source;
2776
2777 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2778 dm_pflip_high_irq, c_irq_params);
2779
2780 }
2781
2782 /* HPD */
2783 r = amdgpu_irq_add_id(adev, client_id,
2784 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2785 if (r) {
2786 DRM_ERROR("Failed to add hpd irq id!\n");
2787 return r;
2788 }
2789
2790 register_hpd_handlers(adev);
2791
2792 return 0;
2793}
2794#endif
2795
4562236b
HW
2796/* Register IRQ sources and initialize IRQ callbacks */
2797static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2798{
2799 struct dc *dc = adev->dm.dc;
2800 struct common_irq_params *c_irq_params;
2801 struct dc_interrupt_params int_params = {0};
2802 int r;
2803 int i;
1ffdeca6 2804 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2c8ad2d5 2805
84374725 2806 if (adev->asic_type >= CHIP_VEGA10)
3760f76c 2807 client_id = SOC15_IH_CLIENTID_DCE;
4562236b
HW
2808
2809 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2810 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2811
1f6010a9
DF
2812 /*
2813 * Actions of amdgpu_irq_add_id():
4562236b
HW
2814 * 1. Register a set() function with base driver.
2815 * Base driver will call set() function to enable/disable an
2816 * interrupt in DC hardware.
2817 * 2. Register amdgpu_dm_irq_handler().
2818 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2819 * coming from DC hardware.
2820 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2821 * for acknowledging and handling. */
2822
b57de80a 2823 /* Use VBLANK interrupt */
e9029155 2824 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2c8ad2d5 2825 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
4562236b
HW
2826 if (r) {
2827 DRM_ERROR("Failed to add crtc irq id!\n");
2828 return r;
2829 }
2830
2831 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2832 int_params.irq_source =
3d761e79 2833 dc_interrupt_to_irq_source(dc, i, 0);
4562236b 2834
b57de80a 2835 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
4562236b
HW
2836
2837 c_irq_params->adev = adev;
2838 c_irq_params->irq_src = int_params.irq_source;
2839
2840 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2841 dm_crtc_high_irq, c_irq_params);
2842 }
2843
d2574c33
MK
2844 /* Use VUPDATE interrupt */
2845 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2846 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2847 if (r) {
2848 DRM_ERROR("Failed to add vupdate irq id!\n");
2849 return r;
2850 }
2851
2852 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2853 int_params.irq_source =
2854 dc_interrupt_to_irq_source(dc, i, 0);
2855
2856 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2857
2858 c_irq_params->adev = adev;
2859 c_irq_params->irq_src = int_params.irq_source;
2860
2861 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2862 dm_vupdate_high_irq, c_irq_params);
2863 }
2864
3d761e79 2865 /* Use GRPH_PFLIP interrupt */
4562236b
HW
2866 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2867 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2c8ad2d5 2868 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
4562236b
HW
2869 if (r) {
2870 DRM_ERROR("Failed to add page flip irq id!\n");
2871 return r;
2872 }
2873
2874 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2875 int_params.irq_source =
2876 dc_interrupt_to_irq_source(dc, i, 0);
2877
2878 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2879
2880 c_irq_params->adev = adev;
2881 c_irq_params->irq_src = int_params.irq_source;
2882
2883 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2884 dm_pflip_high_irq, c_irq_params);
2885
2886 }
2887
2888 /* HPD */
2c8ad2d5
AD
2889 r = amdgpu_irq_add_id(adev, client_id,
2890 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
4562236b
HW
2891 if (r) {
2892 DRM_ERROR("Failed to add hpd irq id!\n");
2893 return r;
2894 }
2895
2896 register_hpd_handlers(adev);
2897
2898 return 0;
2899}
2900
b86a1aa3 2901#if defined(CONFIG_DRM_AMD_DC_DCN)
ff5ef992
AD
2902/* Register IRQ sources and initialize IRQ callbacks */
2903static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2904{
2905 struct dc *dc = adev->dm.dc;
2906 struct common_irq_params *c_irq_params;
2907 struct dc_interrupt_params int_params = {0};
2908 int r;
2909 int i;
2910
2911 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2912 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2913
1f6010a9
DF
2914 /*
2915 * Actions of amdgpu_irq_add_id():
ff5ef992
AD
2916 * 1. Register a set() function with base driver.
2917 * Base driver will call set() function to enable/disable an
2918 * interrupt in DC hardware.
2919 * 2. Register amdgpu_dm_irq_handler().
2920 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2921 * coming from DC hardware.
2922 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2923 * for acknowledging and handling.
1f6010a9 2924 */
ff5ef992
AD
2925
2926 /* Use VSTARTUP interrupt */
2927 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2928 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2929 i++) {
3760f76c 2930 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
ff5ef992
AD
2931
2932 if (r) {
2933 DRM_ERROR("Failed to add crtc irq id!\n");
2934 return r;
2935 }
2936
2937 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2938 int_params.irq_source =
2939 dc_interrupt_to_irq_source(dc, i, 0);
2940
2941 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2942
2943 c_irq_params->adev = adev;
2944 c_irq_params->irq_src = int_params.irq_source;
2945
2346ef47
NK
2946 amdgpu_dm_irq_register_interrupt(
2947 adev, &int_params, dm_crtc_high_irq, c_irq_params);
2948 }
2949
2950 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2951 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2952 * to trigger at end of each vblank, regardless of state of the lock,
2953 * matching DCE behaviour.
2954 */
2955 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2956 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2957 i++) {
2958 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2959
2960 if (r) {
2961 DRM_ERROR("Failed to add vupdate irq id!\n");
2962 return r;
2963 }
2964
2965 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2966 int_params.irq_source =
2967 dc_interrupt_to_irq_source(dc, i, 0);
2968
2969 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2970
2971 c_irq_params->adev = adev;
2972 c_irq_params->irq_src = int_params.irq_source;
2973
ff5ef992 2974 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2346ef47 2975 dm_vupdate_high_irq, c_irq_params);
d2574c33
MK
2976 }
2977
ff5ef992
AD
2978 /* Use GRPH_PFLIP interrupt */
2979 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2980 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2981 i++) {
3760f76c 2982 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
ff5ef992
AD
2983 if (r) {
2984 DRM_ERROR("Failed to add page flip irq id!\n");
2985 return r;
2986 }
2987
2988 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2989 int_params.irq_source =
2990 dc_interrupt_to_irq_source(dc, i, 0);
2991
2992 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2993
2994 c_irq_params->adev = adev;
2995 c_irq_params->irq_src = int_params.irq_source;
2996
2997 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2998 dm_pflip_high_irq, c_irq_params);
2999
3000 }
3001
3002 /* HPD */
3760f76c 3003 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
ff5ef992
AD
3004 &adev->hpd_irq);
3005 if (r) {
3006 DRM_ERROR("Failed to add hpd irq id!\n");
3007 return r;
3008 }
3009
3010 register_hpd_handlers(adev);
3011
3012 return 0;
3013}
3014#endif
3015
eb3dc897
NK
3016/*
3017 * Acquires the lock for the atomic state object and returns
3018 * the new atomic state.
3019 *
3020 * This should only be called during atomic check.
3021 */
3022static int dm_atomic_get_state(struct drm_atomic_state *state,
3023 struct dm_atomic_state **dm_state)
3024{
3025 struct drm_device *dev = state->dev;
1348969a 3026 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897
NK
3027 struct amdgpu_display_manager *dm = &adev->dm;
3028 struct drm_private_state *priv_state;
eb3dc897
NK
3029
3030 if (*dm_state)
3031 return 0;
3032
eb3dc897
NK
3033 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3034 if (IS_ERR(priv_state))
3035 return PTR_ERR(priv_state);
3036
3037 *dm_state = to_dm_atomic_state(priv_state);
3038
3039 return 0;
3040}
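/* Hypothetical caller sketch (illustrative only, not part of the driver):
 *
 * struct dm_atomic_state *dm_state = NULL;
 * int ret = dm_atomic_get_state(state, &dm_state);
 * if (ret)
 * return ret;
 * // dm_state->context may now be modified under the acquired lock
 */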
3041
dfd84d90 3042static struct dm_atomic_state *
eb3dc897
NK
3043dm_atomic_get_new_state(struct drm_atomic_state *state)
3044{
3045 struct drm_device *dev = state->dev;
1348969a 3046 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897
NK
3047 struct amdgpu_display_manager *dm = &adev->dm;
3048 struct drm_private_obj *obj;
3049 struct drm_private_state *new_obj_state;
3050 int i;
3051
3052 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3053 if (obj->funcs == dm->atomic_obj.funcs)
3054 return to_dm_atomic_state(new_obj_state);
3055 }
3056
3057 return NULL;
3058}
3059
eb3dc897
NK
3060static struct drm_private_state *
3061dm_atomic_duplicate_state(struct drm_private_obj *obj)
3062{
3063 struct dm_atomic_state *old_state, *new_state;
3064
3065 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3066 if (!new_state)
3067 return NULL;
3068
3069 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3070
813d20dc
AW
3071 old_state = to_dm_atomic_state(obj->state);
3072
3073 if (old_state && old_state->context)
3074 new_state->context = dc_copy_state(old_state->context);
3075
eb3dc897
NK
3076 if (!new_state->context) {
3077 kfree(new_state);
3078 return NULL;
3079 }
3080
eb3dc897
NK
3081 return &new_state->base;
3082}
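/* Note: duplicating the private object deep-copies the current DC context
 * via dc_copy_state(), so atomic check mutates a private copy; the live
 * state is only swapped in when the new atomic state is committed.
 */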
3083
3084static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3085 struct drm_private_state *state)
3086{
3087 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3088
3089 if (dm_state && dm_state->context)
3090 dc_release_state(dm_state->context);
3091
3092 kfree(dm_state);
3093}
3094
3095static struct drm_private_state_funcs dm_atomic_state_funcs = {
3096 .atomic_duplicate_state = dm_atomic_duplicate_state,
3097 .atomic_destroy_state = dm_atomic_destroy_state,
3098};
3099
4562236b
HW
3100static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3101{
eb3dc897 3102 struct dm_atomic_state *state;
4562236b
HW
3103 int r;
3104
3105 adev->mode_info.mode_config_initialized = true;
3106
4a580877
LT
3107 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3108 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
4562236b 3109
4a580877
LT
3110 adev_to_drm(adev)->mode_config.max_width = 16384;
3111 adev_to_drm(adev)->mode_config.max_height = 16384;
4562236b 3112
4a580877
LT
3113 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3114 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
1f6010a9 3115 /* indicates support for immediate flip */
4a580877 3116 adev_to_drm(adev)->mode_config.async_page_flip = true;
4562236b 3117
4a580877 3118 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
4562236b 3119
eb3dc897
NK
3120 state = kzalloc(sizeof(*state), GFP_KERNEL);
3121 if (!state)
3122 return -ENOMEM;
3123
813d20dc 3124 state->context = dc_create_state(adev->dm.dc);
eb3dc897
NK
3125 if (!state->context) {
3126 kfree(state);
3127 return -ENOMEM;
3128 }
3129
3130 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3131
4a580877 3132 drm_atomic_private_obj_init(adev_to_drm(adev),
8c1a765b 3133 &adev->dm.atomic_obj,
eb3dc897
NK
3134 &state->base,
3135 &dm_atomic_state_funcs);
3136
3dc9b1ce 3137 r = amdgpu_display_modeset_create_props(adev);
b67a468a
DL
3138 if (r) {
3139 dc_release_state(state->context);
3140 kfree(state);
4562236b 3141 return r;
b67a468a 3142 }
4562236b 3143
6ce8f316 3144 r = amdgpu_dm_audio_init(adev);
b67a468a
DL
3145 if (r) {
3146 dc_release_state(state->context);
3147 kfree(state);
6ce8f316 3148 return r;
b67a468a 3149 }
6ce8f316 3150
4562236b
HW
3151 return 0;
3152}
3153
206bbafe
DF
3154#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3155#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
94562810 3156#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
206bbafe 3157
4562236b
HW
3158#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3159 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3160
206bbafe
DF
3161static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3162{
3163#if defined(CONFIG_ACPI)
3164 struct amdgpu_dm_backlight_caps caps;
3165
58965855
FS
3166 memset(&caps, 0, sizeof(caps));
3167
206bbafe
DF
3168 if (dm->backlight_caps.caps_valid)
3169 return;
3170
3171 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3172 if (caps.caps_valid) {
94562810
RS
3173 dm->backlight_caps.caps_valid = true;
3174 if (caps.aux_support)
3175 return;
206bbafe
DF
3176 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3177 dm->backlight_caps.max_input_signal = caps.max_input_signal;
206bbafe
DF
3178 } else {
3179 dm->backlight_caps.min_input_signal =
3180 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3181 dm->backlight_caps.max_input_signal =
3182 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3183 }
3184#else
94562810
RS
3185 if (dm->backlight_caps.aux_support)
3186 return;
3187
8bcbc9ef
DF
3188 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3189 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
206bbafe
DF
3190#endif
3191}
3192
69d9f427
AM
3193static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3194 unsigned *min, unsigned *max)
94562810 3195{
94562810 3196 if (!caps)
69d9f427 3197 return 0;
94562810 3198
69d9f427
AM
3199 if (caps->aux_support) {
3200 // Firmware limits are in nits, DC API wants millinits.
3201 *max = 1000 * caps->aux_max_input_signal;
3202 *min = 1000 * caps->aux_min_input_signal;
94562810 3203 } else {
69d9f427
AM
3204 // Firmware limits are 8-bit, PWM control is 16-bit.
3205 *max = 0x101 * caps->max_input_signal;
3206 *min = 0x101 * caps->min_input_signal;
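	// 0x101 = 257 replicates the byte into both halves: 0x101 * 0xFF = 0xFFFF,
	// so the full 8-bit firmware range maps onto the full 16-bit PWM range.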
94562810 3207 }
69d9f427
AM
3208 return 1;
3209}
94562810 3210
69d9f427
AM
3211static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3212 uint32_t brightness)
3213{
3214 unsigned min, max;
94562810 3215
69d9f427
AM
3216 if (!get_brightness_range(caps, &min, &max))
3217 return brightness;
3218
3219 // Rescale 0..255 to min..max
3220 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3221 AMDGPU_MAX_BL_LEVEL);
3222}
3223
3224static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3225 uint32_t brightness)
3226{
3227 unsigned min, max;
3228
3229 if (!get_brightness_range(caps, &min, &max))
3230 return brightness;
3231
3232 if (brightness < min)
3233 return 0;
3234 // Rescale min..max to 0..255
3235 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3236 max - min);
94562810
RS
3237}
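/* Worked example (illustrative, assuming AMDGPU_MAX_BL_LEVEL == 255): with
 * the PWM defaults above (min_input_signal = 12, max_input_signal = 255),
 * min = 0x101 * 12 = 3084 and max = 0x101 * 255 = 65535. A user brightness
 * of 128 maps to 3084 + DIV_ROUND_CLOSEST(62451 * 128, 255) = 34432, and
 * convert_brightness_to_user() maps 34432 back to 128.
 */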
3238
4562236b
HW
3239static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3240{
3241 struct amdgpu_display_manager *dm = bl_get_data(bd);
206bbafe 3242 struct amdgpu_dm_backlight_caps caps;
94562810
RS
3243 struct dc_link *link = NULL;
3244 u32 brightness;
3245 bool rc;
4562236b 3246
206bbafe
DF
3247 amdgpu_dm_update_backlight_caps(dm);
3248 caps = dm->backlight_caps;
94562810
RS
3249
3250 link = (struct dc_link *)dm->backlight_link;
3251
69d9f427 3252 brightness = convert_brightness_from_user(&caps, bd->props.brightness);
94562810
RS
3253 // Change brightness based on AUX property
3254 if (caps.aux_support)
a2f8d988
AD
3255 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3256 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3257 else
3258 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
94562810
RS
3259
3260 return rc ? 0 : 1;
4562236b
HW
3261}
3262
3263static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3264{
620a0d27 3265 struct amdgpu_display_manager *dm = bl_get_data(bd);
0ad3e64e
AD
3266 struct amdgpu_dm_backlight_caps caps;
3267
3268 amdgpu_dm_update_backlight_caps(dm);
3269 caps = dm->backlight_caps;
620a0d27 3270
0ad3e64e
AD
3271 if (caps.aux_support) {
3272 struct dc_link *link = (struct dc_link *)dm->backlight_link;
3273 u32 avg, peak;
3274 bool rc;
3275
3276 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3277 if (!rc)
3278 return bd->props.brightness;
3279 return convert_brightness_to_user(&caps, avg);
3280 } else {
3281 int ret = dc_link_get_backlight_level(dm->backlight_link);
3282
3283 if (ret == DC_ERROR_UNEXPECTED)
3284 return bd->props.brightness;
3285 return convert_brightness_to_user(&caps, ret);
3286 }
4562236b
HW
3287}
3288
3289static const struct backlight_ops amdgpu_dm_backlight_ops = {
bb264220 3290 .options = BL_CORE_SUSPENDRESUME,
4562236b
HW
3291 .get_brightness = amdgpu_dm_backlight_get_brightness,
3292 .update_status = amdgpu_dm_backlight_update_status,
3293};
3294
7578ecda
AD
3295static void
3296amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4562236b
HW
3297{
3298 char bl_name[16];
3299 struct backlight_properties props = { 0 };
3300
206bbafe
DF
3301 amdgpu_dm_update_backlight_caps(dm);
3302
4562236b 3303 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
53a53f86 3304 props.brightness = AMDGPU_MAX_BL_LEVEL;
4562236b
HW
3305 props.type = BACKLIGHT_RAW;
3306
3307 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4a580877 3308 adev_to_drm(dm->adev)->primary->index);
4562236b
HW
3309
3310 dm->backlight_dev = backlight_device_register(bl_name,
4a580877
LT
3311 adev_to_drm(dm->adev)->dev,
3312 dm,
3313 &amdgpu_dm_backlight_ops,
3314 &props);
4562236b 3315
74baea42 3316 if (IS_ERR(dm->backlight_dev))
4562236b
HW
3317 DRM_ERROR("DM: Backlight registration failed!\n");
3318 else
f1ad2f5e 3319 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4562236b
HW
3320}
3321
3322#endif
3323
df534fff 3324static int initialize_plane(struct amdgpu_display_manager *dm,
b2fddb13 3325 struct amdgpu_mode_info *mode_info, int plane_id,
cc1fec57
NK
3326 enum drm_plane_type plane_type,
3327 const struct dc_plane_cap *plane_cap)
df534fff 3328{
f180b4bc 3329 struct drm_plane *plane;
df534fff
S
3330 unsigned long possible_crtcs;
3331 int ret = 0;
3332
f180b4bc 3333 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
df534fff
S
3334 if (!plane) {
3335 DRM_ERROR("KMS: Failed to allocate plane\n");
3336 return -ENOMEM;
3337 }
b2fddb13 3338 plane->type = plane_type;
df534fff
S
3339
3340 /*
b2fddb13
NK
3341 * HACK: IGT tests expect that the primary plane for a CRTC
3342 * can only have one possible CRTC. Only expose support for
3343 * any CRTC if they're not going to be used as a primary plane
3344 * for a CRTC - like overlay or underlay planes.
df534fff
S
3345 */
3346 possible_crtcs = 1 << plane_id;
3347 if (plane_id >= dm->dc->caps.max_streams)
3348 possible_crtcs = 0xff;
3349
cc1fec57 3350 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
df534fff
S
3351
3352 if (ret) {
3353 DRM_ERROR("KMS: Failed to initialize plane\n");
54087768 3354 kfree(plane);
df534fff
S
3355 return ret;
3356 }
3357
54087768
NK
3358 if (mode_info)
3359 mode_info->planes[plane_id] = plane;
3360
df534fff
S
3361 return ret;
3362}
3363
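/*
 * Illustrative note on the possible_crtcs mask above: a primary plane
 * with plane_id 2 gets 1 << 2 = 0x4 and is bound to CRTC 2 only, while
 * a plane with plane_id >= max_streams (overlay/underlay) gets 0xff and
 * may be assigned to any CRTC.
 */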
89fc8d4e
HW
3364
3365static void register_backlight_device(struct amdgpu_display_manager *dm,
3366 struct dc_link *link)
3367{
3368#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3369 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3370
3371 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3372 link->type != dc_connection_none) {
1f6010a9
DF
3373 /*
3374 * Even if registration fails, we should continue with
89fc8d4e
HW
3375 * DM initialization because not having a backlight control
3376 * is better than a black screen.
3377 */
3378 amdgpu_dm_register_backlight_device(dm);
3379
3380 if (dm->backlight_dev)
3381 dm->backlight_link = link;
3382 }
3383#endif
3384}
3385
3386
1f6010a9
DF
3387/*
3388 * In this architecture, the association
4562236b
HW
3389 * connector -> encoder -> crtc
3390 * is not really required. The crtc and connector will hold the
3391 * display_index as an abstraction to use with the DAL component
3392 *
3393 * Returns 0 on success
3394 */
7578ecda 3395static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4562236b
HW
3396{
3397 struct amdgpu_display_manager *dm = &adev->dm;
df534fff 3398 int32_t i;
c84dec2f 3399 struct amdgpu_dm_connector *aconnector = NULL;
f2a0f5e6 3400 struct amdgpu_encoder *aencoder = NULL;
d4e13b0d 3401 struct amdgpu_mode_info *mode_info = &adev->mode_info;
4562236b 3402 uint32_t link_cnt;
cc1fec57 3403 int32_t primary_planes;
fbbdadf2 3404 enum dc_connection_type new_connection_type = dc_connection_none;
cc1fec57 3405 const struct dc_plane_cap *plane;
4562236b 3406
d58159de
AD
3407 dm->display_indexes_num = dm->dc->caps.max_streams;
3408 /* Update the actual used number of crtc */
3409 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3410
4562236b 3411 link_cnt = dm->dc->caps.max_links;
4562236b
HW
3412 if (amdgpu_dm_mode_config_init(dm->adev)) {
3413 DRM_ERROR("DM: Failed to initialize mode config\n");
59d0f396 3414 return -EINVAL;
4562236b
HW
3415 }
3416
b2fddb13
NK
3417 /* There is one primary plane per CRTC */
3418 primary_planes = dm->dc->caps.max_streams;
54087768 3419 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
efa6a8b7 3420
b2fddb13
NK
3421 /*
3422 * Initialize primary planes, implicit planes for legacy IOCTLs.
3423 * Order is reversed to match iteration order in atomic check.
3424 */
3425 for (i = (primary_planes - 1); i >= 0; i--) {
cc1fec57
NK
3426 plane = &dm->dc->caps.planes[i];
3427
b2fddb13 3428 if (initialize_plane(dm, mode_info, i,
cc1fec57 3429 DRM_PLANE_TYPE_PRIMARY, plane)) {
df534fff 3430 DRM_ERROR("KMS: Failed to initialize primary plane\n");
cd8a2ae8 3431 goto fail;
d4e13b0d 3432 }
df534fff 3433 }
92f3ac40 3434
0d579c7e
NK
3435 /*
3436 * Initialize overlay planes, index starting after primary planes.
3437 * These planes have a higher DRM index than the primary planes since
3438 * they should be considered as having a higher z-order.
3439 * Order is reversed to match iteration order in atomic check.
cc1fec57
NK
3440 *
3441 * Only support DCN for now, and only expose one so we don't encourage
3442 * userspace to use up all the pipes.
0d579c7e 3443 */
cc1fec57
NK
3444 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3445 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3446
3447 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3448 continue;
3449
3450 if (!plane->blends_with_above || !plane->blends_with_below)
3451 continue;
3452
ea36ad34 3453 if (!plane->pixel_format_support.argb8888)
cc1fec57
NK
3454 continue;
3455
54087768 3456 if (initialize_plane(dm, NULL, primary_planes + i,
cc1fec57 3457 DRM_PLANE_TYPE_OVERLAY, plane)) {
0d579c7e 3458 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
cd8a2ae8 3459 goto fail;
d4e13b0d 3460 }
cc1fec57
NK
3461
3462 /* Only create one overlay plane. */
3463 break;
d4e13b0d 3464 }
4562236b 3465
d4e13b0d 3466 for (i = 0; i < dm->dc->caps.max_streams; i++)
f180b4bc 3467 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4562236b 3468 DRM_ERROR("KMS: Failed to initialize crtc\n");
cd8a2ae8 3469 goto fail;
4562236b 3470 }
4562236b 3471
4562236b
HW
3472 /* loops over all connectors on the board */
3473 for (i = 0; i < link_cnt; i++) {
89fc8d4e 3474 struct dc_link *link = NULL;
4562236b
HW
3475
3476 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3477 DRM_ERROR(
3478 "KMS: Cannot support more than %d display indexes\n",
3479 AMDGPU_DM_MAX_DISPLAY_INDEX);
3480 continue;
3481 }
3482
3483 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3484 if (!aconnector)
cd8a2ae8 3485 goto fail;
4562236b
HW
3486
3487 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
8440c304 3488 if (!aencoder)
cd8a2ae8 3489 goto fail;
4562236b
HW
3490
3491 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3492 DRM_ERROR("KMS: Failed to initialize encoder\n");
cd8a2ae8 3493 goto fail;
4562236b
HW
3494 }
3495
3496 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3497 DRM_ERROR("KMS: Failed to initialize connector\n");
cd8a2ae8 3498 goto fail;
4562236b
HW
3499 }
3500
89fc8d4e
HW
3501 link = dc_get_link_at_index(dm->dc, i);
3502
fbbdadf2
BL
3503 if (!dc_link_detect_sink(link, &new_connection_type))
3504 DRM_ERROR("KMS: Failed to detect connector\n");
3505
3506 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3507 emulated_link_detect(link);
3508 amdgpu_dm_update_connector_after_detect(aconnector);
3509
3510 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4562236b 3511 amdgpu_dm_update_connector_after_detect(aconnector);
89fc8d4e 3512 register_backlight_device(dm, link);
397a9bc5
RL
3513 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3514 amdgpu_dm_set_psr_caps(link);
89fc8d4e
HW
3515 }
3516
3517
4562236b
HW
3518 }
3519
3520 /* Software is initialized. Now we can register interrupt handlers. */
3521 switch (adev->asic_type) {
55e56389
MR
3522#if defined(CONFIG_DRM_AMD_DC_SI)
3523 case CHIP_TAHITI:
3524 case CHIP_PITCAIRN:
3525 case CHIP_VERDE:
3526 case CHIP_OLAND:
3527 if (dce60_register_irq_handlers(dm->adev)) {
3528 DRM_ERROR("DM: Failed to initialize IRQ\n");
3529 goto fail;
3530 }
3531 break;
3532#endif
4562236b
HW
3533 case CHIP_BONAIRE:
3534 case CHIP_HAWAII:
cd4b356f
AD
3535 case CHIP_KAVERI:
3536 case CHIP_KABINI:
3537 case CHIP_MULLINS:
4562236b
HW
3538 case CHIP_TONGA:
3539 case CHIP_FIJI:
3540 case CHIP_CARRIZO:
3541 case CHIP_STONEY:
3542 case CHIP_POLARIS11:
3543 case CHIP_POLARIS10:
b264d345 3544 case CHIP_POLARIS12:
7737de91 3545 case CHIP_VEGAM:
2c8ad2d5 3546 case CHIP_VEGA10:
2325ff30 3547 case CHIP_VEGA12:
1fe6bf2f 3548 case CHIP_VEGA20:
4562236b
HW
3549 if (dce110_register_irq_handlers(dm->adev)) {
3550 DRM_ERROR("DM: Failed to initialize IRQ\n");
cd8a2ae8 3551 goto fail;
4562236b
HW
3552 }
3553 break;
b86a1aa3 3554#if defined(CONFIG_DRM_AMD_DC_DCN)
ff5ef992 3555 case CHIP_RAVEN:
fbd2afe5 3556 case CHIP_NAVI12:
476e955d 3557 case CHIP_NAVI10:
fce651e3 3558 case CHIP_NAVI14:
30221ad8 3559 case CHIP_RENOIR:
79037324 3560 case CHIP_SIENNA_CICHLID:
a6c5308f 3561 case CHIP_NAVY_FLOUNDER:
2a411205 3562 case CHIP_DIMGREY_CAVEFISH:
469989ca 3563 case CHIP_VANGOGH:
ff5ef992
AD
3564 if (dcn10_register_irq_handlers(dm->adev)) {
3565 DRM_ERROR("DM: Failed to initialize IRQ\n");
cd8a2ae8 3566 goto fail;
ff5ef992
AD
3567 }
3568 break;
3569#endif
4562236b 3570 default:
e63f8673 3571 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
cd8a2ae8 3572 goto fail;
4562236b
HW
3573 }
3574
4562236b 3575 return 0;
cd8a2ae8 3576fail:
4562236b 3577 kfree(aencoder);
4562236b 3578 kfree(aconnector);
54087768 3579
59d0f396 3580 return -EINVAL;
4562236b
HW
3581}
3582
7578ecda 3583static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4562236b
HW
3584{
3585 drm_mode_config_cleanup(dm->ddev);
eb3dc897 3586 drm_atomic_private_obj_fini(&dm->atomic_obj);
4562236b
HW
3587 return;
3588}
3589
3590/******************************************************************************
3591 * amdgpu_display_funcs functions
3592 *****************************************************************************/
3593
1f6010a9 3594/*
4562236b
HW
3595 * dm_bandwidth_update - program display watermarks
3596 *
3597 * @adev: amdgpu_device pointer
3598 *
3599 * Calculate and program the display watermarks and line buffer allocation.
3600 */
3601static void dm_bandwidth_update(struct amdgpu_device *adev)
3602{
49c07a99 3603 /* TODO: implement later */
4562236b
HW
3604}
3605
39cc5be2 3606static const struct amdgpu_display_funcs dm_display_funcs = {
4562236b
HW
3607 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3608 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
7b42573b
HW
3609 .backlight_set_level = NULL, /* never called for DC */
3610 .backlight_get_level = NULL, /* never called for DC */
4562236b
HW
3611 .hpd_sense = NULL,/* called unconditionally */
3612 .hpd_set_polarity = NULL, /* called unconditionally */
3613 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4562236b
HW
3614 .page_flip_get_scanoutpos =
3615 dm_crtc_get_scanoutpos,/* called unconditionally */
3616 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3617 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4562236b
HW
3618};
3619
3620#if defined(CONFIG_DEBUG_KERNEL_DC)
3621
3ee6b26b
AD
3622static ssize_t s3_debug_store(struct device *device,
3623 struct device_attribute *attr,
3624 const char *buf,
3625 size_t count)
4562236b
HW
3626{
3627 int ret;
3628 int s3_state;
ef1de361 3629 struct drm_device *drm_dev = dev_get_drvdata(device);
1348969a 3630 struct amdgpu_device *adev = drm_to_adev(drm_dev);
4562236b
HW
3631
3632 ret = kstrtoint(buf, 0, &s3_state);
3633
3634 if (ret == 0) {
3635 if (s3_state) {
3636 dm_resume(adev);
4a580877 3637 drm_kms_helper_hotplug_event(adev_to_drm(adev));
4562236b
HW
3638 } else
3639 dm_suspend(adev);
3640 }
3641
3642 return ret == 0 ? count : 0;
3643}
3644
3645DEVICE_ATTR_WO(s3_debug);
3646
3647#endif
3648
3649static int dm_early_init(void *handle)
3650{
3651 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3652
4562236b 3653 switch (adev->asic_type) {
55e56389
MR
3654#if defined(CONFIG_DRM_AMD_DC_SI)
3655 case CHIP_TAHITI:
3656 case CHIP_PITCAIRN:
3657 case CHIP_VERDE:
3658 adev->mode_info.num_crtc = 6;
3659 adev->mode_info.num_hpd = 6;
3660 adev->mode_info.num_dig = 6;
3661 break;
3662 case CHIP_OLAND:
3663 adev->mode_info.num_crtc = 2;
3664 adev->mode_info.num_hpd = 2;
3665 adev->mode_info.num_dig = 2;
3666 break;
3667#endif
4562236b
HW
3668 case CHIP_BONAIRE:
3669 case CHIP_HAWAII:
3670 adev->mode_info.num_crtc = 6;
3671 adev->mode_info.num_hpd = 6;
3672 adev->mode_info.num_dig = 6;
4562236b 3673 break;
cd4b356f
AD
3674 case CHIP_KAVERI:
3675 adev->mode_info.num_crtc = 4;
3676 adev->mode_info.num_hpd = 6;
3677 adev->mode_info.num_dig = 7;
cd4b356f
AD
3678 break;
3679 case CHIP_KABINI:
3680 case CHIP_MULLINS:
3681 adev->mode_info.num_crtc = 2;
3682 adev->mode_info.num_hpd = 6;
3683 adev->mode_info.num_dig = 6;
cd4b356f 3684 break;
4562236b
HW
3685 case CHIP_FIJI:
3686 case CHIP_TONGA:
3687 adev->mode_info.num_crtc = 6;
3688 adev->mode_info.num_hpd = 6;
3689 adev->mode_info.num_dig = 7;
4562236b
HW
3690 break;
3691 case CHIP_CARRIZO:
3692 adev->mode_info.num_crtc = 3;
3693 adev->mode_info.num_hpd = 6;
3694 adev->mode_info.num_dig = 9;
4562236b
HW
3695 break;
3696 case CHIP_STONEY:
3697 adev->mode_info.num_crtc = 2;
3698 adev->mode_info.num_hpd = 6;
3699 adev->mode_info.num_dig = 9;
4562236b
HW
3700 break;
3701 case CHIP_POLARIS11:
b264d345 3702 case CHIP_POLARIS12:
4562236b
HW
3703 adev->mode_info.num_crtc = 5;
3704 adev->mode_info.num_hpd = 5;
3705 adev->mode_info.num_dig = 5;
4562236b
HW
3706 break;
3707 case CHIP_POLARIS10:
7737de91 3708 case CHIP_VEGAM:
4562236b
HW
3709 adev->mode_info.num_crtc = 6;
3710 adev->mode_info.num_hpd = 6;
3711 adev->mode_info.num_dig = 6;
4562236b 3712 break;
2c8ad2d5 3713 case CHIP_VEGA10:
2325ff30 3714 case CHIP_VEGA12:
1fe6bf2f 3715 case CHIP_VEGA20:
2c8ad2d5
AD
3716 adev->mode_info.num_crtc = 6;
3717 adev->mode_info.num_hpd = 6;
3718 adev->mode_info.num_dig = 6;
3719 break;
b86a1aa3 3720#if defined(CONFIG_DRM_AMD_DC_DCN)
ff5ef992 3721 case CHIP_RAVEN:
20f2ffe5
AD
3722 case CHIP_RENOIR:
3723 case CHIP_VANGOGH:
ff5ef992
AD
3724 adev->mode_info.num_crtc = 4;
3725 adev->mode_info.num_hpd = 4;
3726 adev->mode_info.num_dig = 4;
ff5ef992 3727 break;
476e955d 3728 case CHIP_NAVI10:
fbd2afe5 3729 case CHIP_NAVI12:
79037324 3730 case CHIP_SIENNA_CICHLID:
a6c5308f 3731 case CHIP_NAVY_FLOUNDER:
476e955d
HW
3732 adev->mode_info.num_crtc = 6;
3733 adev->mode_info.num_hpd = 6;
3734 adev->mode_info.num_dig = 6;
3735 break;
fce651e3 3736 case CHIP_NAVI14:
2a411205 3737 case CHIP_DIMGREY_CAVEFISH:
fce651e3
BL
3738 adev->mode_info.num_crtc = 5;
3739 adev->mode_info.num_hpd = 5;
3740 adev->mode_info.num_dig = 5;
3741 break;
20f2ffe5 3742#endif
4562236b 3743 default:
e63f8673 3744 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4562236b
HW
3745 return -EINVAL;
3746 }
3747
c8dd5715
MD
3748 amdgpu_dm_set_irq_funcs(adev);
3749
39cc5be2
AD
3750 if (adev->mode_info.funcs == NULL)
3751 adev->mode_info.funcs = &dm_display_funcs;
3752
1f6010a9
DF
3753 /*
3754 * Note: Do NOT change adev->audio_endpt_rreg and
4562236b 3755 * adev->audio_endpt_wreg because they are initialised in
1f6010a9
DF
3756 * amdgpu_device_init()
3757 */
4562236b
HW
3758#if defined(CONFIG_DEBUG_KERNEL_DC)
3759 device_create_file(
4a580877 3760 adev_to_drm(adev)->dev,
4562236b
HW
3761 &dev_attr_s3_debug);
3762#endif
3763
3764 return 0;
3765}
3766
9b690ef3 3767static bool modeset_required(struct drm_crtc_state *crtc_state,
0971c40e
HW
3768 struct dc_stream_state *new_stream,
3769 struct dc_stream_state *old_stream)
9b690ef3 3770{
2afda735 3771 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
e7b07cee
HW
3772}
3773
3774static bool modereset_required(struct drm_crtc_state *crtc_state)
3775{
2afda735 3776 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
e7b07cee
HW
3777}
3778
7578ecda 3779static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
e7b07cee
HW
3780{
3781 drm_encoder_cleanup(encoder);
3782 kfree(encoder);
3783}
3784
3785static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3786 .destroy = amdgpu_dm_encoder_destroy,
3787};
3788
e7b07cee 3789
6300b3bd
MK
3790static void get_min_max_dc_plane_scaling(struct drm_device *dev,
3791 struct drm_framebuffer *fb,
3792 int *min_downscale, int *max_upscale)
3793{
3794 struct amdgpu_device *adev = drm_to_adev(dev);
3795 struct dc *dc = adev->dm.dc;
3796 /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
3797 struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
3798
3799 switch (fb->format->format) {
3800 case DRM_FORMAT_P010:
3801 case DRM_FORMAT_NV12:
3802 case DRM_FORMAT_NV21:
3803 *max_upscale = plane_cap->max_upscale_factor.nv12;
3804 *min_downscale = plane_cap->max_downscale_factor.nv12;
3805 break;
3806
3807 case DRM_FORMAT_XRGB16161616F:
3808 case DRM_FORMAT_ARGB16161616F:
3809 case DRM_FORMAT_XBGR16161616F:
3810 case DRM_FORMAT_ABGR16161616F:
3811 *max_upscale = plane_cap->max_upscale_factor.fp16;
3812 *min_downscale = plane_cap->max_downscale_factor.fp16;
3813 break;
3814
3815 default:
3816 *max_upscale = plane_cap->max_upscale_factor.argb8888;
3817 *min_downscale = plane_cap->max_downscale_factor.argb8888;
3818 break;
3819 }
3820
3821 /*
3822 * A factor of 1 in the plane_cap means scaling is not allowed, i.e. use a
3823 * scaling factor of 1.0 == 1000 units.
3824 */
3825 if (*max_upscale == 1)
3826 *max_upscale = 1000;
3827
3828 if (*min_downscale == 1)
3829 *min_downscale = 1000;
3830}
3831
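/*
 * Illustrative reading of the 1/1000 scaling units above: a plane_cap
 * max_downscale_factor of 250 allows shrinking to 25% of the source
 * (a 4x downscale) and a max_upscale_factor of 16000 allows a 16x
 * upscale; these also happen to be the fallback bounds used by
 * fill_dc_scaling_info() below when no framebuffer is attached.
 */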
3832
695af5f9
NK
3833static int fill_dc_scaling_info(const struct drm_plane_state *state,
3834 struct dc_scaling_info *scaling_info)
e7b07cee 3835{
6300b3bd 3836 int scale_w, scale_h, min_downscale, max_upscale;
e7b07cee 3837
695af5f9 3838 memset(scaling_info, 0, sizeof(*scaling_info));
e7b07cee 3839
695af5f9
NK
3840 /* Source is fixed 16.16 but we ignore mantissa for now... */
3841 scaling_info->src_rect.x = state->src_x >> 16;
3842 scaling_info->src_rect.y = state->src_y >> 16;
e7b07cee 3843
695af5f9
NK
3844 scaling_info->src_rect.width = state->src_w >> 16;
3845 if (scaling_info->src_rect.width == 0)
3846 return -EINVAL;
3847
3848 scaling_info->src_rect.height = state->src_h >> 16;
3849 if (scaling_info->src_rect.height == 0)
3850 return -EINVAL;
3851
3852 scaling_info->dst_rect.x = state->crtc_x;
3853 scaling_info->dst_rect.y = state->crtc_y;
e7b07cee
HW
3854
3855 if (state->crtc_w == 0)
695af5f9 3856 return -EINVAL;
e7b07cee 3857
695af5f9 3858 scaling_info->dst_rect.width = state->crtc_w;
e7b07cee
HW
3859
3860 if (state->crtc_h == 0)
695af5f9 3861 return -EINVAL;
e7b07cee 3862
695af5f9 3863 scaling_info->dst_rect.height = state->crtc_h;
e7b07cee 3864
695af5f9
NK
3865 /* DRM doesn't specify clipping on destination output. */
3866 scaling_info->clip_rect = scaling_info->dst_rect;
e7b07cee 3867
6300b3bd
MK
3868 /* Validate scaling per-format with DC plane caps */
3869 if (state->plane && state->plane->dev && state->fb) {
3870 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
3871 &min_downscale, &max_upscale);
3872 } else {
3873 min_downscale = 250;
3874 max_upscale = 16000;
3875 }
3876
6491f0c0
NK
3877 scale_w = scaling_info->dst_rect.width * 1000 /
3878 scaling_info->src_rect.width;
e7b07cee 3879
6300b3bd 3880 if (scale_w < min_downscale || scale_w > max_upscale)
6491f0c0
NK
3881 return -EINVAL;
3882
3883 scale_h = scaling_info->dst_rect.height * 1000 /
3884 scaling_info->src_rect.height;
3885
6300b3bd 3886 if (scale_h < min_downscale || scale_h > max_upscale)
6491f0c0
NK
3887 return -EINVAL;
3888
695af5f9
NK
3889 /*
3890 * The "scaling_quality" can be ignored for now; a quality of 0 makes DC
3891 * assume reasonable defaults based on the format.
3892 */
e7b07cee 3893
695af5f9 3894 return 0;
4562236b 3895}
695af5f9 3896
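/*
 * Editor's note on the 16.16 fixed-point source rectangle above: DRM
 * stores src_x/src_y/src_w/src_h with 16 fractional bits, so a src_w of
 * 1920 << 16 (0x07800000) becomes 1920 after the >> 16 and any
 * sub-pixel fraction is deliberately dropped for now.
 */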
a3241991
BN
3897static void
3898fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
3899 uint64_t tiling_flags)
e7b07cee 3900{
a3241991
BN
3901 /* Fill GFX8 params */
3902 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3903 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
707477b0 3904
a3241991
BN
3905 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3906 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3907 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3908 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3909 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
b830ebc9 3910
a3241991
BN
3911 /* XXX fix me for VI */
3912 tiling_info->gfx8.num_banks = num_banks;
3913 tiling_info->gfx8.array_mode =
3914 DC_ARRAY_2D_TILED_THIN1;
3915 tiling_info->gfx8.tile_split = tile_split;
3916 tiling_info->gfx8.bank_width = bankw;
3917 tiling_info->gfx8.bank_height = bankh;
3918 tiling_info->gfx8.tile_aspect = mtaspect;
3919 tiling_info->gfx8.tile_mode =
3920 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3921 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3922 == DC_ARRAY_1D_TILED_THIN1) {
3923 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
e7b07cee
HW
3924 }
3925
a3241991
BN
3926 tiling_info->gfx8.pipe_config =
3927 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
e7b07cee
HW
3928}
3929
a3241991
BN
3930static void
3931fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
3932 union dc_tiling_info *tiling_info)
3933{
3934 tiling_info->gfx9.num_pipes =
3935 adev->gfx.config.gb_addr_config_fields.num_pipes;
3936 tiling_info->gfx9.num_banks =
3937 adev->gfx.config.gb_addr_config_fields.num_banks;
3938 tiling_info->gfx9.pipe_interleave =
3939 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3940 tiling_info->gfx9.num_shader_engines =
3941 adev->gfx.config.gb_addr_config_fields.num_se;
3942 tiling_info->gfx9.max_compressed_frags =
3943 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3944 tiling_info->gfx9.num_rb_per_se =
3945 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3946 tiling_info->gfx9.shaderEnable = 1;
a3241991
BN
3947 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3948 adev->asic_type == CHIP_NAVY_FLOUNDER ||
3949 adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
3950 adev->asic_type == CHIP_VANGOGH)
3951 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
7df7e505
NK
3952}
3953
695af5f9 3954static int
a3241991
BN
3955validate_dcc(struct amdgpu_device *adev,
3956 const enum surface_pixel_format format,
3957 const enum dc_rotation_angle rotation,
3958 const union dc_tiling_info *tiling_info,
3959 const struct dc_plane_dcc_param *dcc,
3960 const struct dc_plane_address *address,
3961 const struct plane_size *plane_size)
7df7e505
NK
3962{
3963 struct dc *dc = adev->dm.dc;
8daa1218
NC
3964 struct dc_dcc_surface_param input;
3965 struct dc_surface_dcc_cap output;
7df7e505 3966
8daa1218
NC
3967 memset(&input, 0, sizeof(input));
3968 memset(&output, 0, sizeof(output));
3969
a3241991 3970 if (!dcc->enable)
87b7ebc2
RS
3971 return 0;
3972
a3241991
BN
3973 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
3974 !dc->cap_funcs.get_dcc_compression_cap)
09e5665a 3975 return -EINVAL;
7df7e505 3976
695af5f9 3977 input.format = format;
12e2b2d4
DL
3978 input.surface_size.width = plane_size->surface_size.width;
3979 input.surface_size.height = plane_size->surface_size.height;
695af5f9 3980 input.swizzle_mode = tiling_info->gfx9.swizzle;
7df7e505 3981
695af5f9 3982 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
7df7e505 3983 input.scan = SCAN_DIRECTION_HORIZONTAL;
695af5f9 3984 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
7df7e505
NK
3985 input.scan = SCAN_DIRECTION_VERTICAL;
3986
3987 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
09e5665a 3988 return -EINVAL;
7df7e505
NK
3989
3990 if (!output.capable)
09e5665a 3991 return -EINVAL;
7df7e505 3992
a3241991
BN
3993 if (dcc->independent_64b_blks == 0 &&
3994 output.grph.rgb.independent_64b_blks != 0)
09e5665a 3995 return -EINVAL;
7df7e505 3996
a3241991
BN
3997 return 0;
3998}
3999
37384b3f
BN
4000static bool
4001modifier_has_dcc(uint64_t modifier)
4002{
4003 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4004}
4005
4006static unsigned
4007modifier_gfx9_swizzle_mode(uint64_t modifier)
4008{
4009 if (modifier == DRM_FORMAT_MOD_LINEAR)
4010 return 0;
4011
4012 return AMD_FMT_MOD_GET(TILE, modifier);
4013}
4014
dfbbfe3c
BN
4015static const struct drm_format_info *
4016amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4017{
816853f9 4018 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
dfbbfe3c
BN
4019}
4020
37384b3f
BN
4021static void
4022fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4023 union dc_tiling_info *tiling_info,
4024 uint64_t modifier)
4025{
4026 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4027 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4028 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4029 unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4030
4031 fill_gfx9_tiling_info_from_device(adev, tiling_info);
4032
4033 if (!IS_AMD_FMT_MOD(modifier))
4034 return;
4035
4036 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4037 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4038
4039 if (adev->family >= AMDGPU_FAMILY_NV) {
4040 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4041 } else {
4042 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4043
4044 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4045 }
4046}
4047
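/*
 * Illustrative decode of the modifier fields above (hypothetical
 * values): PIPE_XOR_BITS = 6 gives pipes_log2 = min(4, 6) = 4, hence
 * num_pipes = 16 and num_shader_engines = 1 << (6 - 4) = 4; on
 * AMDGPU_FAMILY_NV and later the PACKERS field additionally sets
 * num_pkrs, while older parts take num_banks from BANK_XOR_BITS.
 */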
faa37f54
BN
4048enum dm_micro_swizzle {
4049 MICRO_SWIZZLE_Z = 0,
4050 MICRO_SWIZZLE_S = 1,
4051 MICRO_SWIZZLE_D = 2,
4052 MICRO_SWIZZLE_R = 3
4053};
4054
4055static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4056 uint32_t format,
4057 uint64_t modifier)
4058{
4059 struct amdgpu_device *adev = drm_to_adev(plane->dev);
4060 const struct drm_format_info *info = drm_format_info(format);
4061
4062 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4063
4064 if (!info)
4065 return false;
4066
4067 /*
4068 * We always have to allow this modifier, because core DRM still
4069 * checks LINEAR support if userspace does not provide modifiers.
4070 */
4071 if (modifier == DRM_FORMAT_MOD_LINEAR)
4072 return true;
4073
4074 /*
4075 * The arbitrary tiling support for multiplane formats has not been hooked
4076 * up.
4077 */
4078 if (info->num_planes > 1)
4079 return false;
4080
4081 /*
4082 * For D swizzle the canonical modifier depends on the bpp, so check
4083 * it here.
4084 */
4085 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4086 adev->family >= AMDGPU_FAMILY_NV) {
4087 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4088 return false;
4089 }
4090
4091 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4092 info->cpp[0] < 8)
4093 return false;
4094
4095 if (modifier_has_dcc(modifier)) {
4096 /* Per radeonsi comments 16/64 bpp are more complicated. */
4097 if (info->cpp[0] != 4)
4098 return false;
4099 }
4100
4101 return true;
4102}
4103
4104static void
4105add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4106{
4107 if (!*mods)
4108 return;
4109
4110 if (*cap - *size < 1) {
4111 uint64_t new_cap = *cap * 2;
4112 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4113
4114 if (!new_mods) {
4115 kfree(*mods);
4116 *mods = NULL;
4117 return;
4118 }
4119
4120 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4121 kfree(*mods);
4122 *mods = new_mods;
4123 *cap = new_cap;
4124 }
4125
4126 (*mods)[*size] = mod;
4127 *size += 1;
4128}
4129
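/*
 * Editor's summary of add_modifier() above: it appends with
 * grow-by-doubling, starting from the 128-entry buffer allocated in
 * get_plane_modifiers() below. On allocation failure *mods is freed
 * and set to NULL, turning later calls into no-ops and letting the
 * caller report -ENOMEM from the NULL pointer.
 */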
4130static void
4131add_gfx9_modifiers(const struct amdgpu_device *adev,
4132 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4133{
4134 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4135 int pipe_xor_bits = min(8, pipes +
4136 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4137 int bank_xor_bits = min(8 - pipe_xor_bits,
4138 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4139 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4140 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4141
4142
4143 if (adev->family == AMDGPU_FAMILY_RV) {
4144 /* Raven2 and later */
4145 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4146
4147 /*
4148 * No _D DCC swizzles yet because we only allow 32bpp, which
4149 * doesn't support _D on DCN
4150 */
4151
4152 if (has_constant_encode) {
4153 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4154 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4155 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4156 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4157 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4158 AMD_FMT_MOD_SET(DCC, 1) |
4159 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4160 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4161 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4162 }
4163
4164 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4165 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4166 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4167 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4168 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4169 AMD_FMT_MOD_SET(DCC, 1) |
4170 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4171 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4172 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4173
4174 if (has_constant_encode) {
4175 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4176 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4177 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4178 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4179 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4180 AMD_FMT_MOD_SET(DCC, 1) |
4181 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4182 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4183 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4184
4185 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4186 AMD_FMT_MOD_SET(RB, rb) |
4187 AMD_FMT_MOD_SET(PIPE, pipes));
4188 }
4189
4190 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4191 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4192 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4193 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4194 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4195 AMD_FMT_MOD_SET(DCC, 1) |
4196 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4197 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4198 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4199 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4200 AMD_FMT_MOD_SET(RB, rb) |
4201 AMD_FMT_MOD_SET(PIPE, pipes));
4202 }
4203
4204 /*
4205 * Only supported for 64bpp on Raven, will be filtered on format in
4206 * dm_plane_format_mod_supported.
4207 */
4208 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4209 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4210 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4211 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4212 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4213
4214 if (adev->family == AMDGPU_FAMILY_RV) {
4215 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4216 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4217 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4218 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4219 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4220 }
4221
4222 /*
4223 * Only supported for 64bpp on Raven, will be filtered on format in
4224 * dm_plane_format_mod_supported.
4225 */
4226 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4227 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4228 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4229
4230 if (adev->family == AMDGPU_FAMILY_RV) {
4231 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4232 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4233 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4234 }
4235}
4236
4237static void
4238add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4239 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4240{
4241 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4242
4243 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4244 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4245 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4246 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4247 AMD_FMT_MOD_SET(DCC, 1) |
4248 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4249 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4250 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4251
4252 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4253 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4254 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4255 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4256 AMD_FMT_MOD_SET(DCC, 1) |
4257 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4258 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4259 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4260 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4261
4262 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4263 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4264 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4265 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4266
4267 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4268 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4269 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4270 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4271
4272
4273 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4274 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4275 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4276 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4277
4278 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4279 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4280 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4281}
4282
4283static void
4284add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4285 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4286{
4287 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4288 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4289
4290 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4291 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4292 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4293 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4294 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4295 AMD_FMT_MOD_SET(DCC, 1) |
4296 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4297 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4298 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4299 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4300
4301 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4302 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4303 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4304 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4305 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4306 AMD_FMT_MOD_SET(DCC, 1) |
4307 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4308 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4309 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4310 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4311 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4312
4313 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4314 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4315 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4316 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4317 AMD_FMT_MOD_SET(PACKERS, pkrs));
4318
4319 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4320 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4321 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4322 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4323 AMD_FMT_MOD_SET(PACKERS, pkrs));
4324
4325 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4326 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4327 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4328 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4329
4330 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4331 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4332 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4333}
4334
4335static int
4336get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4337{
4338 uint64_t size = 0, capacity = 128;
4339 *mods = NULL;
4340
4341 /* We have not hooked up any pre-GFX9 modifiers. */
4342 if (adev->family < AMDGPU_FAMILY_AI)
4343 return 0;
4344
4345 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4346
4347 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4348 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4349 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4350 return *mods ? 0 : -ENOMEM;
4351 }
4352
4353 switch (adev->family) {
4354 case AMDGPU_FAMILY_AI:
4355 case AMDGPU_FAMILY_RV:
4356 add_gfx9_modifiers(adev, mods, &size, &capacity);
4357 break;
4358 case AMDGPU_FAMILY_NV:
4359 case AMDGPU_FAMILY_VGH:
4360 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4361 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4362 else
4363 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4364 break;
4365 }
4366
4367 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4368
4369 /* INVALID marks the end of the list. */
4370 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4371
4372 if (!*mods)
4373 return -ENOMEM;
4374
4375 return 0;
4376}
4377
37384b3f
BN
4378static int
4379fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4380 const struct amdgpu_framebuffer *afb,
4381 const enum surface_pixel_format format,
4382 const enum dc_rotation_angle rotation,
4383 const struct plane_size *plane_size,
4384 union dc_tiling_info *tiling_info,
4385 struct dc_plane_dcc_param *dcc,
4386 struct dc_plane_address *address,
4387 const bool force_disable_dcc)
4388{
4389 const uint64_t modifier = afb->base.modifier;
4390 int ret;
4391
4392 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4393 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4394
4395 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4396 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4397
4398 dcc->enable = 1;
4399 dcc->meta_pitch = afb->base.pitches[1];
4400 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4401
4402 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4403 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4404 }
4405
4406 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4407 if (ret)
4408 return ret;
7df7e505 4409
09e5665a
NK
4410 return 0;
4411}
4412
4413static int
320932bf 4414fill_plane_buffer_attributes(struct amdgpu_device *adev,
09e5665a 4415 const struct amdgpu_framebuffer *afb,
695af5f9
NK
4416 const enum surface_pixel_format format,
4417 const enum dc_rotation_angle rotation,
4418 const uint64_t tiling_flags,
09e5665a 4419 union dc_tiling_info *tiling_info,
12e2b2d4 4420 struct plane_size *plane_size,
09e5665a 4421 struct dc_plane_dcc_param *dcc,
87b7ebc2 4422 struct dc_plane_address *address,
5888f07a 4423 bool tmz_surface,
87b7ebc2 4424 bool force_disable_dcc)
09e5665a 4425{
320932bf 4426 const struct drm_framebuffer *fb = &afb->base;
09e5665a
NK
4427 int ret;
4428
4429 memset(tiling_info, 0, sizeof(*tiling_info));
320932bf 4430 memset(plane_size, 0, sizeof(*plane_size));
09e5665a 4431 memset(dcc, 0, sizeof(*dcc));
e0634e8d
NK
4432 memset(address, 0, sizeof(*address));
4433
5888f07a
HW
4434 address->tmz_surface = tmz_surface;
4435
695af5f9 4436 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
be7b9b32
BN
4437 uint64_t addr = afb->address + fb->offsets[0];
4438
12e2b2d4
DL
4439 plane_size->surface_size.x = 0;
4440 plane_size->surface_size.y = 0;
4441 plane_size->surface_size.width = fb->width;
4442 plane_size->surface_size.height = fb->height;
4443 plane_size->surface_pitch =
320932bf
NK
4444 fb->pitches[0] / fb->format->cpp[0];
4445
e0634e8d 4446 address->type = PLN_ADDR_TYPE_GRAPHICS;
be7b9b32
BN
4447 address->grph.addr.low_part = lower_32_bits(addr);
4448 address->grph.addr.high_part = upper_32_bits(addr);
1894478a 4449 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
be7b9b32 4450 uint64_t luma_addr = afb->address + fb->offsets[0];
1791e54f 4451 uint64_t chroma_addr = afb->address + fb->offsets[1];
e0634e8d 4452
12e2b2d4
DL
4453 plane_size->surface_size.x = 0;
4454 plane_size->surface_size.y = 0;
4455 plane_size->surface_size.width = fb->width;
4456 plane_size->surface_size.height = fb->height;
4457 plane_size->surface_pitch =
320932bf
NK
4458 fb->pitches[0] / fb->format->cpp[0];
4459
12e2b2d4
DL
4460 plane_size->chroma_size.x = 0;
4461 plane_size->chroma_size.y = 0;
320932bf 4462 /* TODO: set these based on surface format */
12e2b2d4
DL
4463 plane_size->chroma_size.width = fb->width / 2;
4464 plane_size->chroma_size.height = fb->height / 2;
320932bf 4465
12e2b2d4 4466 plane_size->chroma_pitch =
320932bf
NK
4467 fb->pitches[1] / fb->format->cpp[1];
4468
e0634e8d
NK
4469 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4470 address->video_progressive.luma_addr.low_part =
be7b9b32 4471 lower_32_bits(luma_addr);
e0634e8d 4472 address->video_progressive.luma_addr.high_part =
be7b9b32 4473 upper_32_bits(luma_addr);
e0634e8d
NK
4474 address->video_progressive.chroma_addr.low_part =
4475 lower_32_bits(chroma_addr);
4476 address->video_progressive.chroma_addr.high_part =
4477 upper_32_bits(chroma_addr);
4478 }
09e5665a 4479
a3241991 4480 if (adev->family >= AMDGPU_FAMILY_AI) {
9a33e881
BN
4481 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4482 rotation, plane_size,
4483 tiling_info, dcc,
4484 address,
4485 force_disable_dcc);
09e5665a
NK
4486 if (ret)
4487 return ret;
a3241991
BN
4488 } else {
4489 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
09e5665a
NK
4490 }
4491
4492 return 0;
7df7e505
NK
4493}
4494
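/*
 * Illustrative NV12 example for the video path above (hypothetical
 * buffer): a 1920x1080 NV12 framebuffer with pitches[0] = pitches[1] =
 * 1920 yields surface_pitch = 1920 / cpp[0](=1) = 1920 and
 * chroma_pitch = 1920 / cpp[1](=2) = 960, with the chroma plane sized
 * 960x540 and addressed through offsets[1].
 */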
d74004b6 4495static void
695af5f9 4496fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
d74004b6
NK
4497 bool *per_pixel_alpha, bool *global_alpha,
4498 int *global_alpha_value)
4499{
4500 *per_pixel_alpha = false;
4501 *global_alpha = false;
4502 *global_alpha_value = 0xff;
4503
4504 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4505 return;
4506
4507 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4508 static const uint32_t alpha_formats[] = {
4509 DRM_FORMAT_ARGB8888,
4510 DRM_FORMAT_RGBA8888,
4511 DRM_FORMAT_ABGR8888,
4512 };
4513 uint32_t format = plane_state->fb->format->format;
4514 unsigned int i;
4515
4516 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4517 if (format == alpha_formats[i]) {
4518 *per_pixel_alpha = true;
4519 break;
4520 }
4521 }
4522 }
4523
4524 if (plane_state->alpha < 0xffff) {
4525 *global_alpha = true;
4526 *global_alpha_value = plane_state->alpha >> 8;
4527 }
4528}
4529
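/*
 * Illustrative example of the global-alpha mapping above: a 16-bit DRM
 * plane alpha of 0x8080 (~50%) sets *global_alpha = true and
 * *global_alpha_value = 0x8080 >> 8 = 0x80, the 8-bit value DC
 * consumes; only the fully opaque 0xffff leaves global alpha disabled.
 */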
004fefa3
NK
4530static int
4531fill_plane_color_attributes(const struct drm_plane_state *plane_state,
695af5f9 4532 const enum surface_pixel_format format,
004fefa3
NK
4533 enum dc_color_space *color_space)
4534{
4535 bool full_range;
4536
4537 *color_space = COLOR_SPACE_SRGB;
4538
4539 /* DRM color properties only affect non-RGB formats. */
695af5f9 4540 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
004fefa3
NK
4541 return 0;
4542
4543 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4544
4545 switch (plane_state->color_encoding) {
4546 case DRM_COLOR_YCBCR_BT601:
4547 if (full_range)
4548 *color_space = COLOR_SPACE_YCBCR601;
4549 else
4550 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4551 break;
4552
4553 case DRM_COLOR_YCBCR_BT709:
4554 if (full_range)
4555 *color_space = COLOR_SPACE_YCBCR709;
4556 else
4557 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4558 break;
4559
4560 case DRM_COLOR_YCBCR_BT2020:
4561 if (full_range)
4562 *color_space = COLOR_SPACE_2020_YCBCR;
4563 else
4564 return -EINVAL;
4565 break;
4566
4567 default:
4568 return -EINVAL;
4569 }
4570
4571 return 0;
4572}
4573
695af5f9
NK
4574static int
4575fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4576 const struct drm_plane_state *plane_state,
4577 const uint64_t tiling_flags,
4578 struct dc_plane_info *plane_info,
87b7ebc2 4579 struct dc_plane_address *address,
5888f07a 4580 bool tmz_surface,
87b7ebc2 4581 bool force_disable_dcc)
695af5f9
NK
4582{
4583 const struct drm_framebuffer *fb = plane_state->fb;
4584 const struct amdgpu_framebuffer *afb =
4585 to_amdgpu_framebuffer(plane_state->fb);
4586 struct drm_format_name_buf format_name;
4587 int ret;
4588
4589 memset(plane_info, 0, sizeof(*plane_info));
4590
4591 switch (fb->format->format) {
4592 case DRM_FORMAT_C8:
4593 plane_info->format =
4594 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4595 break;
4596 case DRM_FORMAT_RGB565:
4597 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4598 break;
4599 case DRM_FORMAT_XRGB8888:
4600 case DRM_FORMAT_ARGB8888:
4601 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4602 break;
4603 case DRM_FORMAT_XRGB2101010:
4604 case DRM_FORMAT_ARGB2101010:
4605 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4606 break;
4607 case DRM_FORMAT_XBGR2101010:
4608 case DRM_FORMAT_ABGR2101010:
4609 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4610 break;
4611 case DRM_FORMAT_XBGR8888:
4612 case DRM_FORMAT_ABGR8888:
4613 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4614 break;
4615 case DRM_FORMAT_NV21:
4616 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4617 break;
4618 case DRM_FORMAT_NV12:
4619 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4620 break;
cbec6477
SW
4621 case DRM_FORMAT_P010:
4622 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4623 break;
492548dc
SW
4624 case DRM_FORMAT_XRGB16161616F:
4625 case DRM_FORMAT_ARGB16161616F:
4626 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4627 break;
2a5195dc
MK
4628 case DRM_FORMAT_XBGR16161616F:
4629 case DRM_FORMAT_ABGR16161616F:
4630 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4631 break;
695af5f9
NK
4632 default:
4633 DRM_ERROR(
4634 "Unsupported screen format %s\n",
4635 drm_get_format_name(fb->format->format, &format_name));
4636 return -EINVAL;
4637 }
4638
4639 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4640 case DRM_MODE_ROTATE_0:
4641 plane_info->rotation = ROTATION_ANGLE_0;
4642 break;
4643 case DRM_MODE_ROTATE_90:
4644 plane_info->rotation = ROTATION_ANGLE_90;
4645 break;
4646 case DRM_MODE_ROTATE_180:
4647 plane_info->rotation = ROTATION_ANGLE_180;
4648 break;
4649 case DRM_MODE_ROTATE_270:
4650 plane_info->rotation = ROTATION_ANGLE_270;
4651 break;
4652 default:
4653 plane_info->rotation = ROTATION_ANGLE_0;
4654 break;
4655 }
4656
4657 plane_info->visible = true;
4658 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4659
6d83a32d
MS
4660 plane_info->layer_index = 0;
4661
695af5f9
NK
4662 ret = fill_plane_color_attributes(plane_state, plane_info->format,
4663 &plane_info->color_space);
4664 if (ret)
4665 return ret;
4666
4667 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4668 plane_info->rotation, tiling_flags,
4669 &plane_info->tiling_info,
4670 &plane_info->plane_size,
5888f07a 4671 &plane_info->dcc, address, tmz_surface,
87b7ebc2 4672 force_disable_dcc);
695af5f9
NK
4673 if (ret)
4674 return ret;
4675
4676 fill_blending_from_plane_state(
4677 plane_state, &plane_info->per_pixel_alpha,
4678 &plane_info->global_alpha, &plane_info->global_alpha_value);
4679
4680 return 0;
4681}
4682
4683static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4684 struct dc_plane_state *dc_plane_state,
4685 struct drm_plane_state *plane_state,
4686 struct drm_crtc_state *crtc_state)
e7b07cee 4687{
cf020d49 4688 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6eed95b0 4689 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
695af5f9
NK
4690 struct dc_scaling_info scaling_info;
4691 struct dc_plane_info plane_info;
695af5f9 4692 int ret;
87b7ebc2 4693 bool force_disable_dcc = false;
e7b07cee 4694
695af5f9
NK
4695 ret = fill_dc_scaling_info(plane_state, &scaling_info);
4696 if (ret)
4697 return ret;
e7b07cee 4698
695af5f9
NK
4699 dc_plane_state->src_rect = scaling_info.src_rect;
4700 dc_plane_state->dst_rect = scaling_info.dst_rect;
4701 dc_plane_state->clip_rect = scaling_info.clip_rect;
4702 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
e7b07cee 4703
87b7ebc2 4704 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
707477b0 4705 ret = fill_dc_plane_info_and_addr(adev, plane_state,
6eed95b0 4706 afb->tiling_flags,
695af5f9 4707 &plane_info,
87b7ebc2 4708 &dc_plane_state->address,
6eed95b0 4709 afb->tmz_surface,
87b7ebc2 4710 force_disable_dcc);
004fefa3
NK
4711 if (ret)
4712 return ret;
4713
695af5f9
NK
4714 dc_plane_state->format = plane_info.format;
4715 dc_plane_state->color_space = plane_info.color_space;
4716 dc_plane_state->format = plane_info.format;
4717 dc_plane_state->plane_size = plane_info.plane_size;
4718 dc_plane_state->rotation = plane_info.rotation;
4719 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4720 dc_plane_state->stereo_format = plane_info.stereo_format;
4721 dc_plane_state->tiling_info = plane_info.tiling_info;
4722 dc_plane_state->visible = plane_info.visible;
4723 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4724 dc_plane_state->global_alpha = plane_info.global_alpha;
4725 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4726 dc_plane_state->dcc = plane_info.dcc;
6d83a32d 4727 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
695af5f9 4728
e277adc5
LSL
4729 /*
4730 * Always set input transfer function, since plane state is refreshed
4731 * every time.
4732 */
cf020d49
NK
4733 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4734 if (ret)
4735 return ret;
e7b07cee 4736
cf020d49 4737 return 0;
e7b07cee
HW
4738}
4739
3ee6b26b
AD
4740static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4741 const struct dm_connector_state *dm_state,
4742 struct dc_stream_state *stream)
e7b07cee
HW
4743{
4744 enum amdgpu_rmx_type rmx_type;
4745
4746 struct rect src = { 0 }; /* viewport in composition space*/
4747 struct rect dst = { 0 }; /* stream addressable area */
4748
4749 /* no mode. nothing to be done */
4750 if (!mode)
4751 return;
4752
4753 /* Full screen scaling by default */
4754 src.width = mode->hdisplay;
4755 src.height = mode->vdisplay;
4756 dst.width = stream->timing.h_addressable;
4757 dst.height = stream->timing.v_addressable;
4758
f4791779
HW
4759 if (dm_state) {
4760 rmx_type = dm_state->scaling;
4761 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4762 if (src.width * dst.height <
4763 src.height * dst.width) {
4764 /* height needs less upscaling/more downscaling */
4765 dst.width = src.width *
4766 dst.height / src.height;
4767 } else {
4768 /* width needs less upscaling/more downscaling */
4769 dst.height = src.height *
4770 dst.width / src.width;
4771 }
4772 } else if (rmx_type == RMX_CENTER) {
4773 dst = src;
e7b07cee 4774 }
e7b07cee 4775
f4791779
HW
4776 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4777 dst.y = (stream->timing.v_addressable - dst.height) / 2;
e7b07cee 4778
f4791779
HW
4779 if (dm_state->underscan_enable) {
4780 dst.x += dm_state->underscan_hborder / 2;
4781 dst.y += dm_state->underscan_vborder / 2;
4782 dst.width -= dm_state->underscan_hborder;
4783 dst.height -= dm_state->underscan_vborder;
4784 }
e7b07cee
HW
4785 }
4786
4787 stream->src = src;
4788 stream->dst = dst;
4789
f1ad2f5e 4790 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
e7b07cee
HW
4791 dst.x, dst.y, dst.width, dst.height);
4792
4793}
4794
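/*
 * Worked example of the RMX_ASPECT math above (hypothetical mode): a
 * 1280x720 source on a 1920x1200 stream takes the "width needs less
 * upscaling" branch, giving dst.height = 720 * 1920 / 1280 = 1080, and
 * centering then yields dst = 1920x1080 at (0, 60), i.e. 60-pixel
 * borders top and bottom.
 */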
3ee6b26b 4795static enum dc_color_depth
42ba01fc 4796convert_color_depth_from_display_info(const struct drm_connector *connector,
cbd14ae7 4797 bool is_y420, int requested_bpc)
e7b07cee 4798{
1bc22f20 4799 uint8_t bpc;
01c22997 4800
1bc22f20
SW
4801 if (is_y420) {
4802 bpc = 8;
4803
4804 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4805 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4806 bpc = 16;
4807 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4808 bpc = 12;
4809 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4810 bpc = 10;
4811 } else {
4812 bpc = (uint8_t)connector->display_info.bpc;
4813 /* Assume 8 bpc by default if no bpc is specified. */
4814 bpc = bpc ? bpc : 8;
4815 }
e7b07cee 4816
cbd14ae7 4817 if (requested_bpc > 0) {
01c22997
NK
4818 /*
4819 * Cap display bpc based on the user requested value.
4820 *
4821 * The value for state->max_bpc may not be correctly updated
4822 * depending on when the connector gets added to the state
4823 * or if this was called outside of atomic check, so it
4824 * can't be used directly.
4825 */
cbd14ae7 4826 bpc = min_t(u8, bpc, requested_bpc);
01c22997 4827
1825fd34
NK
4828 /* Round down to the nearest even number. */
4829 bpc = bpc - (bpc & 1);
4830 }
07e3a1cf 4831
e7b07cee
HW
4832 switch (bpc) {
4833 case 0:
1f6010a9
DF
4834 /*
4835 * Temporary workaround: DRM doesn't parse color depth for
e7b07cee
HW
4836 * EDID revisions before 1.4
4837 * TODO: Fix edid parsing
4838 */
4839 return COLOR_DEPTH_888;
4840 case 6:
4841 return COLOR_DEPTH_666;
4842 case 8:
4843 return COLOR_DEPTH_888;
4844 case 10:
4845 return COLOR_DEPTH_101010;
4846 case 12:
4847 return COLOR_DEPTH_121212;
4848 case 14:
4849 return COLOR_DEPTH_141414;
4850 case 16:
4851 return COLOR_DEPTH_161616;
4852 default:
4853 return COLOR_DEPTH_UNDEFINED;
4854 }
4855}
4856
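/*
 * Worked example of the bpc capping above (hypothetical sink): an EDID
 * reporting 12 bpc with a requested_bpc of 11 gives
 * bpc = min(12, 11) = 11, and "bpc - (bpc & 1)" rounds down to the
 * even value 10, so the stream uses COLOR_DEPTH_101010.
 */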
3ee6b26b
AD
4857static enum dc_aspect_ratio
4858get_aspect_ratio(const struct drm_display_mode *mode_in)
e7b07cee 4859{
e11d4147
LSL
4860 /* 1-1 mapping, since both enums follow the HDMI spec. */
4861 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
e7b07cee
HW
4862}
4863
3ee6b26b
AD
4864static enum dc_color_space
4865get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
e7b07cee
HW
4866{
4867 enum dc_color_space color_space = COLOR_SPACE_SRGB;
4868
4869 switch (dc_crtc_timing->pixel_encoding) {
4870 case PIXEL_ENCODING_YCBCR422:
4871 case PIXEL_ENCODING_YCBCR444:
4872 case PIXEL_ENCODING_YCBCR420:
4873 {
4874 /*
4875 * 27030 kHz is the separation point between HDTV and SDTV
4876 * according to the HDMI spec; we use YCbCr709 and YCbCr601
4877 * respectively.
4878 */
380604e2 4879 if (dc_crtc_timing->pix_clk_100hz > 270300) {
e7b07cee
HW
4880 if (dc_crtc_timing->flags.Y_ONLY)
4881 color_space =
4882 COLOR_SPACE_YCBCR709_LIMITED;
4883 else
4884 color_space = COLOR_SPACE_YCBCR709;
4885 } else {
4886 if (dc_crtc_timing->flags.Y_ONLY)
4887 color_space =
4888 COLOR_SPACE_YCBCR601_LIMITED;
4889 else
4890 color_space = COLOR_SPACE_YCBCR601;
4891 }
4892
4893 }
4894 break;
4895 case PIXEL_ENCODING_RGB:
4896 color_space = COLOR_SPACE_SRGB;
4897 break;
4898
4899 default:
4900 WARN_ON(1);
4901 break;
4902 }
4903
4904 return color_space;
4905}
4906
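/*
 * Illustrative example of the threshold above: 480p YCbCr at 27.00 MHz
 * (pix_clk_100hz = 270000) is at or below 27.03 MHz and gets
 * COLOR_SPACE_YCBCR601, while 720p at 74.25 MHz (pix_clk_100hz =
 * 742500) exceeds it and gets COLOR_SPACE_YCBCR709.
 */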
ea117312
TA
4907static bool adjust_colour_depth_from_display_info(
4908 struct dc_crtc_timing *timing_out,
4909 const struct drm_display_info *info)
400443e8 4910{
ea117312 4911 enum dc_color_depth depth = timing_out->display_color_depth;
400443e8 4912 int normalized_clk;
400443e8 4913 do {
380604e2 4914 normalized_clk = timing_out->pix_clk_100hz / 10;
400443e8
ML
4915 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4916 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4917 normalized_clk /= 2;
4918 /* Adjusting pix clock following on HDMI spec based on colour depth */
ea117312
TA
4919 switch (depth) {
4920 case COLOR_DEPTH_888:
4921 break;
400443e8
ML
4922 case COLOR_DEPTH_101010:
4923 normalized_clk = (normalized_clk * 30) / 24;
4924 break;
4925 case COLOR_DEPTH_121212:
4926 normalized_clk = (normalized_clk * 36) / 24;
4927 break;
4928 case COLOR_DEPTH_161616:
4929 normalized_clk = (normalized_clk * 48) / 24;
4930 break;
4931 default:
ea117312
TA
4932 /* The above depths are the only ones valid for HDMI. */
4933 return false;
400443e8 4934 }
ea117312
TA
4935 if (normalized_clk <= info->max_tmds_clock) {
4936 timing_out->display_color_depth = depth;
4937 return true;
4938 }
4939 } while (--depth > COLOR_DEPTH_666);
4940 return false;
400443e8 4941}
e7b07cee 4942
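/*
 * Worked example of the pixel-clock adjustment above (hypothetical
 * sink): a 297 MHz RGB mode (pix_clk_100hz = 2970000) at 12 bpc
 * normalizes to 297000 * 36 / 24 = 445500 kHz. Against a sink
 * max_tmds_clock of 340000 kHz this fails, 10 bpc still fails
 * (297000 * 30 / 24 = 371250), and the loop settles on 8 bpc
 * (297000 kHz), returning true with COLOR_DEPTH_888.
 */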
42ba01fc
NK
4943static void fill_stream_properties_from_drm_display_mode(
4944 struct dc_stream_state *stream,
4945 const struct drm_display_mode *mode_in,
4946 const struct drm_connector *connector,
4947 const struct drm_connector_state *connector_state,
cbd14ae7
SW
4948 const struct dc_stream_state *old_stream,
4949 int requested_bpc)
e7b07cee
HW
4950{
4951 struct dc_crtc_timing *timing_out = &stream->timing;
fe61a2f1 4952 const struct drm_display_info *info = &connector->display_info;
d4252eee 4953 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
1cb1d477
WL
4954 struct hdmi_vendor_infoframe hv_frame;
4955 struct hdmi_avi_infoframe avi_frame;
e7b07cee 4956
acf83f86
WL
4957 memset(&hv_frame, 0, sizeof(hv_frame));
4958 memset(&avi_frame, 0, sizeof(avi_frame));
4959
e7b07cee
HW
4960 timing_out->h_border_left = 0;
4961 timing_out->h_border_right = 0;
4962 timing_out->v_border_top = 0;
4963 timing_out->v_border_bottom = 0;
4964 /* TODO: un-hardcode */
fe61a2f1 4965 if (drm_mode_is_420_only(info, mode_in)
ceb3dbb4 4966 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
fe61a2f1 4967 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
d4252eee
SW
4968 else if (drm_mode_is_420_also(info, mode_in)
4969 && aconnector->force_yuv420_output)
4970 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
fe61a2f1 4971 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
ceb3dbb4 4972 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
e7b07cee
HW
4973 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4974 else
4975 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4976
4977 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4978 timing_out->display_color_depth = convert_color_depth_from_display_info(
cbd14ae7
SW
4979 connector,
4980 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4981 requested_bpc);
e7b07cee
HW
4982 timing_out->scan_type = SCANNING_TYPE_NODATA;
4983 timing_out->hdmi_vic = 0;
b333730d
BL
4984
4985 if (old_stream) {
4986 timing_out->vic = old_stream->timing.vic;
4987 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4988 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4989 } else {
4990 timing_out->vic = drm_match_cea_mode(mode_in);
4991 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4992 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4993 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4994 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4995 }
e7b07cee 4996
1cb1d477
WL
4997 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4998 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4999 timing_out->vic = avi_frame.video_code;
5000 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5001 timing_out->hdmi_vic = hv_frame.vic;
5002 }
5003
e7b07cee
HW
5004 timing_out->h_addressable = mode_in->crtc_hdisplay;
5005 timing_out->h_total = mode_in->crtc_htotal;
5006 timing_out->h_sync_width =
5007 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5008 timing_out->h_front_porch =
5009 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5010 timing_out->v_total = mode_in->crtc_vtotal;
5011 timing_out->v_addressable = mode_in->crtc_vdisplay;
5012 timing_out->v_front_porch =
5013 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5014 timing_out->v_sync_width =
5015 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
380604e2 5016 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
e7b07cee 5017 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
e7b07cee
HW
5018
5019 stream->output_color_space = get_output_color_space(timing_out);
5020
e43a432c
AK
5021 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5022 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
ea117312
TA
5023 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5024 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5025 drm_mode_is_420_also(info, mode_in) &&
5026 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5027 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5028 adjust_colour_depth_from_display_info(timing_out, info);
5029 }
5030 }
e7b07cee
HW
5031}
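/*
 * Worked example of the timing conversion above (illustrative): the CEA
 * 1920x1080@60 detailed timing has crtc_hsync_start = 2008, crtc_hsync_end =
 * 2052, crtc_htotal = 2200 and crtc_clock = 148500 kHz, which yields
 * h_front_porch = 2008 - 1920 = 88, h_sync_width = 2052 - 2008 = 44 and
 * pix_clk_100hz = 148500 * 10 = 1485000.
 */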
5032
3ee6b26b
AD
5033static void fill_audio_info(struct audio_info *audio_info,
5034 const struct drm_connector *drm_connector,
5035 const struct dc_sink *dc_sink)
e7b07cee
HW
5036{
5037 int i = 0;
5038 int cea_revision = 0;
5039 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5040
5041 audio_info->manufacture_id = edid_caps->manufacturer_id;
5042 audio_info->product_id = edid_caps->product_id;
5043
5044 cea_revision = drm_connector->display_info.cea_rev;
5045
090afc1e 5046 strscpy(audio_info->display_name,
d2b2562c 5047 edid_caps->display_name,
090afc1e 5048 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
e7b07cee 5049
b830ebc9 5050 if (cea_revision >= 3) {
e7b07cee
HW
5051 audio_info->mode_count = edid_caps->audio_mode_count;
5052
5053 for (i = 0; i < audio_info->mode_count; ++i) {
5054 audio_info->modes[i].format_code =
5055 (enum audio_format_code)
5056 (edid_caps->audio_modes[i].format_code);
5057 audio_info->modes[i].channel_count =
5058 edid_caps->audio_modes[i].channel_count;
5059 audio_info->modes[i].sample_rates.all =
5060 edid_caps->audio_modes[i].sample_rate;
5061 audio_info->modes[i].sample_size =
5062 edid_caps->audio_modes[i].sample_size;
5063 }
5064 }
5065
5066 audio_info->flags.all = edid_caps->speaker_flags;
5067
5068 /* TODO: we only check the progressive mode; check the interlace mode too */
b830ebc9 5069 if (drm_connector->latency_present[0]) {
e7b07cee
HW
5070 audio_info->video_latency = drm_connector->video_latency[0];
5071 audio_info->audio_latency = drm_connector->audio_latency[0];
5072 }
5073
5074 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5075
5076}
5077
3ee6b26b
AD
5078static void
5079copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5080 struct drm_display_mode *dst_mode)
e7b07cee
HW
5081{
5082 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5083 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5084 dst_mode->crtc_clock = src_mode->crtc_clock;
5085 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5086 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
b830ebc9 5087 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
e7b07cee
HW
5088 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5089 dst_mode->crtc_htotal = src_mode->crtc_htotal;
5090 dst_mode->crtc_hskew = src_mode->crtc_hskew;
5091 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5092 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5093 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5094 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5095 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5096}
5097
3ee6b26b
AD
5098static void
5099decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5100 const struct drm_display_mode *native_mode,
5101 bool scale_enabled)
e7b07cee
HW
5102{
5103 if (scale_enabled) {
5104 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5105 } else if (native_mode->clock == drm_mode->clock &&
5106 native_mode->htotal == drm_mode->htotal &&
5107 native_mode->vtotal == drm_mode->vtotal) {
5108 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5109 } else {
5110 /* no scaling and no amdgpu-inserted mode: no need to patch */
5111 }
5112}
5113
aed15309
ML
5114static struct dc_sink *
5115create_fake_sink(struct amdgpu_dm_connector *aconnector)
2e0ac3d6 5116{
2e0ac3d6 5117 struct dc_sink_init_data sink_init_data = { 0 };
aed15309 5118 struct dc_sink *sink = NULL;
2e0ac3d6
HW
5119 sink_init_data.link = aconnector->dc_link;
5120 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5121
5122 sink = dc_sink_create(&sink_init_data);
423788c7 5123 if (!sink) {
2e0ac3d6 5124 DRM_ERROR("Failed to create sink!\n");
aed15309 5125 return NULL;
423788c7 5126 }
2e0ac3d6 5127 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
423788c7 5128
aed15309 5129 return sink;
2e0ac3d6
HW
5130}
5131
fa2123db
ML
5132static void set_multisync_trigger_params(
5133 struct dc_stream_state *stream)
5134{
5135 if (stream->triggered_crtc_reset.enabled) {
5136 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
5137 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
5138 }
5139}
5140
5141static void set_master_stream(struct dc_stream_state *stream_set[],
5142 int stream_count)
5143{
5144 int j, highest_rfr = 0, master_stream = 0;
5145
5146 for (j = 0; j < stream_count; j++) {
5147 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5148 int refresh_rate = 0;
5149
380604e2 5150 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
fa2123db
ML
5151 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5152 if (refresh_rate > highest_rfr) {
5153 highest_rfr = refresh_rate;
5154 master_stream = j;
5155 }
5156 }
5157 }
5158 for (j = 0; j < stream_count; j++) {
03736f4c 5159 if (stream_set[j])
fa2123db
ML
5160 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5161 }
5162}
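/*
 * Example of the refresh-rate math above (illustrative): a stream with
 * pix_clk_100hz = 1485000, h_total = 2200 and v_total = 1125 evaluates to
 * (1485000 * 100) / (2200 * 1125) = 60 Hz; the stream with the highest such
 * rate becomes the reset event source for every synchronized stream.
 */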
5163
5164static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5165{
5166 int i = 0;
5167
5168 if (context->stream_count < 2)
5169 return;
5170 for (i = 0; i < context->stream_count ; i++) {
5171 if (!context->streams[i])
5172 continue;
1f6010a9
DF
5173 /*
5174 * TODO: add a function to read AMD VSDB bits and set
fa2123db 5175 * crtc_sync_master.multi_sync_enabled flag
1f6010a9 5176 * For now it's set to false
fa2123db
ML
5177 */
5178 set_multisync_trigger_params(context->streams[i]);
5179 }
5180 set_master_stream(context->streams, context->stream_count);
5181}
5182
3ee6b26b
AD
5183static struct dc_stream_state *
5184create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5185 const struct drm_display_mode *drm_mode,
b333730d 5186 const struct dm_connector_state *dm_state,
cbd14ae7
SW
5187 const struct dc_stream_state *old_stream,
5188 int requested_bpc)
e7b07cee
HW
5189{
5190 struct drm_display_mode *preferred_mode = NULL;
391ef035 5191 struct drm_connector *drm_connector;
42ba01fc
NK
5192 const struct drm_connector_state *con_state =
5193 dm_state ? &dm_state->base : NULL;
0971c40e 5194 struct dc_stream_state *stream = NULL;
e7b07cee
HW
5195 struct drm_display_mode mode = *drm_mode;
5196 bool native_mode_found = false;
b333730d
BL
5197 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5198 int mode_refresh;
58124bf8 5199 int preferred_refresh = 0;
defeb878 5200#if defined(CONFIG_DRM_AMD_DC_DCN)
df2f1015
DF
5201 struct dsc_dec_dpcd_caps dsc_caps;
5202 uint32_t link_bandwidth_kbps;
7c431455 5203#endif
aed15309 5204 struct dc_sink *sink = NULL;
b830ebc9 5205 if (aconnector == NULL) {
e7b07cee 5206 DRM_ERROR("aconnector is NULL!\n");
64245fa7 5207 return stream;
e7b07cee
HW
5208 }
5209
e7b07cee 5210 drm_connector = &aconnector->base;
2e0ac3d6 5211
f4ac176e 5212 if (!aconnector->dc_sink) {
e3fa5c4c
JFZ
5213 sink = create_fake_sink(aconnector);
5214 if (!sink)
5215 return stream;
aed15309
ML
5216 } else {
5217 sink = aconnector->dc_sink;
dcd5fb82 5218 dc_sink_retain(sink);
f4ac176e 5219 }
2e0ac3d6 5220
aed15309 5221 stream = dc_create_stream_for_sink(sink);
4562236b 5222
b830ebc9 5223 if (stream == NULL) {
e7b07cee 5224 DRM_ERROR("Failed to create stream for sink!\n");
aed15309 5225 goto finish;
e7b07cee
HW
5226 }
5227
ceb3dbb4
JL
5228 stream->dm_stream_context = aconnector;
5229
4a36fcba
WL
5230 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5231 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5232
e7b07cee
HW
5233 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5234 /* Search for preferred mode */
5235 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5236 native_mode_found = true;
5237 break;
5238 }
5239 }
5240 if (!native_mode_found)
5241 preferred_mode = list_first_entry_or_null(
5242 &aconnector->base.modes,
5243 struct drm_display_mode,
5244 head);
5245
b333730d
BL
5246 mode_refresh = drm_mode_vrefresh(&mode);
5247
b830ebc9 5248 if (preferred_mode == NULL) {
1f6010a9
DF
5249 /*
5250 * This may not be an error; the use case is when we have no
e7b07cee
HW
5251 * usermode calls to reset and set the mode upon hotplug. In this
5252 * case, we call set mode ourselves to restore the previous mode,
5253 * and the mode list may not be filled in yet.
5254 */
f1ad2f5e 5255 DRM_DEBUG_DRIVER("No preferred mode found\n");
e7b07cee
HW
5256 } else {
5257 decide_crtc_timing_for_drm_display_mode(
5258 &mode, preferred_mode,
f4791779 5259 dm_state ? (dm_state->scaling != RMX_OFF) : false);
58124bf8 5260 preferred_refresh = drm_mode_vrefresh(preferred_mode);
e7b07cee
HW
5261 }
5262
f783577c
JFZ
5263 if (!dm_state)
5264 drm_mode_set_crtcinfo(&mode, 0);
5265
b333730d
BL
5266 /*
5267 * If scaling is enabled and refresh rate didn't change
5268 * we copy the vic and polarities of the old timings
5269 */
5270 if (!scale || mode_refresh != preferred_refresh)
5271 fill_stream_properties_from_drm_display_mode(stream,
cbd14ae7 5272 &mode, &aconnector->base, con_state, NULL, requested_bpc);
b333730d
BL
5273 else
5274 fill_stream_properties_from_drm_display_mode(stream,
cbd14ae7 5275 &mode, &aconnector->base, con_state, old_stream, requested_bpc);
b333730d 5276
df2f1015
DF
5277 stream->timing.flags.DSC = 0;
5278
5279 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
defeb878 5280#if defined(CONFIG_DRM_AMD_DC_DCN)
2af0f378
NC
5281 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5282 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6d824ed5 5283 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
df2f1015
DF
5284 &dsc_caps);
5285 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5286 dc_link_get_link_cap(aconnector->dc_link));
5287
0749ddeb 5288 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
bcc6aa61 5289 /* Set DSC policy according to dsc_clock_en */
0749ddeb
EB
5290 dc_dsc_policy_set_enable_dsc_when_not_needed(
5291 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
bcc6aa61 5292
0417df16 5293 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
df2f1015 5294 &dsc_caps,
0417df16 5295 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
9abdf392 5296 0,
df2f1015
DF
5297 link_bandwidth_kbps,
5298 &stream->timing,
5299 &stream->timing.dsc_cfg))
5300 stream->timing.flags.DSC = 1;
27e84dd7 5301 /* Overwrite the stream flag if DSC is enabled through debugfs */
0749ddeb 5302 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
097e6d98 5303 stream->timing.flags.DSC = 1;
734e4c97 5304
28b2f656
EB
5305 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5306 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
734e4c97 5307
28b2f656
EB
5308 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5309 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5268bf13
EB
5310
5311 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5312 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
097e6d98 5313 }
39a4eb85 5314#endif
df2f1015 5315 }
39a4eb85 5316
e7b07cee
HW
5317 update_stream_scaling_settings(&mode, dm_state, stream);
5318
5319 fill_audio_info(
5320 &stream->audio_info,
5321 drm_connector,
aed15309 5322 sink);
e7b07cee 5323
ceb3dbb4 5324 update_stream_signal(stream, sink);
9182b4cb 5325
d832fc3b 5326 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
75f77aaf
WL
5327 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5328
8a488f5d
RL
5329 if (stream->link->psr_settings.psr_feature_enabled) {
5330 //
5331 // Decide whether the stream supports VSC SDP colorimetry
5332 // before building the VSC info packet.
5333 //
5334 stream->use_vsc_sdp_for_colorimetry = false;
5335 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5336 stream->use_vsc_sdp_for_colorimetry =
5337 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5338 } else {
5339 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5340 stream->use_vsc_sdp_for_colorimetry = true;
8c322309 5341 }
8a488f5d 5342 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
8c322309 5343 }
aed15309 5344finish:
dcd5fb82 5345 dc_sink_release(sink);
9e3efe3e 5346
e7b07cee
HW
5347 return stream;
5348}
5349
7578ecda 5350static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
e7b07cee
HW
5351{
5352 drm_crtc_cleanup(crtc);
5353 kfree(crtc);
5354}
5355
5356static void dm_crtc_destroy_state(struct drm_crtc *crtc,
3ee6b26b 5357 struct drm_crtc_state *state)
e7b07cee
HW
5358{
5359 struct dm_crtc_state *cur = to_dm_crtc_state(state);
5360
5361 /* TODO: destroy dc_stream objects once the stream object is flattened */
5362 if (cur->stream)
5363 dc_stream_release(cur->stream);
5364
5365
5366 __drm_atomic_helper_crtc_destroy_state(state);
5367
5368
5369 kfree(state);
5370}
5371
5372static void dm_crtc_reset_state(struct drm_crtc *crtc)
5373{
5374 struct dm_crtc_state *state;
5375
5376 if (crtc->state)
5377 dm_crtc_destroy_state(crtc, crtc->state);
5378
5379 state = kzalloc(sizeof(*state), GFP_KERNEL);
5380 if (WARN_ON(!state))
5381 return;
5382
1f8a52ec 5383 __drm_atomic_helper_crtc_reset(crtc, &state->base);
e7b07cee
HW
5384}
5385
5386static struct drm_crtc_state *
5387dm_crtc_duplicate_state(struct drm_crtc *crtc)
5388{
5389 struct dm_crtc_state *state, *cur;
5390
5391 cur = to_dm_crtc_state(crtc->state);
5392
5393 if (WARN_ON(!crtc->state))
5394 return NULL;
5395
2004f45e 5396 state = kzalloc(sizeof(*state), GFP_KERNEL);
2a55f096
ES
5397 if (!state)
5398 return NULL;
e7b07cee
HW
5399
5400 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5401
5402 if (cur->stream) {
5403 state->stream = cur->stream;
5404 dc_stream_retain(state->stream);
5405 }
5406
d6ef9b41 5407 state->active_planes = cur->active_planes;
98e6436d 5408 state->vrr_infopacket = cur->vrr_infopacket;
c1ee92f9 5409 state->abm_level = cur->abm_level;
bb47de73
NK
5410 state->vrr_supported = cur->vrr_supported;
5411 state->freesync_config = cur->freesync_config;
14b25846 5412 state->crc_src = cur->crc_src;
cf020d49
NK
5413 state->cm_has_degamma = cur->cm_has_degamma;
5414 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
e2881d6d 5415
e7b07cee
HW
5416 /* TODO: duplicate dc_stream once the stream object is flattened */
5417
5418 return &state->base;
5419}
5420
d2574c33
MK
5421static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5422{
5423 enum dc_irq_source irq_source;
5424 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
1348969a 5425 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
d2574c33
MK
5426 int rc;
5427
5428 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5429
5430 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5431
5432 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
5433 acrtc->crtc_id, enable ? "en" : "dis", rc);
5434 return rc;
5435}
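/*
 * Example (illustrative): for a CRTC driving OTG instance 1 this resolves to
 * irq_source = IRQ_TYPE_VUPDATE + 1, so each hardware OTG gets its own
 * independently gated VUPDATE interrupt.
 */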
589d2739
HW
5436
5437static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5438{
5439 enum dc_irq_source irq_source;
5440 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
1348969a 5441 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
d2574c33 5442 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
ea3b4242 5443#if defined(CONFIG_DRM_AMD_DC_DCN)
71338cb4 5444 struct amdgpu_display_manager *dm = &adev->dm;
ea3b4242
QZ
5445 unsigned long flags;
5446#endif
d2574c33
MK
5447 int rc = 0;
5448
5449 if (enable) {
5450 /* vblank irq on -> Only need vupdate irq in vrr mode */
5451 if (amdgpu_dm_vrr_active(acrtc_state))
5452 rc = dm_set_vupdate_irq(crtc, true);
5453 } else {
5454 /* vblank irq off -> vupdate irq off */
5455 rc = dm_set_vupdate_irq(crtc, false);
5456 }
5457
5458 if (rc)
5459 return rc;
589d2739
HW
5460
5461 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
71338cb4
BL
5462
5463 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5464 return -EBUSY;
5465
98ab5f35
BL
5466 if (amdgpu_in_reset(adev))
5467 return 0;
5468
4928b480 5469#if defined(CONFIG_DRM_AMD_DC_DCN)
ea3b4242
QZ
5470 spin_lock_irqsave(&dm->vblank_lock, flags);
5471 dm->vblank_workqueue->dm = dm;
5472 dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
5473 dm->vblank_workqueue->enable = enable;
5474 spin_unlock_irqrestore(&dm->vblank_lock, flags);
5475 schedule_work(&dm->vblank_workqueue->mall_work);
4928b480 5476#endif
71338cb4 5477
71338cb4 5478 return 0;
589d2739
HW
5479}
5480
5481static int dm_enable_vblank(struct drm_crtc *crtc)
5482{
5483 return dm_set_vblank(crtc, true);
5484}
5485
5486static void dm_disable_vblank(struct drm_crtc *crtc)
5487{
5488 dm_set_vblank(crtc, false);
5489}
5490
e7b07cee
HW
5491/* Only the options currently available to the driver are implemented */
5492static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5493 .reset = dm_crtc_reset_state,
5494 .destroy = amdgpu_dm_crtc_destroy,
e7b07cee
HW
5495 .set_config = drm_atomic_helper_set_config,
5496 .page_flip = drm_atomic_helper_page_flip,
5497 .atomic_duplicate_state = dm_crtc_duplicate_state,
5498 .atomic_destroy_state = dm_crtc_destroy_state,
31aec354 5499 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
3b3b8448 5500 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
8fb843d1 5501 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
e3eff4b5 5502 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
589d2739
HW
5503 .enable_vblank = dm_enable_vblank,
5504 .disable_vblank = dm_disable_vblank,
e3eff4b5 5505 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
e7b07cee
HW
5506};
5507
5508static enum drm_connector_status
5509amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5510{
5511 bool connected;
c84dec2f 5512 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 5513
1f6010a9
DF
5514 /*
5515 * Notes:
e7b07cee
HW
5516 * 1. This interface is NOT called in the context of the HPD IRQ.
5517 * 2. This interface *is called* in the context of a user-mode ioctl,
1f6010a9
DF
5518 * which makes it a bad place for *any* MST-related activity.
5519 */
e7b07cee 5520
8580d60b
HW
5521 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5522 !aconnector->fake_enable)
e7b07cee
HW
5523 connected = (aconnector->dc_sink != NULL);
5524 else
5525 connected = (aconnector->base.force == DRM_FORCE_ON);
5526
0f877894
OV
5527 update_subconnector_property(aconnector);
5528
e7b07cee
HW
5529 return (connected ? connector_status_connected :
5530 connector_status_disconnected);
5531}
5532
3ee6b26b
AD
5533int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5534 struct drm_connector_state *connector_state,
5535 struct drm_property *property,
5536 uint64_t val)
e7b07cee
HW
5537{
5538 struct drm_device *dev = connector->dev;
1348969a 5539 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
5540 struct dm_connector_state *dm_old_state =
5541 to_dm_connector_state(connector->state);
5542 struct dm_connector_state *dm_new_state =
5543 to_dm_connector_state(connector_state);
5544
5545 int ret = -EINVAL;
5546
5547 if (property == dev->mode_config.scaling_mode_property) {
5548 enum amdgpu_rmx_type rmx_type;
5549
5550 switch (val) {
5551 case DRM_MODE_SCALE_CENTER:
5552 rmx_type = RMX_CENTER;
5553 break;
5554 case DRM_MODE_SCALE_ASPECT:
5555 rmx_type = RMX_ASPECT;
5556 break;
5557 case DRM_MODE_SCALE_FULLSCREEN:
5558 rmx_type = RMX_FULL;
5559 break;
5560 case DRM_MODE_SCALE_NONE:
5561 default:
5562 rmx_type = RMX_OFF;
5563 break;
5564 }
5565
5566 if (dm_old_state->scaling == rmx_type)
5567 return 0;
5568
5569 dm_new_state->scaling = rmx_type;
5570 ret = 0;
5571 } else if (property == adev->mode_info.underscan_hborder_property) {
5572 dm_new_state->underscan_hborder = val;
5573 ret = 0;
5574 } else if (property == adev->mode_info.underscan_vborder_property) {
5575 dm_new_state->underscan_vborder = val;
5576 ret = 0;
5577 } else if (property == adev->mode_info.underscan_property) {
5578 dm_new_state->underscan_enable = val;
5579 ret = 0;
c1ee92f9
DF
5580 } else if (property == adev->mode_info.abm_level_property) {
5581 dm_new_state->abm_level = val;
5582 ret = 0;
e7b07cee
HW
5583 }
5584
5585 return ret;
5586}
5587
3ee6b26b
AD
5588int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5589 const struct drm_connector_state *state,
5590 struct drm_property *property,
5591 uint64_t *val)
e7b07cee
HW
5592{
5593 struct drm_device *dev = connector->dev;
1348969a 5594 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
5595 struct dm_connector_state *dm_state =
5596 to_dm_connector_state(state);
5597 int ret = -EINVAL;
5598
5599 if (property == dev->mode_config.scaling_mode_property) {
5600 switch (dm_state->scaling) {
5601 case RMX_CENTER:
5602 *val = DRM_MODE_SCALE_CENTER;
5603 break;
5604 case RMX_ASPECT:
5605 *val = DRM_MODE_SCALE_ASPECT;
5606 break;
5607 case RMX_FULL:
5608 *val = DRM_MODE_SCALE_FULLSCREEN;
5609 break;
5610 case RMX_OFF:
5611 default:
5612 *val = DRM_MODE_SCALE_NONE;
5613 break;
5614 }
5615 ret = 0;
5616 } else if (property == adev->mode_info.underscan_hborder_property) {
5617 *val = dm_state->underscan_hborder;
5618 ret = 0;
5619 } else if (property == adev->mode_info.underscan_vborder_property) {
5620 *val = dm_state->underscan_vborder;
5621 ret = 0;
5622 } else if (property == adev->mode_info.underscan_property) {
5623 *val = dm_state->underscan_enable;
5624 ret = 0;
c1ee92f9
DF
5625 } else if (property == adev->mode_info.abm_level_property) {
5626 *val = dm_state->abm_level;
5627 ret = 0;
e7b07cee 5628 }
c1ee92f9 5629
e7b07cee
HW
5630 return ret;
5631}
5632
526c654a
ED
5633static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5634{
5635 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5636
5637 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5638}
5639
7578ecda 5640static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
e7b07cee 5641{
c84dec2f 5642 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 5643 const struct dc_link *link = aconnector->dc_link;
1348969a 5644 struct amdgpu_device *adev = drm_to_adev(connector->dev);
e7b07cee 5645 struct amdgpu_display_manager *dm = &adev->dm;
ada8ce15 5646
5dff80bd
AG
5647 /*
5648 * Call only if mst_mgr was initialized before, since it's not done
5649 * for all connector types.
5650 */
5651 if (aconnector->mst_mgr.dev)
5652 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5653
e7b07cee
HW
5654#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5655 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5656
89fc8d4e 5657 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5cd29ed0
HW
5658 link->type != dc_connection_none &&
5659 dm->backlight_dev) {
5660 backlight_device_unregister(dm->backlight_dev);
5661 dm->backlight_dev = NULL;
e7b07cee
HW
5662 }
5663#endif
dcd5fb82
MF
5664
5665 if (aconnector->dc_em_sink)
5666 dc_sink_release(aconnector->dc_em_sink);
5667 aconnector->dc_em_sink = NULL;
5668 if (aconnector->dc_sink)
5669 dc_sink_release(aconnector->dc_sink);
5670 aconnector->dc_sink = NULL;
5671
e86e8947 5672 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
e7b07cee
HW
5673 drm_connector_unregister(connector);
5674 drm_connector_cleanup(connector);
526c654a
ED
5675 if (aconnector->i2c) {
5676 i2c_del_adapter(&aconnector->i2c->base);
5677 kfree(aconnector->i2c);
5678 }
7daec99f 5679 kfree(aconnector->dm_dp_aux.aux.name);
526c654a 5680
e7b07cee
HW
5681 kfree(connector);
5682}
5683
5684void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5685{
5686 struct dm_connector_state *state =
5687 to_dm_connector_state(connector->state);
5688
df099b9b
LSL
5689 if (connector->state)
5690 __drm_atomic_helper_connector_destroy_state(connector->state);
5691
e7b07cee
HW
5692 kfree(state);
5693
5694 state = kzalloc(sizeof(*state), GFP_KERNEL);
5695
5696 if (state) {
5697 state->scaling = RMX_OFF;
5698 state->underscan_enable = false;
5699 state->underscan_hborder = 0;
5700 state->underscan_vborder = 0;
01933ba4 5701 state->base.max_requested_bpc = 8;
3261e013
ML
5702 state->vcpi_slots = 0;
5703 state->pbn = 0;
c3e50f89
NK
5704 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5705 state->abm_level = amdgpu_dm_abm_level;
5706
df099b9b 5707 __drm_atomic_helper_connector_reset(connector, &state->base);
e7b07cee
HW
5708 }
5709}
5710
3ee6b26b
AD
5711struct drm_connector_state *
5712amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
e7b07cee
HW
5713{
5714 struct dm_connector_state *state =
5715 to_dm_connector_state(connector->state);
5716
5717 struct dm_connector_state *new_state =
5718 kmemdup(state, sizeof(*state), GFP_KERNEL);
5719
98e6436d
AK
5720 if (!new_state)
5721 return NULL;
e7b07cee 5722
98e6436d
AK
5723 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5724
5725 new_state->freesync_capable = state->freesync_capable;
c1ee92f9 5726 new_state->abm_level = state->abm_level;
922454c2
NK
5727 new_state->scaling = state->scaling;
5728 new_state->underscan_enable = state->underscan_enable;
5729 new_state->underscan_hborder = state->underscan_hborder;
5730 new_state->underscan_vborder = state->underscan_vborder;
3261e013
ML
5731 new_state->vcpi_slots = state->vcpi_slots;
5732 new_state->pbn = state->pbn;
98e6436d 5733 return &new_state->base;
e7b07cee
HW
5734}
5735
14f04fa4
AD
5736static int
5737amdgpu_dm_connector_late_register(struct drm_connector *connector)
5738{
5739 struct amdgpu_dm_connector *amdgpu_dm_connector =
5740 to_amdgpu_dm_connector(connector);
00a8037e 5741 int r;
14f04fa4 5742
00a8037e
AD
5743 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5744 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5745 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5746 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5747 if (r)
5748 return r;
5749 }
5750
5751#if defined(CONFIG_DEBUG_FS)
14f04fa4
AD
5752 connector_debugfs_init(amdgpu_dm_connector);
5753#endif
5754
5755 return 0;
5756}
5757
e7b07cee
HW
5758static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5759 .reset = amdgpu_dm_connector_funcs_reset,
5760 .detect = amdgpu_dm_connector_detect,
5761 .fill_modes = drm_helper_probe_single_connector_modes,
5762 .destroy = amdgpu_dm_connector_destroy,
5763 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5764 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5765 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
526c654a 5766 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
14f04fa4 5767 .late_register = amdgpu_dm_connector_late_register,
526c654a 5768 .early_unregister = amdgpu_dm_connector_unregister
e7b07cee
HW
5769};
5770
e7b07cee
HW
5771static int get_modes(struct drm_connector *connector)
5772{
5773 return amdgpu_dm_connector_get_modes(connector);
5774}
5775
c84dec2f 5776static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
5777{
5778 struct dc_sink_init_data init_params = {
5779 .link = aconnector->dc_link,
5780 .sink_signal = SIGNAL_TYPE_VIRTUAL
5781 };
70e8ffc5 5782 struct edid *edid;
e7b07cee 5783
a89ff457 5784 if (!aconnector->base.edid_blob_ptr) {
e7b07cee
HW
5785 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
5786 aconnector->base.name);
5787
5788 aconnector->base.force = DRM_FORCE_OFF;
5789 aconnector->base.override_edid = false;
5790 return;
5791 }
5792
70e8ffc5
HW
5793 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5794
e7b07cee
HW
5795 aconnector->edid = edid;
5796
5797 aconnector->dc_em_sink = dc_link_add_remote_sink(
5798 aconnector->dc_link,
5799 (uint8_t *)edid,
5800 (edid->extensions + 1) * EDID_LENGTH,
5801 &init_params);
5802
dcd5fb82 5803 if (aconnector->base.force == DRM_FORCE_ON) {
e7b07cee
HW
5804 aconnector->dc_sink = aconnector->dc_link->local_sink ?
5805 aconnector->dc_link->local_sink :
5806 aconnector->dc_em_sink;
dcd5fb82
MF
5807 dc_sink_retain(aconnector->dc_sink);
5808 }
e7b07cee
HW
5809}
5810
c84dec2f 5811static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
5812{
5813 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5814
1f6010a9
DF
5815 /*
5816 * In case of a headless boot with force-on for a DP managed connector,
e7b07cee
HW
5817 * those settings have to be != 0 to get an initial modeset.
5818 */
5819 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5820 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5821 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5822 }
5823
5824
5825 aconnector->base.override_edid = true;
5826 create_eml_sink(aconnector);
5827}
5828
cbd14ae7
SW
5829static struct dc_stream_state *
5830create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5831 const struct drm_display_mode *drm_mode,
5832 const struct dm_connector_state *dm_state,
5833 const struct dc_stream_state *old_stream)
5834{
5835 struct drm_connector *connector = &aconnector->base;
1348969a 5836 struct amdgpu_device *adev = drm_to_adev(connector->dev);
cbd14ae7 5837 struct dc_stream_state *stream;
4b7da34b
SW
5838 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5839 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
cbd14ae7
SW
5840 enum dc_status dc_result = DC_OK;
5841
5842 do {
5843 stream = create_stream_for_sink(aconnector, drm_mode,
5844 dm_state, old_stream,
5845 requested_bpc);
5846 if (stream == NULL) {
5847 DRM_ERROR("Failed to create stream for sink!\n");
5848 break;
5849 }
5850
5851 dc_result = dc_validate_stream(adev->dm.dc, stream);
5852
5853 if (dc_result != DC_OK) {
74a16675 5854 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
cbd14ae7
SW
5855 drm_mode->hdisplay,
5856 drm_mode->vdisplay,
5857 drm_mode->clock,
74a16675
RS
5858 dc_result,
5859 dc_status_to_str(dc_result));
cbd14ae7
SW
5860
5861 dc_stream_release(stream);
5862 stream = NULL;
5863 requested_bpc -= 2; /* lower bpc to retry validation */
5864 }
5865
5866 } while (stream == NULL && requested_bpc >= 6);
5867
5868 return stream;
5869}
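/*
 * Example of the fallback loop above (illustrative): with max_requested_bpc
 * == 10, validation is attempted at 10, then 8, then 6 bpc; the first stream
 * that passes dc_validate_stream() is returned, and NULL only if all three
 * attempts fail.
 */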
5870
ba9ca088 5871enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
3ee6b26b 5872 struct drm_display_mode *mode)
e7b07cee
HW
5873{
5874 int result = MODE_ERROR;
5875 struct dc_sink *dc_sink;
e7b07cee 5876 /* TODO: Unhardcode stream count */
0971c40e 5877 struct dc_stream_state *stream;
c84dec2f 5878 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
5879
5880 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5881 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
5882 return result;
5883
1f6010a9
DF
5884 /*
5885 * Only run this the first time mode_valid is called, to initialize
e7b07cee
HW
5886 * EDID mgmt.
5887 */
5888 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5889 !aconnector->dc_em_sink)
5890 handle_edid_mgmt(aconnector);
5891
c84dec2f 5892 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
e7b07cee 5893
ad975f44
VL
5894 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
5895 aconnector->base.force != DRM_FORCE_ON) {
e7b07cee
HW
5896 DRM_ERROR("dc_sink is NULL!\n");
5897 goto fail;
5898 }
5899
cbd14ae7
SW
5900 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5901 if (stream) {
5902 dc_stream_release(stream);
e7b07cee 5903 result = MODE_OK;
cbd14ae7 5904 }
e7b07cee
HW
5905
5906fail:
5907 /* TODO: error handling */
5908 return result;
5909}
5910
88694af9
NK
5911static int fill_hdr_info_packet(const struct drm_connector_state *state,
5912 struct dc_info_packet *out)
5913{
5914 struct hdmi_drm_infoframe frame;
5915 unsigned char buf[30]; /* 26 + 4 */
5916 ssize_t len;
5917 int ret, i;
5918
5919 memset(out, 0, sizeof(*out));
5920
5921 if (!state->hdr_output_metadata)
5922 return 0;
5923
5924 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5925 if (ret)
5926 return ret;
5927
5928 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5929 if (len < 0)
5930 return (int)len;
5931
5932 /* Static metadata is a fixed 26 bytes + 4 byte header. */
5933 if (len != 30)
5934 return -EINVAL;
5935
5936 /* Prepare the infopacket for DC. */
5937 switch (state->connector->connector_type) {
5938 case DRM_MODE_CONNECTOR_HDMIA:
5939 out->hb0 = 0x87; /* type */
5940 out->hb1 = 0x01; /* version */
5941 out->hb2 = 0x1A; /* length */
5942 out->sb[0] = buf[3]; /* checksum */
5943 i = 1;
5944 break;
5945
5946 case DRM_MODE_CONNECTOR_DisplayPort:
5947 case DRM_MODE_CONNECTOR_eDP:
5948 out->hb0 = 0x00; /* sdp id, zero */
5949 out->hb1 = 0x87; /* type */
5950 out->hb2 = 0x1D; /* payload len - 1 */
5951 out->hb3 = (0x13 << 2); /* sdp version */
5952 out->sb[0] = 0x01; /* version */
5953 out->sb[1] = 0x1A; /* length */
5954 i = 2;
5955 break;
5956
5957 default:
5958 return -EINVAL;
5959 }
5960
5961 memcpy(&out->sb[i], &buf[4], 26);
5962 out->valid = true;
5963
5964 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5965 sizeof(out->sb), false);
5966
5967 return 0;
5968}
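/*
 * Resulting layout (illustrative): buf[] holds the packed Dynamic Range and
 * Mastering infoframe; for HDMI, sb[0] carries its checksum (buf[3]) and
 * sb[1..26] the 26 payload bytes copied from buf[4], while for DP the same
 * 26 bytes land at sb[2..27], after the version and length bytes.
 */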
5969
5970static bool
5971is_hdr_metadata_different(const struct drm_connector_state *old_state,
5972 const struct drm_connector_state *new_state)
5973{
5974 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5975 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5976
5977 if (old_blob != new_blob) {
5978 if (old_blob && new_blob &&
5979 old_blob->length == new_blob->length)
5980 return memcmp(old_blob->data, new_blob->data,
5981 old_blob->length);
5982
5983 return true;
5984 }
5985
5986 return false;
5987}
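/*
 * Note: when both blobs exist with equal length, the nonzero memcmp()
 * result is implicitly converted to bool, so any byte difference reports
 * the metadata as changed.
 */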
5988
5989static int
5990amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
51e857af 5991 struct drm_atomic_state *state)
88694af9 5992{
51e857af
SP
5993 struct drm_connector_state *new_con_state =
5994 drm_atomic_get_new_connector_state(state, conn);
88694af9
NK
5995 struct drm_connector_state *old_con_state =
5996 drm_atomic_get_old_connector_state(state, conn);
5997 struct drm_crtc *crtc = new_con_state->crtc;
5998 struct drm_crtc_state *new_crtc_state;
5999 int ret;
6000
e8a98235
RS
6001 trace_amdgpu_dm_connector_atomic_check(new_con_state);
6002
88694af9
NK
6003 if (!crtc)
6004 return 0;
6005
6006 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
6007 struct dc_info_packet hdr_infopacket;
6008
6009 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6010 if (ret)
6011 return ret;
6012
6013 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6014 if (IS_ERR(new_crtc_state))
6015 return PTR_ERR(new_crtc_state);
6016
6017 /*
6018 * DC considers the stream backends changed if the
6019 * static metadata changes. Forcing the modeset also
6020 * gives a simple way for userspace to switch from
b232d4ed
NK
6021 * 8bpc to 10bpc when setting the metadata to enter
6022 * or exit HDR.
6023 *
6024 * Changing the static metadata after it's been
6025 * set is permissible, however. So only force a
6026 * modeset if we're entering or exiting HDR.
88694af9 6027 */
b232d4ed
NK
6028 new_crtc_state->mode_changed =
6029 !old_con_state->hdr_output_metadata ||
6030 !new_con_state->hdr_output_metadata;
88694af9
NK
6031 }
6032
6033 return 0;
6034}
6035
e7b07cee
HW
6036static const struct drm_connector_helper_funcs
6037amdgpu_dm_connector_helper_funcs = {
6038 /*
1f6010a9 6039 * If hotplugging a second, bigger display in FB console mode, the bigger-
b830ebc9 6040 * resolution modes will be filtered out by drm_mode_validate_size(), and
1f6010a9 6041 * those modes will be missing after the user starts lightdm. So we need to
b830ebc9
HW
6042 * renew the mode list in the get_modes callback, not just return the mode count.
6043 */
e7b07cee
HW
6044 .get_modes = get_modes,
6045 .mode_valid = amdgpu_dm_connector_mode_valid,
88694af9 6046 .atomic_check = amdgpu_dm_connector_atomic_check,
e7b07cee
HW
6047};
6048
6049static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6050{
6051}
6052
d6ef9b41 6053static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
c14a005c
NK
6054{
6055 struct drm_atomic_state *state = new_crtc_state->state;
6056 struct drm_plane *plane;
6057 int num_active = 0;
6058
6059 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6060 struct drm_plane_state *new_plane_state;
6061
6062 /* Cursor planes are "fake". */
6063 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6064 continue;
6065
6066 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6067
6068 if (!new_plane_state) {
6069 /*
6070 * The plane is enabled on the CRTC and hasn't changed
6071 * state. This means that it previously passed
6072 * validation and is therefore enabled.
6073 */
6074 num_active += 1;
6075 continue;
6076 }
6077
6078 /* We need a framebuffer to be considered enabled. */
6079 num_active += (new_plane_state->fb != NULL);
6080 }
6081
d6ef9b41
NK
6082 return num_active;
6083}
6084
8fe684e9
NK
6085static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6086 struct drm_crtc_state *new_crtc_state)
d6ef9b41
NK
6087{
6088 struct dm_crtc_state *dm_new_crtc_state =
6089 to_dm_crtc_state(new_crtc_state);
6090
6091 dm_new_crtc_state->active_planes = 0;
d6ef9b41
NK
6092
6093 if (!dm_new_crtc_state->stream)
6094 return;
6095
6096 dm_new_crtc_state->active_planes =
6097 count_crtc_active_planes(new_crtc_state);
c14a005c
NK
6098}
6099
3ee6b26b 6100static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
29b77ad7 6101 struct drm_atomic_state *state)
e7b07cee 6102{
29b77ad7
MR
6103 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6104 crtc);
1348969a 6105 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
e7b07cee 6106 struct dc *dc = adev->dm.dc;
29b77ad7 6107 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
e7b07cee
HW
6108 int ret = -EINVAL;
6109
5b8c5969 6110 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
e8a98235 6111
29b77ad7 6112 dm_update_crtc_active_planes(crtc, crtc_state);
d6ef9b41 6113
9b690ef3 6114 if (unlikely(!dm_crtc_state->stream &&
29b77ad7 6115 modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
e7b07cee
HW
6116 WARN_ON(1);
6117 return ret;
6118 }
6119
bc92c065 6120 /*
b836a274
MD
6121 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6122 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6123 * planes are disabled, which is not supported by the hardware. And there is legacy
6124 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
bc92c065 6125 */
29b77ad7 6126 if (crtc_state->enable &&
ea9522f5
SS
6127 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6128 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
c14a005c 6129 return -EINVAL;
ea9522f5 6130 }
c14a005c 6131
b836a274
MD
6132 /* In some use cases, like reset, no stream is attached */
6133 if (!dm_crtc_state->stream)
6134 return 0;
6135
62c933f9 6136 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
e7b07cee
HW
6137 return 0;
6138
ea9522f5 6139 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
e7b07cee
HW
6140 return ret;
6141}
6142
3ee6b26b
AD
6143static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6144 const struct drm_display_mode *mode,
6145 struct drm_display_mode *adjusted_mode)
e7b07cee
HW
6146{
6147 return true;
6148}
6149
6150static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6151 .disable = dm_crtc_helper_disable,
6152 .atomic_check = dm_crtc_helper_atomic_check,
ea702333
TZ
6153 .mode_fixup = dm_crtc_helper_mode_fixup,
6154 .get_scanout_position = amdgpu_crtc_get_scanout_position,
e7b07cee
HW
6155};
6156
6157static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6158{
6159
6160}
6161
3261e013
ML
6162static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6163{
6164 switch (display_color_depth) {
6165 case COLOR_DEPTH_666:
6166 return 6;
6167 case COLOR_DEPTH_888:
6168 return 8;
6169 case COLOR_DEPTH_101010:
6170 return 10;
6171 case COLOR_DEPTH_121212:
6172 return 12;
6173 case COLOR_DEPTH_141414:
6174 return 14;
6175 case COLOR_DEPTH_161616:
6176 return 16;
6177 default:
6178 break;
6179 }
6180 return 0;
6181}
6182
3ee6b26b
AD
6183static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6184 struct drm_crtc_state *crtc_state,
6185 struct drm_connector_state *conn_state)
e7b07cee 6186{
3261e013
ML
6187 struct drm_atomic_state *state = crtc_state->state;
6188 struct drm_connector *connector = conn_state->connector;
6189 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6190 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6191 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6192 struct drm_dp_mst_topology_mgr *mst_mgr;
6193 struct drm_dp_mst_port *mst_port;
6194 enum dc_color_depth color_depth;
6195 int clock, bpp = 0;
1bc22f20 6196 bool is_y420 = false;
3261e013
ML
6197
6198 if (!aconnector->port || !aconnector->dc_sink)
6199 return 0;
6200
6201 mst_port = aconnector->port;
6202 mst_mgr = &aconnector->mst_port->mst_mgr;
6203
6204 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6205 return 0;
6206
6207 if (!state->duplicated) {
cbd14ae7 6208 int max_bpc = conn_state->max_requested_bpc;
1bc22f20
SW
6209 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6210 aconnector->force_yuv420_output;
cbd14ae7
SW
6211 color_depth = convert_color_depth_from_display_info(connector,
6212 is_y420,
6213 max_bpc);
3261e013
ML
6214 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6215 clock = adjusted_mode->clock;
dc48529f 6216 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
3261e013
ML
6217 }
6218 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6219 mst_mgr,
6220 mst_port,
1c6c1cb5 6221 dm_new_connector_state->pbn,
03ca9600 6222 dm_mst_get_pbn_divider(aconnector->dc_link));
3261e013
ML
6223 if (dm_new_connector_state->vcpi_slots < 0) {
6224 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6225 return dm_new_connector_state->vcpi_slots;
6226 }
e7b07cee
HW
6227 return 0;
6228}
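/*
 * Example of the PBN computation above (illustrative): an uncompressed
 * 1080p60 stream at 8 bpc gives bpp = 8 * 3 = 24 and clock = 148500, so
 * drm_dp_calc_pbn_mode(148500, 24, false) produces the PBN used to find
 * VCPI slots on the MST link.
 */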
6229
6230const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6231 .disable = dm_encoder_helper_disable,
6232 .atomic_check = dm_encoder_helper_atomic_check
6233};
6234
d9fe1a4c 6235#if defined(CONFIG_DRM_AMD_DC_DCN)
29b9ba74
ML
6236static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6237 struct dc_state *dc_state)
6238{
6239 struct dc_stream_state *stream = NULL;
6240 struct drm_connector *connector;
6241 struct drm_connector_state *new_con_state, *old_con_state;
6242 struct amdgpu_dm_connector *aconnector;
6243 struct dm_connector_state *dm_conn_state;
6244 int i, j, clock, bpp;
6245 int vcpi, pbn_div, pbn = 0;
6246
6247 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6248
6249 aconnector = to_amdgpu_dm_connector(connector);
6250
6251 if (!aconnector->port)
6252 continue;
6253
6254 if (!new_con_state || !new_con_state->crtc)
6255 continue;
6256
6257 dm_conn_state = to_dm_connector_state(new_con_state);
6258
6259 for (j = 0; j < dc_state->stream_count; j++) {
6260 stream = dc_state->streams[j];
6261 if (!stream)
6262 continue;
6263
6264 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
6265 break;
6266
6267 stream = NULL;
6268 }
6269
6270 if (!stream)
6271 continue;
6272
6273 if (stream->timing.flags.DSC != 1) {
6274 drm_dp_mst_atomic_enable_dsc(state,
6275 aconnector->port,
6276 dm_conn_state->pbn,
6277 0,
6278 false);
6279 continue;
6280 }
6281
6282 pbn_div = dm_mst_get_pbn_divider(stream->link);
6283 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6284 clock = stream->timing.pix_clk_100hz / 10;
6285 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6286 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6287 aconnector->port,
6288 pbn, pbn_div,
6289 true);
6290 if (vcpi < 0)
6291 return vcpi;
6292
6293 dm_conn_state->pbn = pbn;
6294 dm_conn_state->vcpi_slots = vcpi;
6295 }
6296 return 0;
6297}
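/*
 * Note on the DSC branch above (illustrative): dsc_cfg.bits_per_pixel is a
 * fixed-point value in units of 1/16 bpp, which is why the PBN is recomputed
 * with drm_dp_calc_pbn_mode(clock, bpp, true); the 'true' argument tells the
 * helper to treat bpp in those 1/16 units.
 */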
d9fe1a4c 6298#endif
29b9ba74 6299
e7b07cee
HW
6300static void dm_drm_plane_reset(struct drm_plane *plane)
6301{
6302 struct dm_plane_state *amdgpu_state = NULL;
6303
6304 if (plane->state)
6305 plane->funcs->atomic_destroy_state(plane, plane->state);
6306
6307 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
f922237d 6308 WARN_ON(amdgpu_state == NULL);
1f6010a9 6309
7ddaef96
NK
6310 if (amdgpu_state)
6311 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
e7b07cee
HW
6312}
6313
6314static struct drm_plane_state *
6315dm_drm_plane_duplicate_state(struct drm_plane *plane)
6316{
6317 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6318
6319 old_dm_plane_state = to_dm_plane_state(plane->state);
6320 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6321 if (!dm_plane_state)
6322 return NULL;
6323
6324 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6325
3be5262e
HW
6326 if (old_dm_plane_state->dc_state) {
6327 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6328 dc_plane_state_retain(dm_plane_state->dc_state);
e7b07cee
HW
6329 }
6330
6331 return &dm_plane_state->base;
6332}
6333
dfd84d90 6334static void dm_drm_plane_destroy_state(struct drm_plane *plane,
3ee6b26b 6335 struct drm_plane_state *state)
e7b07cee
HW
6336{
6337 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6338
3be5262e
HW
6339 if (dm_plane_state->dc_state)
6340 dc_plane_state_release(dm_plane_state->dc_state);
e7b07cee 6341
0627bbd3 6342 drm_atomic_helper_plane_destroy_state(plane, state);
e7b07cee
HW
6343}
6344
6345static const struct drm_plane_funcs dm_plane_funcs = {
6346 .update_plane = drm_atomic_helper_update_plane,
6347 .disable_plane = drm_atomic_helper_disable_plane,
02680efb 6348 .destroy = drm_primary_helper_destroy,
e7b07cee
HW
6349 .reset = dm_drm_plane_reset,
6350 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
6351 .atomic_destroy_state = dm_drm_plane_destroy_state,
faa37f54 6352 .format_mod_supported = dm_plane_format_mod_supported,
e7b07cee
HW
6353};
6354
3ee6b26b
AD
6355static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6356 struct drm_plane_state *new_state)
e7b07cee
HW
6357{
6358 struct amdgpu_framebuffer *afb;
6359 struct drm_gem_object *obj;
5d43be0c 6360 struct amdgpu_device *adev;
e7b07cee 6361 struct amdgpu_bo *rbo;
e7b07cee 6362 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
0f257b09
CZ
6363 struct list_head list;
6364 struct ttm_validate_buffer tv;
6365 struct ww_acquire_ctx ticket;
5d43be0c
CK
6366 uint32_t domain;
6367 int r;
e7b07cee
HW
6368
6369 if (!new_state->fb) {
f1ad2f5e 6370 DRM_DEBUG_DRIVER("No FB bound\n");
e7b07cee
HW
6371 return 0;
6372 }
6373
6374 afb = to_amdgpu_framebuffer(new_state->fb);
e68d14dd 6375 obj = new_state->fb->obj[0];
e7b07cee 6376 rbo = gem_to_amdgpu_bo(obj);
5d43be0c 6377 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
0f257b09
CZ
6378 INIT_LIST_HEAD(&list);
6379
6380 tv.bo = &rbo->tbo;
6381 tv.num_shared = 1;
6382 list_add(&tv.head, &list);
6383
9165fb87 6384 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
0f257b09
CZ
6385 if (r) {
6386 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
e7b07cee 6387 return r;
0f257b09 6388 }
e7b07cee 6389
5d43be0c 6390 if (plane->type != DRM_PLANE_TYPE_CURSOR)
f2bd8a0e 6391 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5d43be0c
CK
6392 else
6393 domain = AMDGPU_GEM_DOMAIN_VRAM;
e7b07cee 6394
7b7c6c81 6395 r = amdgpu_bo_pin(rbo, domain);
e7b07cee 6396 if (unlikely(r != 0)) {
30b7c614
HW
6397 if (r != -ERESTARTSYS)
6398 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
0f257b09 6399 ttm_eu_backoff_reservation(&ticket, &list);
e7b07cee
HW
6400 return r;
6401 }
6402
bb812f1e
JZ
6403 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6404 if (unlikely(r != 0)) {
6405 amdgpu_bo_unpin(rbo);
0f257b09 6406 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 6407 DRM_ERROR("%p bind failed\n", rbo);
e7b07cee
HW
6408 return r;
6409 }
7df7e505 6410
0f257b09 6411 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 6412
7b7c6c81 6413 afb->address = amdgpu_bo_gpu_offset(rbo);
e7b07cee
HW
6414
6415 amdgpu_bo_ref(rbo);
6416
cf322b49
NK
6417 /**
6418 * We don't do surface updates on planes that have been newly created,
6419 * but we also don't have the afb->address during atomic check.
6420 *
6421 * Fill in buffer attributes depending on the address here, but only on
6422 * newly created planes since they're not being used by DC yet and this
6423 * won't modify global state.
6424 */
6425 dm_plane_state_old = to_dm_plane_state(plane->state);
6426 dm_plane_state_new = to_dm_plane_state(new_state);
6427
3be5262e 6428 if (dm_plane_state_new->dc_state &&
cf322b49
NK
6429 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6430 struct dc_plane_state *plane_state =
6431 dm_plane_state_new->dc_state;
6432 bool force_disable_dcc = !plane_state->dcc.enable;
e7b07cee 6433
320932bf 6434 fill_plane_buffer_attributes(
695af5f9 6435 adev, afb, plane_state->format, plane_state->rotation,
6eed95b0 6436 afb->tiling_flags,
cf322b49
NK
6437 &plane_state->tiling_info, &plane_state->plane_size,
6438 &plane_state->dcc, &plane_state->address,
6eed95b0 6439 afb->tmz_surface, force_disable_dcc);
e7b07cee
HW
6440 }
6441
e7b07cee
HW
6442 return 0;
6443}
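/*
 * Flow summary (illustrative): the BO backing the new framebuffer is
 * reserved, pinned (VRAM for cursor planes, a supported display domain
 * otherwise) and bound into GART; the reservation is then backed off and
 * the GPU address cached in afb->address.
 */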
6444
3ee6b26b
AD
6445static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6446 struct drm_plane_state *old_state)
e7b07cee
HW
6447{
6448 struct amdgpu_bo *rbo;
e7b07cee
HW
6449 int r;
6450
6451 if (!old_state->fb)
6452 return;
6453
e68d14dd 6454 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
e7b07cee
HW
6455 r = amdgpu_bo_reserve(rbo, false);
6456 if (unlikely(r)) {
6457 DRM_ERROR("failed to reserve rbo before unpin\n");
6458 return;
b830ebc9
HW
6459 }
6460
6461 amdgpu_bo_unpin(rbo);
6462 amdgpu_bo_unreserve(rbo);
6463 amdgpu_bo_unref(&rbo);
e7b07cee
HW
6464}
6465
8c44515b
AP
6466static int dm_plane_helper_check_state(struct drm_plane_state *state,
6467 struct drm_crtc_state *new_crtc_state)
6468{
6300b3bd
MK
6469 struct drm_framebuffer *fb = state->fb;
6470 int min_downscale, max_upscale;
6471 int min_scale = 0;
6472 int max_scale = INT_MAX;
6473
40d916a2 6474 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6300b3bd 6475 if (fb && state->crtc) {
40d916a2
NC
6476 /* Validate viewport to cover the case when only the position changes */
6477 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
6478 int viewport_width = state->crtc_w;
6479 int viewport_height = state->crtc_h;
6480
6481 if (state->crtc_x < 0)
6482 viewport_width += state->crtc_x;
6483 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
6484 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
6485
6486 if (state->crtc_y < 0)
6487 viewport_height += state->crtc_y;
6488 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
6489 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
6490
6491 /* If completely outside of screen, viewport_width and/or viewport_height will be negative,
6492 * which is still OK to satisfy the condition below, thereby also covering these cases
6493 * (when plane is completely outside of screen).
6494 * x2 for width is because of pipe-split.
6495 */
6496 if (viewport_width < MIN_VIEWPORT_SIZE*2 || viewport_height < MIN_VIEWPORT_SIZE)
6497 return -EINVAL;
6498 }
6499
6500 /* Get min/max allowed scaling factors from plane caps. */
6300b3bd
MK
6501 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6502 &min_downscale, &max_upscale);
6503 /*
6504 * Convert to drm convention: 16.16 fixed point, instead of dc's
6505 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6506 * dst/src, so min_scale = 1.0 / max_upscale, etc.
6507 */
6508 min_scale = (1000 << 16) / max_upscale;
6509 max_scale = (1000 << 16) / min_downscale;
6510 }
8c44515b 6511
8c44515b 6512 return drm_atomic_helper_check_plane_state(
6300b3bd 6513 state, new_crtc_state, min_scale, max_scale, true, true);
8c44515b
AP
6514}
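/*
 * Worked example of the fixed-point conversion above (illustrative): a
 * plane cap of max_upscale = 16000 (16x in dc's 1.0 == 1000 convention)
 * gives min_scale = (1000 << 16) / 16000 = 4096, i.e. 1/16 in drm's 16.16
 * format; a min_downscale of 250 likewise gives max_scale =
 * (1000 << 16) / 250 = 262144, i.e. 4x.
 */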
6515
7578ecda
AD
6516static int dm_plane_atomic_check(struct drm_plane *plane,
6517 struct drm_plane_state *state)
cbd19488 6518{
1348969a 6519 struct amdgpu_device *adev = drm_to_adev(plane->dev);
cbd19488 6520 struct dc *dc = adev->dm.dc;
78171832 6521 struct dm_plane_state *dm_plane_state;
695af5f9 6522 struct dc_scaling_info scaling_info;
8c44515b 6523 struct drm_crtc_state *new_crtc_state;
695af5f9 6524 int ret;
78171832 6525
e8a98235
RS
6526 trace_amdgpu_dm_plane_atomic_check(state);
6527
78171832 6528 dm_plane_state = to_dm_plane_state(state);
cbd19488 6529
3be5262e 6530 if (!dm_plane_state->dc_state)
9a3329b1 6531 return 0;
cbd19488 6532
8c44515b
AP
6533 new_crtc_state =
6534 drm_atomic_get_new_crtc_state(state->state, state->crtc);
6535 if (!new_crtc_state)
6536 return -EINVAL;
6537
6538 ret = dm_plane_helper_check_state(state, new_crtc_state);
6539 if (ret)
6540 return ret;
6541
695af5f9
NK
6542 ret = fill_dc_scaling_info(state, &scaling_info);
6543 if (ret)
6544 return ret;
a05bcff1 6545
62c933f9 6546 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
cbd19488
AG
6547 return 0;
6548
6549 return -EINVAL;
6550}
6551
674e78ac
NK
6552static int dm_plane_atomic_async_check(struct drm_plane *plane,
6553 struct drm_plane_state *new_plane_state)
6554{
6555 /* Only support async updates on cursor planes. */
6556 if (plane->type != DRM_PLANE_TYPE_CURSOR)
6557 return -EINVAL;
6558
6559 return 0;
6560}
6561
6562static void dm_plane_atomic_async_update(struct drm_plane *plane,
6563 struct drm_plane_state *new_state)
6564{
6565 struct drm_plane_state *old_state =
6566 drm_atomic_get_old_plane_state(new_state->state, plane);
6567
e8a98235
RS
6568 trace_amdgpu_dm_atomic_update_cursor(new_state);
6569
332af874 6570 swap(plane->state->fb, new_state->fb);
674e78ac
NK
6571
6572 plane->state->src_x = new_state->src_x;
6573 plane->state->src_y = new_state->src_y;
6574 plane->state->src_w = new_state->src_w;
6575 plane->state->src_h = new_state->src_h;
6576 plane->state->crtc_x = new_state->crtc_x;
6577 plane->state->crtc_y = new_state->crtc_y;
6578 plane->state->crtc_w = new_state->crtc_w;
6579 plane->state->crtc_h = new_state->crtc_h;
6580
6581 handle_cursor_update(plane, old_state);
6582}
6583
e7b07cee
HW
6584static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6585 .prepare_fb = dm_plane_helper_prepare_fb,
6586 .cleanup_fb = dm_plane_helper_cleanup_fb,
cbd19488 6587 .atomic_check = dm_plane_atomic_check,
674e78ac
NK
6588 .atomic_async_check = dm_plane_atomic_async_check,
6589 .atomic_async_update = dm_plane_atomic_async_update
e7b07cee
HW
6590};
6591
6592/*
6593 * TODO: these are currently initialized to rgb formats only.
6594 * For future use cases we should either initialize them dynamically based on
 6595 * plane capabilities, or initialize this array to all formats, so the internal
 1f6010a9 6596 * drm check will succeed, and let DC implement the proper check.
e7b07cee 6597 */
d90371b0 6598static const uint32_t rgb_formats[] = {
e7b07cee
HW
6599 DRM_FORMAT_XRGB8888,
6600 DRM_FORMAT_ARGB8888,
6601 DRM_FORMAT_RGBA8888,
6602 DRM_FORMAT_XRGB2101010,
6603 DRM_FORMAT_XBGR2101010,
6604 DRM_FORMAT_ARGB2101010,
6605 DRM_FORMAT_ABGR2101010,
bcd47f60
MR
6606 DRM_FORMAT_XBGR8888,
6607 DRM_FORMAT_ABGR8888,
46dd9ff7 6608 DRM_FORMAT_RGB565,
e7b07cee
HW
6609};
6610
0d579c7e
NK
6611static const uint32_t overlay_formats[] = {
6612 DRM_FORMAT_XRGB8888,
6613 DRM_FORMAT_ARGB8888,
6614 DRM_FORMAT_RGBA8888,
6615 DRM_FORMAT_XBGR8888,
6616 DRM_FORMAT_ABGR8888,
7267a1a9 6617 DRM_FORMAT_RGB565
e7b07cee
HW
6618};
6619
6620static const u32 cursor_formats[] = {
6621 DRM_FORMAT_ARGB8888
6622};
6623
37c6a93b
NK
6624static int get_plane_formats(const struct drm_plane *plane,
6625 const struct dc_plane_cap *plane_cap,
6626 uint32_t *formats, int max_formats)
e7b07cee 6627{
37c6a93b
NK
6628 int i, num_formats = 0;
6629
6630 /*
6631 * TODO: Query support for each group of formats directly from
6632 * DC plane caps. This will require adding more formats to the
6633 * caps list.
6634 */
e7b07cee 6635
f180b4bc 6636 switch (plane->type) {
e7b07cee 6637 case DRM_PLANE_TYPE_PRIMARY:
37c6a93b
NK
6638 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6639 if (num_formats >= max_formats)
6640 break;
6641
6642 formats[num_formats++] = rgb_formats[i];
6643 }
6644
ea36ad34 6645 if (plane_cap && plane_cap->pixel_format_support.nv12)
37c6a93b 6646 formats[num_formats++] = DRM_FORMAT_NV12;
cbec6477
SW
6647 if (plane_cap && plane_cap->pixel_format_support.p010)
6648 formats[num_formats++] = DRM_FORMAT_P010;
492548dc
SW
6649 if (plane_cap && plane_cap->pixel_format_support.fp16) {
6650 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6651 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
2a5195dc
MK
6652 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6653 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
492548dc 6654 }
e7b07cee 6655 break;
37c6a93b 6656
e7b07cee 6657 case DRM_PLANE_TYPE_OVERLAY:
37c6a93b
NK
6658 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6659 if (num_formats >= max_formats)
6660 break;
6661
6662 formats[num_formats++] = overlay_formats[i];
6663 }
e7b07cee 6664 break;
37c6a93b 6665
e7b07cee 6666 case DRM_PLANE_TYPE_CURSOR:
37c6a93b
NK
6667 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6668 if (num_formats >= max_formats)
6669 break;
6670
6671 formats[num_formats++] = cursor_formats[i];
6672 }
e7b07cee
HW
6673 break;
6674 }
6675
37c6a93b
NK
6676 return num_formats;
6677}
6678
6679static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6680 struct drm_plane *plane,
6681 unsigned long possible_crtcs,
6682 const struct dc_plane_cap *plane_cap)
6683{
6684 uint32_t formats[32];
6685 int num_formats;
6686 int res = -EPERM;
ecc874a6 6687 unsigned int supported_rotations;
faa37f54 6688 uint64_t *modifiers = NULL;
37c6a93b
NK
6689
6690 num_formats = get_plane_formats(plane, plane_cap, formats,
6691 ARRAY_SIZE(formats));
6692
faa37f54
BN
6693 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
6694 if (res)
6695 return res;
6696
4a580877 6697 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
37c6a93b 6698 &dm_plane_funcs, formats, num_formats,
faa37f54
BN
6699 modifiers, plane->type, NULL);
6700 kfree(modifiers);
37c6a93b
NK
6701 if (res)
6702 return res;
6703
cc1fec57
NK
6704 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6705 plane_cap && plane_cap->per_pixel_alpha) {
d74004b6
NK
6706 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6707 BIT(DRM_MODE_BLEND_PREMULTI);
6708
6709 drm_plane_create_alpha_property(plane);
6710 drm_plane_create_blend_mode_property(plane, blend_caps);
6711 }
6712
fc8e5230 6713 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
00755bb7
SW
6714 plane_cap &&
6715 (plane_cap->pixel_format_support.nv12 ||
6716 plane_cap->pixel_format_support.p010)) {
fc8e5230
NK
6717 /* This only affects YUV formats. */
6718 drm_plane_create_color_properties(
6719 plane,
6720 BIT(DRM_COLOR_YCBCR_BT601) |
00755bb7
SW
6721 BIT(DRM_COLOR_YCBCR_BT709) |
6722 BIT(DRM_COLOR_YCBCR_BT2020),
fc8e5230
NK
6723 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6724 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6725 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6726 }
6727
ecc874a6
PLG
6728 supported_rotations =
6729 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6730 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6731
1347385f
SS
6732 if (dm->adev->asic_type >= CHIP_BONAIRE &&
6733 plane->type != DRM_PLANE_TYPE_CURSOR)
f784112f
MR
6734 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6735 supported_rotations);
ecc874a6 6736
f180b4bc 6737 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
e7b07cee 6738
96719c54 6739 /* Create (reset) the plane state */
f180b4bc
HW
6740 if (plane->funcs->reset)
6741 plane->funcs->reset(plane);
96719c54 6742
37c6a93b 6743 return 0;
e7b07cee
HW
6744}
6745
7578ecda
AD
6746static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6747 struct drm_plane *plane,
6748 uint32_t crtc_index)
e7b07cee
HW
6749{
6750 struct amdgpu_crtc *acrtc = NULL;
f180b4bc 6751 struct drm_plane *cursor_plane;
e7b07cee
HW
6752
6753 int res = -ENOMEM;
6754
6755 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6756 if (!cursor_plane)
6757 goto fail;
6758
f180b4bc 6759 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
cc1fec57 6760 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
e7b07cee
HW
6761
6762 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6763 if (!acrtc)
6764 goto fail;
6765
6766 res = drm_crtc_init_with_planes(
6767 dm->ddev,
6768 &acrtc->base,
6769 plane,
f180b4bc 6770 cursor_plane,
e7b07cee
HW
6771 &amdgpu_dm_crtc_funcs, NULL);
6772
6773 if (res)
6774 goto fail;
6775
6776 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6777
96719c54
HW
 6778 /* Create (reset) the crtc state */
6779 if (acrtc->base.funcs->reset)
6780 acrtc->base.funcs->reset(&acrtc->base);
6781
e7b07cee
HW
6782 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6783 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6784
6785 acrtc->crtc_id = crtc_index;
6786 acrtc->base.enabled = false;
c37e2d29 6787 acrtc->otg_inst = -1;
e7b07cee
HW
6788
6789 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
236d0e4f
LSL
6790 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6791 true, MAX_COLOR_LUT_ENTRIES);
086247a4 6792 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
e2881d6d 6793
e7b07cee
HW
6794 return 0;
6795
6796fail:
b830ebc9
HW
6797 kfree(acrtc);
6798 kfree(cursor_plane);
e7b07cee
HW
6799 return res;
6800}
6801
6802
6803static int to_drm_connector_type(enum signal_type st)
6804{
6805 switch (st) {
6806 case SIGNAL_TYPE_HDMI_TYPE_A:
6807 return DRM_MODE_CONNECTOR_HDMIA;
6808 case SIGNAL_TYPE_EDP:
6809 return DRM_MODE_CONNECTOR_eDP;
11c3ee48
AD
6810 case SIGNAL_TYPE_LVDS:
6811 return DRM_MODE_CONNECTOR_LVDS;
e7b07cee
HW
6812 case SIGNAL_TYPE_RGB:
6813 return DRM_MODE_CONNECTOR_VGA;
6814 case SIGNAL_TYPE_DISPLAY_PORT:
6815 case SIGNAL_TYPE_DISPLAY_PORT_MST:
6816 return DRM_MODE_CONNECTOR_DisplayPort;
6817 case SIGNAL_TYPE_DVI_DUAL_LINK:
6818 case SIGNAL_TYPE_DVI_SINGLE_LINK:
6819 return DRM_MODE_CONNECTOR_DVID;
6820 case SIGNAL_TYPE_VIRTUAL:
6821 return DRM_MODE_CONNECTOR_VIRTUAL;
6822
6823 default:
6824 return DRM_MODE_CONNECTOR_Unknown;
6825 }
6826}
6827
2b4c1c05
DV
6828static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6829{
62afb4ad
JRS
6830 struct drm_encoder *encoder;
6831
6832 /* There is only one encoder per connector */
6833 drm_connector_for_each_possible_encoder(connector, encoder)
6834 return encoder;
6835
6836 return NULL;
2b4c1c05
DV
6837}
6838
e7b07cee
HW
6839static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6840{
e7b07cee
HW
6841 struct drm_encoder *encoder;
6842 struct amdgpu_encoder *amdgpu_encoder;
6843
2b4c1c05 6844 encoder = amdgpu_dm_connector_to_encoder(connector);
e7b07cee
HW
6845
6846 if (encoder == NULL)
6847 return;
6848
6849 amdgpu_encoder = to_amdgpu_encoder(encoder);
6850
6851 amdgpu_encoder->native_mode.clock = 0;
6852
6853 if (!list_empty(&connector->probed_modes)) {
6854 struct drm_display_mode *preferred_mode = NULL;
b830ebc9 6855
e7b07cee 6856 list_for_each_entry(preferred_mode,
b830ebc9
HW
6857 &connector->probed_modes,
6858 head) {
6859 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6860 amdgpu_encoder->native_mode = *preferred_mode;
6861
e7b07cee
HW
6862 break;
6863 }
6864
6865 }
6866}
6867
3ee6b26b
AD
6868static struct drm_display_mode *
6869amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6870 char *name,
6871 int hdisplay, int vdisplay)
e7b07cee
HW
6872{
6873 struct drm_device *dev = encoder->dev;
6874 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6875 struct drm_display_mode *mode = NULL;
6876 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6877
6878 mode = drm_mode_duplicate(dev, native_mode);
6879
b830ebc9 6880 if (mode == NULL)
e7b07cee
HW
6881 return NULL;
6882
6883 mode->hdisplay = hdisplay;
6884 mode->vdisplay = vdisplay;
6885 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
090afc1e 6886 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
e7b07cee
HW
6887
6888 return mode;
6889
6890}
6891
6892static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
3ee6b26b 6893 struct drm_connector *connector)
e7b07cee
HW
6894{
6895 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6896 struct drm_display_mode *mode = NULL;
6897 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
c84dec2f
HW
6898 struct amdgpu_dm_connector *amdgpu_dm_connector =
6899 to_amdgpu_dm_connector(connector);
e7b07cee
HW
6900 int i;
6901 int n;
6902 struct mode_size {
6903 char name[DRM_DISPLAY_MODE_LEN];
6904 int w;
6905 int h;
b830ebc9 6906 } common_modes[] = {
e7b07cee
HW
6907 { "640x480", 640, 480},
6908 { "800x600", 800, 600},
6909 { "1024x768", 1024, 768},
6910 { "1280x720", 1280, 720},
6911 { "1280x800", 1280, 800},
6912 {"1280x1024", 1280, 1024},
6913 { "1440x900", 1440, 900},
6914 {"1680x1050", 1680, 1050},
6915 {"1600x1200", 1600, 1200},
6916 {"1920x1080", 1920, 1080},
6917 {"1920x1200", 1920, 1200}
6918 };
6919
b830ebc9 6920 n = ARRAY_SIZE(common_modes);
e7b07cee
HW
6921
6922 for (i = 0; i < n; i++) {
6923 struct drm_display_mode *curmode = NULL;
6924 bool mode_existed = false;
6925
6926 if (common_modes[i].w > native_mode->hdisplay ||
b830ebc9
HW
6927 common_modes[i].h > native_mode->vdisplay ||
6928 (common_modes[i].w == native_mode->hdisplay &&
6929 common_modes[i].h == native_mode->vdisplay))
6930 continue;
e7b07cee
HW
6931
6932 list_for_each_entry(curmode, &connector->probed_modes, head) {
6933 if (common_modes[i].w == curmode->hdisplay &&
b830ebc9 6934 common_modes[i].h == curmode->vdisplay) {
e7b07cee
HW
6935 mode_existed = true;
6936 break;
6937 }
6938 }
6939
6940 if (mode_existed)
6941 continue;
6942
6943 mode = amdgpu_dm_create_common_mode(encoder,
6944 common_modes[i].name, common_modes[i].w,
6945 common_modes[i].h);
6946 drm_mode_probed_add(connector, mode);
c84dec2f 6947 amdgpu_dm_connector->num_modes++;
e7b07cee
HW
6948 }
6949}
6950
3ee6b26b
AD
6951static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6952 struct edid *edid)
e7b07cee 6953{
c84dec2f
HW
6954 struct amdgpu_dm_connector *amdgpu_dm_connector =
6955 to_amdgpu_dm_connector(connector);
e7b07cee
HW
6956
6957 if (edid) {
6958 /* empty probed_modes */
6959 INIT_LIST_HEAD(&connector->probed_modes);
c84dec2f 6960 amdgpu_dm_connector->num_modes =
e7b07cee
HW
6961 drm_add_edid_modes(connector, edid);
6962
f1e5e913
YMM
 6963 /* Sort the probed modes before calling
 6964 * amdgpu_dm_get_native_mode(), since the EDID can have
 6965 * more than one preferred mode. Modes later in the
 6966 * probed mode list could be of a higher, preferred
 6967 * resolution: for example, a 3840x2160 preferred timing
 6968 * in the base EDID and a 4096x2160 preferred resolution
 6969 * in a later DID extension block.
 6970 */
6971 drm_mode_sort(&connector->probed_modes);
e7b07cee 6972 amdgpu_dm_get_native_mode(connector);
a8d8d3dc 6973 } else {
c84dec2f 6974 amdgpu_dm_connector->num_modes = 0;
a8d8d3dc 6975 }
e7b07cee
HW
6976}
6977
7578ecda 6978static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
e7b07cee 6979{
c84dec2f
HW
6980 struct amdgpu_dm_connector *amdgpu_dm_connector =
6981 to_amdgpu_dm_connector(connector);
e7b07cee 6982 struct drm_encoder *encoder;
c84dec2f 6983 struct edid *edid = amdgpu_dm_connector->edid;
e7b07cee 6984
2b4c1c05 6985 encoder = amdgpu_dm_connector_to_encoder(connector);
3e332d3a 6986
5c0e6840 6987 if (!drm_edid_is_valid(edid)) {
1b369d3c
ML
6988 amdgpu_dm_connector->num_modes =
6989 drm_add_modes_noedid(connector, 640, 480);
85ee15d6
ML
6990 } else {
6991 amdgpu_dm_connector_ddc_get_modes(connector, edid);
6992 amdgpu_dm_connector_add_common_modes(encoder, connector);
6993 }
3e332d3a 6994 amdgpu_dm_fbc_init(connector);
5099114b 6995
c84dec2f 6996 return amdgpu_dm_connector->num_modes;
e7b07cee
HW
6997}
6998
3ee6b26b
AD
6999void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7000 struct amdgpu_dm_connector *aconnector,
7001 int connector_type,
7002 struct dc_link *link,
7003 int link_index)
e7b07cee 7004{
1348969a 7005 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
e7b07cee 7006
f04bee34
NK
7007 /*
7008 * Some of the properties below require access to state, like bpc.
7009 * Allocate some default initial connector state with our reset helper.
7010 */
7011 if (aconnector->base.funcs->reset)
7012 aconnector->base.funcs->reset(&aconnector->base);
7013
e7b07cee
HW
7014 aconnector->connector_id = link_index;
7015 aconnector->dc_link = link;
7016 aconnector->base.interlace_allowed = false;
7017 aconnector->base.doublescan_allowed = false;
7018 aconnector->base.stereo_allowed = false;
7019 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7020 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6ce8f316 7021 aconnector->audio_inst = -1;
e7b07cee
HW
7022 mutex_init(&aconnector->hpd_lock);
7023
1f6010a9
DF
7024 /*
 7025 * Configure HPD hot-plug support: the default connector->polled
b830ebc9
HW
 7026 * value is 0, which means HPD hot plug is not supported.
7027 */
e7b07cee
HW
7028 switch (connector_type) {
7029 case DRM_MODE_CONNECTOR_HDMIA:
7030 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
e7baae1c 7031 aconnector->base.ycbcr_420_allowed =
9ea59d5a 7032 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
e7b07cee
HW
7033 break;
7034 case DRM_MODE_CONNECTOR_DisplayPort:
7035 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
e7baae1c 7036 aconnector->base.ycbcr_420_allowed =
9ea59d5a 7037 link->link_enc->features.dp_ycbcr420_supported ? true : false;
e7b07cee
HW
7038 break;
7039 case DRM_MODE_CONNECTOR_DVID:
7040 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7041 break;
7042 default:
7043 break;
7044 }
7045
7046 drm_object_attach_property(&aconnector->base.base,
7047 dm->ddev->mode_config.scaling_mode_property,
7048 DRM_MODE_SCALE_NONE);
7049
7050 drm_object_attach_property(&aconnector->base.base,
7051 adev->mode_info.underscan_property,
7052 UNDERSCAN_OFF);
7053 drm_object_attach_property(&aconnector->base.base,
7054 adev->mode_info.underscan_hborder_property,
7055 0);
7056 drm_object_attach_property(&aconnector->base.base,
7057 adev->mode_info.underscan_vborder_property,
7058 0);
1825fd34 7059
8c61b31e
JFZ
7060 if (!aconnector->mst_port)
7061 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
1825fd34 7062
4a8ca46b
RL
7063 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
7064 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7065 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
e7b07cee 7066
c1ee92f9 7067 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
5cb32419 7068 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
c1ee92f9
DF
7069 drm_object_attach_property(&aconnector->base.base,
7070 adev->mode_info.abm_level_property, 0);
7071 }
bb47de73
NK
7072
7073 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7fad8da1
NK
7074 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7075 connector_type == DRM_MODE_CONNECTOR_eDP) {
88694af9
NK
7076 drm_object_attach_property(
7077 &aconnector->base.base,
7078 dm->ddev->mode_config.hdr_output_metadata_property, 0);
7079
8c61b31e
JFZ
7080 if (!aconnector->mst_port)
7081 drm_connector_attach_vrr_capable_property(&aconnector->base);
7082
0c8620d6 7083#ifdef CONFIG_DRM_AMD_DC_HDCP
e22bb562 7084 if (adev->dm.hdcp_workqueue)
53e108aa 7085 drm_connector_attach_content_protection_property(&aconnector->base, true);
0c8620d6 7086#endif
bb47de73 7087 }
e7b07cee
HW
7088}
7089
7578ecda
AD
7090static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7091 struct i2c_msg *msgs, int num)
e7b07cee
HW
7092{
7093 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7094 struct ddc_service *ddc_service = i2c->ddc_service;
7095 struct i2c_command cmd;
7096 int i;
7097 int result = -EIO;
7098
b830ebc9 7099 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
e7b07cee
HW
7100
7101 if (!cmd.payloads)
7102 return result;
7103
7104 cmd.number_of_payloads = num;
7105 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7106 cmd.speed = 100;
7107
7108 for (i = 0; i < num; i++) {
7109 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7110 cmd.payloads[i].address = msgs[i].addr;
7111 cmd.payloads[i].length = msgs[i].len;
7112 cmd.payloads[i].data = msgs[i].buf;
7113 }
7114
c85e6e54
DF
7115 if (dc_submit_i2c(
7116 ddc_service->ctx->dc,
7117 ddc_service->ddc_pin->hw_info.ddc_channel,
e7b07cee
HW
7118 &cmd))
7119 result = num;
7120
7121 kfree(cmd.payloads);
7122 return result;
7123}
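/*
 * Sketch (assumed values, not taken from the driver): a typical EDID
 * fetch arrives here as two i2c_msg entries and is translated 1:1 into
 * dc i2c_payloads by the loop above:
 *
 *   msgs[0]: addr=0x50, flags=0,        len=1,   buf={0x00} -> write payload
 *   msgs[1]: addr=0x50, flags=I2C_M_RD, len=128, buf=edid   -> read payload
 *
 * dc_submit_i2c() then executes both payloads as one command on the
 * link's DDC channel.
 */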
7124
7578ecda 7125static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
e7b07cee
HW
7126{
7127 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7128}
7129
7130static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7131 .master_xfer = amdgpu_dm_i2c_xfer,
7132 .functionality = amdgpu_dm_i2c_func,
7133};
7134
3ee6b26b
AD
7135static struct amdgpu_i2c_adapter *
7136create_i2c(struct ddc_service *ddc_service,
7137 int link_index,
7138 int *res)
e7b07cee
HW
7139{
7140 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7141 struct amdgpu_i2c_adapter *i2c;
7142
b830ebc9 7143 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
2a55f096
ES
7144 if (!i2c)
7145 return NULL;
e7b07cee
HW
7146 i2c->base.owner = THIS_MODULE;
7147 i2c->base.class = I2C_CLASS_DDC;
7148 i2c->base.dev.parent = &adev->pdev->dev;
7149 i2c->base.algo = &amdgpu_dm_i2c_algo;
b830ebc9 7150 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
e7b07cee
HW
7151 i2c_set_adapdata(&i2c->base, i2c);
7152 i2c->ddc_service = ddc_service;
c85e6e54 7153 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
e7b07cee
HW
7154
7155 return i2c;
7156}
7157
89fc8d4e 7158
1f6010a9
DF
7159/*
7160 * Note: this function assumes that dc_link_detect() was called for the
b830ebc9
HW
7161 * dc_link which will be represented by this aconnector.
7162 */
7578ecda
AD
7163static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7164 struct amdgpu_dm_connector *aconnector,
7165 uint32_t link_index,
7166 struct amdgpu_encoder *aencoder)
e7b07cee
HW
7167{
7168 int res = 0;
7169 int connector_type;
7170 struct dc *dc = dm->dc;
7171 struct dc_link *link = dc_get_link_at_index(dc, link_index);
7172 struct amdgpu_i2c_adapter *i2c;
9a227d26
TSD
7173
7174 link->priv = aconnector;
e7b07cee 7175
f1ad2f5e 7176 DRM_DEBUG_DRIVER("%s()\n", __func__);
e7b07cee
HW
7177
7178 i2c = create_i2c(link->ddc, link->link_index, &res);
2a55f096
ES
7179 if (!i2c) {
7180 DRM_ERROR("Failed to create i2c adapter data\n");
7181 return -ENOMEM;
7182 }
7183
e7b07cee
HW
7184 aconnector->i2c = i2c;
7185 res = i2c_add_adapter(&i2c->base);
7186
7187 if (res) {
7188 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7189 goto out_free;
7190 }
7191
7192 connector_type = to_drm_connector_type(link->connector_signal);
7193
17165de2 7194 res = drm_connector_init_with_ddc(
e7b07cee
HW
7195 dm->ddev,
7196 &aconnector->base,
7197 &amdgpu_dm_connector_funcs,
17165de2
AP
7198 connector_type,
7199 &i2c->base);
e7b07cee
HW
7200
7201 if (res) {
7202 DRM_ERROR("connector_init failed\n");
7203 aconnector->connector_id = -1;
7204 goto out_free;
7205 }
7206
7207 drm_connector_helper_add(
7208 &aconnector->base,
7209 &amdgpu_dm_connector_helper_funcs);
7210
7211 amdgpu_dm_connector_init_helper(
7212 dm,
7213 aconnector,
7214 connector_type,
7215 link,
7216 link_index);
7217
cde4c44d 7218 drm_connector_attach_encoder(
e7b07cee
HW
7219 &aconnector->base, &aencoder->base);
7220
e7b07cee
HW
7221 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7222 || connector_type == DRM_MODE_CONNECTOR_eDP)
7daec99f 7223 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
e7b07cee 7224
e7b07cee
HW
7225out_free:
7226 if (res) {
7227 kfree(i2c);
7228 aconnector->i2c = NULL;
7229 }
7230 return res;
7231}
7232
7233int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7234{
7235 switch (adev->mode_info.num_crtc) {
7236 case 1:
7237 return 0x1;
7238 case 2:
7239 return 0x3;
7240 case 3:
7241 return 0x7;
7242 case 4:
7243 return 0xf;
7244 case 5:
7245 return 0x1f;
7246 case 6:
7247 default:
7248 return 0x3f;
7249 }
7250}
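/*
 * Note (sketch, not driver code): for the supported 1-6 CRTC range the
 * switch above is equivalent to building a contiguous bitmask of the
 * first num_crtc CRTCs, capped at 6:
 *
 *   return (1u << min(adev->mode_info.num_crtc, 6)) - 1;
 */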
7251
7578ecda
AD
7252static int amdgpu_dm_encoder_init(struct drm_device *dev,
7253 struct amdgpu_encoder *aencoder,
7254 uint32_t link_index)
e7b07cee 7255{
1348969a 7256 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
7257
7258 int res = drm_encoder_init(dev,
7259 &aencoder->base,
7260 &amdgpu_dm_encoder_funcs,
7261 DRM_MODE_ENCODER_TMDS,
7262 NULL);
7263
7264 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7265
7266 if (!res)
7267 aencoder->encoder_id = link_index;
7268 else
7269 aencoder->encoder_id = -1;
7270
7271 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7272
7273 return res;
7274}
7275
3ee6b26b
AD
7276static void manage_dm_interrupts(struct amdgpu_device *adev,
7277 struct amdgpu_crtc *acrtc,
7278 bool enable)
e7b07cee
HW
7279{
7280 /*
8fe684e9
NK
7281 * We have no guarantee that the frontend index maps to the same
7282 * backend index - some even map to more than one.
7283 *
7284 * TODO: Use a different interrupt or check DC itself for the mapping.
e7b07cee
HW
7285 */
7286 int irq_type =
734dd01d 7287 amdgpu_display_crtc_idx_to_irq_type(
e7b07cee
HW
7288 adev,
7289 acrtc->crtc_id);
7290
7291 if (enable) {
7292 drm_crtc_vblank_on(&acrtc->base);
7293 amdgpu_irq_get(
7294 adev,
7295 &adev->pageflip_irq,
7296 irq_type);
7297 } else {
7298
7299 amdgpu_irq_put(
7300 adev,
7301 &adev->pageflip_irq,
7302 irq_type);
7303 drm_crtc_vblank_off(&acrtc->base);
7304 }
7305}
7306
8fe684e9
NK
7307static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7308 struct amdgpu_crtc *acrtc)
7309{
7310 int irq_type =
7311 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7312
7313 /**
 7315 * This reads the current state for the IRQ and forcibly reapplies
7315 * the setting to hardware.
7316 */
7317 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7318}
7319
3ee6b26b
AD
7320static bool
7321is_scaling_state_different(const struct dm_connector_state *dm_state,
7322 const struct dm_connector_state *old_dm_state)
e7b07cee
HW
7323{
7324 if (dm_state->scaling != old_dm_state->scaling)
7325 return true;
7326 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7327 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7328 return true;
7329 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7330 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7331 return true;
b830ebc9
HW
7332 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7333 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7334 return true;
e7b07cee
HW
7335 return false;
7336}
7337
0c8620d6
BL
7338#ifdef CONFIG_DRM_AMD_DC_HDCP
7339static bool is_content_protection_different(struct drm_connector_state *state,
7340 const struct drm_connector_state *old_state,
7341 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7342{
7343 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
97f6c917 7344 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
0c8620d6 7345
31c0ed90 7346 /* Handle: Type0/1 change */
53e108aa
BL
7347 if (old_state->hdcp_content_type != state->hdcp_content_type &&
7348 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7349 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7350 return true;
7351 }
7352
31c0ed90
BL
 7353 /* CP is being re-enabled, ignore this.
7354 *
7355 * Handles: ENABLED -> DESIRED
7356 */
0c8620d6
BL
7357 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7358 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7359 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7360 return false;
7361 }
7362
31c0ed90
BL
 7363 /* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED.
7364 *
7365 * Handles: UNDESIRED -> ENABLED
7366 */
0c8620d6
BL
7367 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7368 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7369 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7370
 7371 /* Check that something is connected/enabled; otherwise we would start hdcp with nothing connected/enabled
 7372 * (hot-plug, headless S3, dpms).
31c0ed90
BL
7373 *
7374 * Handles: DESIRED -> DESIRED (Special case)
0c8620d6 7375 */
97f6c917
BL
7376 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7377 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7378 dm_con_state->update_hdcp = false;
0c8620d6 7379 return true;
97f6c917 7380 }
0c8620d6 7381
31c0ed90
BL
7382 /*
7383 * Handles: UNDESIRED -> UNDESIRED
7384 * DESIRED -> DESIRED
7385 * ENABLED -> ENABLED
7386 */
0c8620d6
BL
7387 if (old_state->content_protection == state->content_protection)
7388 return false;
7389
31c0ed90
BL
7390 /*
7391 * Handles: UNDESIRED -> DESIRED
7392 * DESIRED -> UNDESIRED
7393 * ENABLED -> UNDESIRED
7394 */
97f6c917 7395 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
0c8620d6
BL
7396 return true;
7397
31c0ed90
BL
7398 /*
7399 * Handles: DESIRED -> ENABLED
7400 */
0c8620d6
BL
7401 return false;
7402}
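/*
 * Summary of the transitions handled above, derived from the checks in
 * order ("forced" means the function rewrites state->content_protection):
 *
 *   hdcp_content_type changed (not UNDESIRED) -> forced DESIRED, true
 *   ENABLED   -> DESIRED                      -> forced ENABLED, false
 *   UNDESIRED -> ENABLED (S3 resume)          -> forced DESIRED, falls through
 *   DESIRED   -> DESIRED with update_hdcp set -> true (hotplug/dpms special case)
 *   unchanged                                 -> false
 *   anything  -> UNDESIRED or DESIRED         -> true
 *   DESIRED   -> ENABLED                      -> false
 */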
7403
0c8620d6 7404#endif
3ee6b26b
AD
7405static void remove_stream(struct amdgpu_device *adev,
7406 struct amdgpu_crtc *acrtc,
7407 struct dc_stream_state *stream)
e7b07cee
HW
7408{
7409 /* this is the update mode case */
e7b07cee
HW
7410
7411 acrtc->otg_inst = -1;
7412 acrtc->enabled = false;
7413}
7414
7578ecda
AD
7415static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7416 struct dc_cursor_position *position)
2a8f6ccb 7417{
f4c2cc43 7418 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2a8f6ccb
HW
7419 int x, y;
7420 int xorigin = 0, yorigin = 0;
7421
e371e19c
NK
7422 position->enable = false;
7423 position->x = 0;
7424 position->y = 0;
7425
7426 if (!crtc || !plane->state->fb)
2a8f6ccb 7427 return 0;
2a8f6ccb
HW
7428
7429 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7430 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7431 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7432 __func__,
7433 plane->state->crtc_w,
7434 plane->state->crtc_h);
7435 return -EINVAL;
7436 }
7437
7438 x = plane->state->crtc_x;
7439 y = plane->state->crtc_y;
c14a005c 7440
e371e19c
NK
7441 if (x <= -amdgpu_crtc->max_cursor_width ||
7442 y <= -amdgpu_crtc->max_cursor_height)
7443 return 0;
7444
2a8f6ccb
HW
7445 if (x < 0) {
7446 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7447 x = 0;
7448 }
7449 if (y < 0) {
7450 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
7451 y = 0;
7452 }
7453 position->enable = true;
d243b6ff 7454 position->translate_by_source = true;
2a8f6ccb
HW
7455 position->x = x;
7456 position->y = y;
7457 position->x_hotspot = xorigin;
7458 position->y_hotspot = yorigin;
7459
7460 return 0;
7461}
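/*
 * Worked example (illustrative only): a 64x64 cursor at crtc_x = -10,
 * crtc_y = 20 is clamped to position (0, 20) with x_hotspot = 10, so
 * with translate_by_source set the cursor image is shifted left by 10
 * pixels instead of programming a negative coordinate.
 */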
7462
3ee6b26b
AD
7463static void handle_cursor_update(struct drm_plane *plane,
7464 struct drm_plane_state *old_plane_state)
e7b07cee 7465{
1348969a 7466 struct amdgpu_device *adev = drm_to_adev(plane->dev);
2a8f6ccb
HW
7467 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
7468 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
7469 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
7470 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7471 uint64_t address = afb ? afb->address : 0;
7472 struct dc_cursor_position position;
7473 struct dc_cursor_attributes attributes;
7474 int ret;
7475
e7b07cee
HW
7476 if (!plane->state->fb && !old_plane_state->fb)
7477 return;
7478
f1ad2f5e 7479 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
c12a7ba5
HW
7480 __func__,
7481 amdgpu_crtc->crtc_id,
7482 plane->state->crtc_w,
7483 plane->state->crtc_h);
2a8f6ccb
HW
7484
7485 ret = get_cursor_position(plane, crtc, &position);
7486 if (ret)
7487 return;
7488
7489 if (!position.enable) {
7490 /* turn off cursor */
674e78ac
NK
7491 if (crtc_state && crtc_state->stream) {
7492 mutex_lock(&adev->dm.dc_lock);
2a8f6ccb
HW
7493 dc_stream_set_cursor_position(crtc_state->stream,
7494 &position);
674e78ac
NK
7495 mutex_unlock(&adev->dm.dc_lock);
7496 }
2a8f6ccb 7497 return;
e7b07cee 7498 }
e7b07cee 7499
2a8f6ccb
HW
7500 amdgpu_crtc->cursor_width = plane->state->crtc_w;
7501 amdgpu_crtc->cursor_height = plane->state->crtc_h;
7502
c1cefe11 7503 memset(&attributes, 0, sizeof(attributes));
2a8f6ccb
HW
7504 attributes.address.high_part = upper_32_bits(address);
7505 attributes.address.low_part = lower_32_bits(address);
7506 attributes.width = plane->state->crtc_w;
7507 attributes.height = plane->state->crtc_h;
7508 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
7509 attributes.rotation_angle = 0;
7510 attributes.attribute_flags.value = 0;
7511
03a66367 7512 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
2a8f6ccb 7513
886daac9 7514 if (crtc_state->stream) {
674e78ac 7515 mutex_lock(&adev->dm.dc_lock);
886daac9
JZ
7516 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
7517 &attributes))
7518 DRM_ERROR("DC failed to set cursor attributes\n");
2a8f6ccb 7519
2a8f6ccb
HW
7520 if (!dc_stream_set_cursor_position(crtc_state->stream,
7521 &position))
7522 DRM_ERROR("DC failed to set cursor position\n");
674e78ac 7523 mutex_unlock(&adev->dm.dc_lock);
886daac9 7524 }
2a8f6ccb 7525}
e7b07cee
HW
7526
7527static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7528{
7529
7530 assert_spin_locked(&acrtc->base.dev->event_lock);
7531 WARN_ON(acrtc->event);
7532
7533 acrtc->event = acrtc->base.state->event;
7534
7535 /* Set the flip status */
7536 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7537
7538 /* Mark this event as consumed */
7539 acrtc->base.state->event = NULL;
7540
7541 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7542 acrtc->crtc_id);
7543}
7544
bb47de73
NK
7545static void update_freesync_state_on_stream(
7546 struct amdgpu_display_manager *dm,
7547 struct dm_crtc_state *new_crtc_state,
180db303
NK
7548 struct dc_stream_state *new_stream,
7549 struct dc_plane_state *surface,
7550 u32 flip_timestamp_in_us)
bb47de73 7551{
09aef2c4 7552 struct mod_vrr_params vrr_params;
bb47de73 7553 struct dc_info_packet vrr_infopacket = {0};
09aef2c4 7554 struct amdgpu_device *adev = dm->adev;
585d450c 7555 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
09aef2c4 7556 unsigned long flags;
bb47de73
NK
7557
7558 if (!new_stream)
7559 return;
7560
7561 /*
7562 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7563 * For now it's sufficient to just guard against these conditions.
7564 */
7565
7566 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7567 return;
7568
4a580877 7569 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
585d450c 7570 vrr_params = acrtc->dm_irq_params.vrr_params;
09aef2c4 7571
180db303
NK
7572 if (surface) {
7573 mod_freesync_handle_preflip(
7574 dm->freesync_module,
7575 surface,
7576 new_stream,
7577 flip_timestamp_in_us,
7578 &vrr_params);
09aef2c4
MK
7579
7580 if (adev->family < AMDGPU_FAMILY_AI &&
7581 amdgpu_dm_vrr_active(new_crtc_state)) {
7582 mod_freesync_handle_v_update(dm->freesync_module,
7583 new_stream, &vrr_params);
e63e2491
EB
7584
7585 /* Need to call this before the frame ends. */
7586 dc_stream_adjust_vmin_vmax(dm->dc,
7587 new_crtc_state->stream,
7588 &vrr_params.adjust);
09aef2c4 7589 }
180db303 7590 }
bb47de73
NK
7591
7592 mod_freesync_build_vrr_infopacket(
7593 dm->freesync_module,
7594 new_stream,
180db303 7595 &vrr_params,
ecd0136b
HT
7596 PACKET_TYPE_VRR,
7597 TRANSFER_FUNC_UNKNOWN,
bb47de73
NK
7598 &vrr_infopacket);
7599
8a48b44c 7600 new_crtc_state->freesync_timing_changed |=
585d450c 7601 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
180db303
NK
7602 &vrr_params.adjust,
7603 sizeof(vrr_params.adjust)) != 0);
bb47de73 7604
8a48b44c 7605 new_crtc_state->freesync_vrr_info_changed |=
bb47de73
NK
7606 (memcmp(&new_crtc_state->vrr_infopacket,
7607 &vrr_infopacket,
7608 sizeof(vrr_infopacket)) != 0);
7609
585d450c 7610 acrtc->dm_irq_params.vrr_params = vrr_params;
bb47de73
NK
7611 new_crtc_state->vrr_infopacket = vrr_infopacket;
7612
585d450c 7613 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
bb47de73
NK
7614 new_stream->vrr_infopacket = vrr_infopacket;
7615
7616 if (new_crtc_state->freesync_vrr_info_changed)
7617 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
7618 new_crtc_state->base.crtc->base.id,
7619 (int)new_crtc_state->base.vrr_enabled,
180db303 7620 (int)vrr_params.state);
09aef2c4 7621
4a580877 7622 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
bb47de73
NK
7623}
7624
585d450c 7625static void update_stream_irq_parameters(
e854194c
MK
7626 struct amdgpu_display_manager *dm,
7627 struct dm_crtc_state *new_crtc_state)
7628{
7629 struct dc_stream_state *new_stream = new_crtc_state->stream;
09aef2c4 7630 struct mod_vrr_params vrr_params;
e854194c 7631 struct mod_freesync_config config = new_crtc_state->freesync_config;
09aef2c4 7632 struct amdgpu_device *adev = dm->adev;
585d450c 7633 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
09aef2c4 7634 unsigned long flags;
e854194c
MK
7635
7636 if (!new_stream)
7637 return;
7638
7639 /*
7640 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7641 * For now it's sufficient to just guard against these conditions.
7642 */
7643 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7644 return;
7645
4a580877 7646 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
585d450c 7647 vrr_params = acrtc->dm_irq_params.vrr_params;
09aef2c4 7648
e854194c
MK
7649 if (new_crtc_state->vrr_supported &&
7650 config.min_refresh_in_uhz &&
7651 config.max_refresh_in_uhz) {
7652 config.state = new_crtc_state->base.vrr_enabled ?
7653 VRR_STATE_ACTIVE_VARIABLE :
7654 VRR_STATE_INACTIVE;
7655 } else {
7656 config.state = VRR_STATE_UNSUPPORTED;
7657 }
7658
7659 mod_freesync_build_vrr_params(dm->freesync_module,
7660 new_stream,
7661 &config, &vrr_params);
7662
7663 new_crtc_state->freesync_timing_changed |=
585d450c
AP
7664 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7665 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
e854194c 7666
585d450c
AP
7667 new_crtc_state->freesync_config = config;
7668 /* Copy state for access from DM IRQ handler */
7669 acrtc->dm_irq_params.freesync_config = config;
7670 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7671 acrtc->dm_irq_params.vrr_params = vrr_params;
4a580877 7672 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
e854194c
MK
7673}
7674
66b0c973
MK
7675static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7676 struct dm_crtc_state *new_state)
7677{
7678 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7679 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7680
7681 if (!old_vrr_active && new_vrr_active) {
7682 /* Transition VRR inactive -> active:
7683 * While VRR is active, we must not disable vblank irq, as a
 7684 * re-enable after a disable could compute bogus vblank/pflip
 7685 * timestamps if the disable happened inside the display front porch.
d2574c33
MK
7686 *
7687 * We also need vupdate irq for the actual core vblank handling
7688 * at end of vblank.
66b0c973 7689 */
d2574c33 7690 dm_set_vupdate_irq(new_state->base.crtc, true);
66b0c973
MK
7691 drm_crtc_vblank_get(new_state->base.crtc);
7692 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7693 __func__, new_state->base.crtc->base.id);
7694 } else if (old_vrr_active && !new_vrr_active) {
7695 /* Transition VRR active -> inactive:
7696 * Allow vblank irq disable again for fixed refresh rate.
7697 */
d2574c33 7698 dm_set_vupdate_irq(new_state->base.crtc, false);
66b0c973
MK
7699 drm_crtc_vblank_put(new_state->base.crtc);
7700 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7701 __func__, new_state->base.crtc->base.id);
7702 }
7703}
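/*
 * Net effect: the inactive -> active transition takes an extra vblank
 * reference and enables the vupdate irq, and the active -> inactive
 * transition releases both, so the get/put and irq on/off calls above
 * always pair up across VRR toggles.
 */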
7704
8ad27806
NK
7705static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7706{
7707 struct drm_plane *plane;
7708 struct drm_plane_state *old_plane_state, *new_plane_state;
7709 int i;
7710
7711 /*
7712 * TODO: Make this per-stream so we don't issue redundant updates for
7713 * commits with multiple streams.
7714 */
7715 for_each_oldnew_plane_in_state(state, plane, old_plane_state,
7716 new_plane_state, i)
7717 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7718 handle_cursor_update(plane, old_plane_state);
7719}
7720
3be5262e 7721static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
eb3dc897 7722 struct dc_state *dc_state,
3ee6b26b
AD
7723 struct drm_device *dev,
7724 struct amdgpu_display_manager *dm,
7725 struct drm_crtc *pcrtc,
420cd472 7726 bool wait_for_vblank)
e7b07cee 7727{
efc8278e 7728 uint32_t i;
8a48b44c 7729 uint64_t timestamp_ns;
e7b07cee 7730 struct drm_plane *plane;
0bc9706d 7731 struct drm_plane_state *old_plane_state, *new_plane_state;
e7b07cee 7732 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
0bc9706d
LSL
7733 struct drm_crtc_state *new_pcrtc_state =
7734 drm_atomic_get_new_crtc_state(state, pcrtc);
7735 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
44d09c6a
HW
7736 struct dm_crtc_state *dm_old_crtc_state =
7737 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
74aa7bd4 7738 int planes_count = 0, vpos, hpos;
570c91d5 7739 long r;
e7b07cee 7740 unsigned long flags;
8a48b44c 7741 struct amdgpu_bo *abo;
fdd1fe57
MK
7742 uint32_t target_vblank, last_flip_vblank;
7743 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
74aa7bd4 7744 bool pflip_present = false;
bc7f670e
DF
7745 struct {
7746 struct dc_surface_update surface_updates[MAX_SURFACES];
7747 struct dc_plane_info plane_infos[MAX_SURFACES];
7748 struct dc_scaling_info scaling_infos[MAX_SURFACES];
74aa7bd4 7749 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
bc7f670e 7750 struct dc_stream_update stream_update;
74aa7bd4 7751 } *bundle;
bc7f670e 7752
74aa7bd4 7753 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8a48b44c 7754
74aa7bd4
DF
7755 if (!bundle) {
7756 dm_error("Failed to allocate update bundle\n");
4b510503
NK
7757 goto cleanup;
7758 }
e7b07cee 7759
8ad27806
NK
7760 /*
7761 * Disable the cursor first if we're disabling all the planes.
7762 * It'll remain on the screen after the planes are re-enabled
7763 * if we don't.
7764 */
7765 if (acrtc_state->active_planes == 0)
7766 amdgpu_dm_commit_cursors(state);
7767
e7b07cee 7768 /* update planes when needed */
efc8278e 7769 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
0bc9706d 7770 struct drm_crtc *crtc = new_plane_state->crtc;
f5ba60fe 7771 struct drm_crtc_state *new_crtc_state;
0bc9706d 7772 struct drm_framebuffer *fb = new_plane_state->fb;
6eed95b0 7773 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
34bafd27 7774 bool plane_needs_flip;
c7af5f77 7775 struct dc_plane_state *dc_plane;
54d76575 7776 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
e7b07cee 7777
80c218d5
NK
7778 /* Cursor plane is handled after stream updates */
7779 if (plane->type == DRM_PLANE_TYPE_CURSOR)
e7b07cee 7780 continue;
e7b07cee 7781
f5ba60fe
DD
7782 if (!fb || !crtc || pcrtc != crtc)
7783 continue;
7784
7785 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7786 if (!new_crtc_state->active)
e7b07cee
HW
7787 continue;
7788
bc7f670e 7789 dc_plane = dm_new_plane_state->dc_state;
e7b07cee 7790
74aa7bd4 7791 bundle->surface_updates[planes_count].surface = dc_plane;
bc7f670e 7792 if (new_pcrtc_state->color_mgmt_changed) {
74aa7bd4
DF
7793 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7794 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
44efb784 7795 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
bc7f670e 7796 }
8a48b44c 7797
695af5f9
NK
7798 fill_dc_scaling_info(new_plane_state,
7799 &bundle->scaling_infos[planes_count]);
8a48b44c 7800
695af5f9
NK
7801 bundle->surface_updates[planes_count].scaling_info =
7802 &bundle->scaling_infos[planes_count];
8a48b44c 7803
f5031000 7804 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8a48b44c 7805
f5031000 7806 pflip_present = pflip_present || plane_needs_flip;
8a48b44c 7807
f5031000
DF
7808 if (!plane_needs_flip) {
7809 planes_count += 1;
7810 continue;
7811 }
8a48b44c 7812
2fac0f53
CK
7813 abo = gem_to_amdgpu_bo(fb->obj[0]);
7814
f8308898
AG
7815 /*
 7816 * Wait for all fences on this FB. Use a limited wait to avoid
 7817 * deadlock during GPU reset, when this fence will not signal
 7818 * but we still hold the reservation lock for the BO.
7819 */
52791eee 7820 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
2fac0f53 7821 false,
f8308898
AG
7822 msecs_to_jiffies(5000));
7823 if (unlikely(r <= 0))
ed8a5fb2 7824 DRM_ERROR("Waiting for fences timed out!");
2fac0f53 7825
695af5f9 7826 fill_dc_plane_info_and_addr(
8ce5d842 7827 dm->adev, new_plane_state,
6eed95b0 7828 afb->tiling_flags,
695af5f9 7829 &bundle->plane_infos[planes_count],
87b7ebc2 7830 &bundle->flip_addrs[planes_count].address,
6eed95b0 7831 afb->tmz_surface, false);
87b7ebc2
RS
7832
7833 DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7834 new_plane_state->plane->index,
7835 bundle->plane_infos[planes_count].dcc.enable);
695af5f9
NK
7836
7837 bundle->surface_updates[planes_count].plane_info =
7838 &bundle->plane_infos[planes_count];
8a48b44c 7839
caff0e66
NK
7840 /*
7841 * Only allow immediate flips for fast updates that don't
 7842 * change FB pitch, DCC state, rotation or mirroring.
7843 */
f5031000 7844 bundle->flip_addrs[planes_count].flip_immediate =
4d85f45c 7845 crtc->state->async_flip &&
caff0e66 7846 acrtc_state->update_type == UPDATE_TYPE_FAST;
8a48b44c 7847
f5031000
DF
7848 timestamp_ns = ktime_get_ns();
7849 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7850 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7851 bundle->surface_updates[planes_count].surface = dc_plane;
8a48b44c 7852
f5031000
DF
7853 if (!bundle->surface_updates[planes_count].surface) {
7854 DRM_ERROR("No surface for CRTC: id=%d\n",
7855 acrtc_attach->crtc_id);
7856 continue;
bc7f670e
DF
7857 }
7858
f5031000
DF
7859 if (plane == pcrtc->primary)
7860 update_freesync_state_on_stream(
7861 dm,
7862 acrtc_state,
7863 acrtc_state->stream,
7864 dc_plane,
7865 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
bc7f670e 7866
f5031000
DF
7867 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7868 __func__,
7869 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7870 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
bc7f670e
DF
7871
7872 planes_count += 1;
7873
8a48b44c
DF
7874 }
7875
74aa7bd4 7876 if (pflip_present) {
634092b1
MK
7877 if (!vrr_active) {
7878 /* Use old throttling in non-vrr fixed refresh rate mode
7879 * to keep flip scheduling based on target vblank counts
7880 * working in a backwards compatible way, e.g., for
7881 * clients using the GLX_OML_sync_control extension or
7882 * DRI3/Present extension with defined target_msc.
7883 */
e3eff4b5 7884 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
634092b1
MK
7885 }
7886 else {
7887 /* For variable refresh rate mode only:
7888 * Get vblank of last completed flip to avoid > 1 vrr
7889 * flips per video frame by use of throttling, but allow
7890 * flip programming anywhere in the possibly large
7891 * variable vrr vblank interval for fine-grained flip
7892 * timing control and more opportunity to avoid stutter
7893 * on late submission of flips.
7894 */
7895 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
5d1c59c4 7896 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
634092b1
MK
7897 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7898 }
7899
fdd1fe57 7900 target_vblank = last_flip_vblank + wait_for_vblank;
8a48b44c
DF
7901
7902 /*
7903 * Wait until we're out of the vertical blank period before the one
7904 * targeted by the flip
7905 */
7906 while ((acrtc_attach->enabled &&
7907 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7908 0, &vpos, &hpos, NULL,
7909 NULL, &pcrtc->hwmode)
7910 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7911 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7912 (int)(target_vblank -
e3eff4b5 7913 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8a48b44c
DF
7914 usleep_range(1000, 1100);
7915 }
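 /*
 * Illustrative numbers (assumed): with wait_for_vblank = 1 and
 * last_flip_vblank = 1000, the loop above spins in ~1 ms steps
 * while the CRTC is inside a vblank and the counter has not yet
 * reached target_vblank = 1001, throttling to at most one flip
 * per (possibly variable-length) frame.
 */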
7916
8fe684e9
NK
7917 /**
7918 * Prepare the flip event for the pageflip interrupt to handle.
7919 *
7920 * This only works in the case where we've already turned on the
7921 * appropriate hardware blocks (eg. HUBP) so in the transition case
7922 * from 0 -> n planes we have to skip a hardware generated event
7923 * and rely on sending it from software.
7924 */
7925 if (acrtc_attach->base.state->event &&
7926 acrtc_state->active_planes > 0) {
8a48b44c
DF
7927 drm_crtc_vblank_get(pcrtc);
7928
7929 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7930
7931 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7932 prepare_flip_isr(acrtc_attach);
7933
7934 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7935 }
7936
7937 if (acrtc_state->stream) {
8a48b44c 7938 if (acrtc_state->freesync_vrr_info_changed)
74aa7bd4 7939 bundle->stream_update.vrr_infopacket =
8a48b44c 7940 &acrtc_state->stream->vrr_infopacket;
e7b07cee 7941 }
e7b07cee
HW
7942 }
7943
bc92c065 7944 /* Update the planes if changed or disable if we don't have any. */
ed9656fb
ES
7945 if ((planes_count || acrtc_state->active_planes == 0) &&
7946 acrtc_state->stream) {
b6e881c9 7947 bundle->stream_update.stream = acrtc_state->stream;
bc7f670e 7948 if (new_pcrtc_state->mode_changed) {
74aa7bd4
DF
7949 bundle->stream_update.src = acrtc_state->stream->src;
7950 bundle->stream_update.dst = acrtc_state->stream->dst;
e7b07cee
HW
7951 }
7952
cf020d49
NK
7953 if (new_pcrtc_state->color_mgmt_changed) {
7954 /*
7955 * TODO: This isn't fully correct since we've actually
7956 * already modified the stream in place.
7957 */
7958 bundle->stream_update.gamut_remap =
7959 &acrtc_state->stream->gamut_remap_matrix;
7960 bundle->stream_update.output_csc_transform =
7961 &acrtc_state->stream->csc_color_matrix;
7962 bundle->stream_update.out_transfer_func =
7963 acrtc_state->stream->out_transfer_func;
7964 }
bc7f670e 7965
8a48b44c 7966 acrtc_state->stream->abm_level = acrtc_state->abm_level;
bc7f670e 7967 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
74aa7bd4 7968 bundle->stream_update.abm_level = &acrtc_state->abm_level;
44d09c6a 7969
e63e2491
EB
7970 /*
7971 * If FreeSync state on the stream has changed then we need to
7972 * re-adjust the min/max bounds now that DC doesn't handle this
7973 * as part of commit.
7974 */
7975 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7976 amdgpu_dm_vrr_active(acrtc_state)) {
7977 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7978 dc_stream_adjust_vmin_vmax(
7979 dm->dc, acrtc_state->stream,
585d450c 7980 &acrtc_attach->dm_irq_params.vrr_params.adjust);
e63e2491
EB
7981 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7982 }
bc7f670e 7983 mutex_lock(&dm->dc_lock);
8c322309 7984 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
d1ebfdd8 7985 acrtc_state->stream->link->psr_settings.psr_allow_active)
8c322309
RL
7986 amdgpu_dm_psr_disable(acrtc_state->stream);
7987
bc7f670e 7988 dc_commit_updates_for_stream(dm->dc,
74aa7bd4 7989 bundle->surface_updates,
bc7f670e
DF
7990 planes_count,
7991 acrtc_state->stream,
efc8278e
AJ
7992 &bundle->stream_update,
7993 dc_state);
8c322309 7994
8fe684e9
NK
7995 /**
7996 * Enable or disable the interrupts on the backend.
7997 *
7998 * Most pipes are put into power gating when unused.
7999 *
8000 * When power gating is enabled on a pipe we lose the
8001 * interrupt enablement state when power gating is disabled.
8002 *
8003 * So we need to update the IRQ control state in hardware
8004 * whenever the pipe turns on (since it could be previously
8005 * power gated) or off (since some pipes can't be power gated
8006 * on some ASICs).
8007 */
8008 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
1348969a
LT
8009 dm_update_pflip_irq_state(drm_to_adev(dev),
8010 acrtc_attach);
8fe684e9 8011
8c322309 8012 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
1cfbbdde 8013 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
d1ebfdd8 8014 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8c322309
RL
8015 amdgpu_dm_link_setup_psr(acrtc_state->stream);
8016 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
d1ebfdd8
WW
8017 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8018 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
8c322309
RL
8019 amdgpu_dm_psr_enable(acrtc_state->stream);
8020 }
8021
bc7f670e 8022 mutex_unlock(&dm->dc_lock);
e7b07cee 8023 }
4b510503 8024
8ad27806
NK
8025 /*
8026 * Update cursor state *after* programming all the planes.
8027 * This avoids redundant programming in the case where we're going
8028 * to be disabling a single plane - those pipes are being disabled.
8029 */
8030 if (acrtc_state->active_planes)
8031 amdgpu_dm_commit_cursors(state);
80c218d5 8032
4b510503 8033cleanup:
74aa7bd4 8034 kfree(bundle);
e7b07cee
HW
8035}
8036
6ce8f316
NK
8037static void amdgpu_dm_commit_audio(struct drm_device *dev,
8038 struct drm_atomic_state *state)
8039{
1348969a 8040 struct amdgpu_device *adev = drm_to_adev(dev);
6ce8f316
NK
8041 struct amdgpu_dm_connector *aconnector;
8042 struct drm_connector *connector;
8043 struct drm_connector_state *old_con_state, *new_con_state;
8044 struct drm_crtc_state *new_crtc_state;
8045 struct dm_crtc_state *new_dm_crtc_state;
8046 const struct dc_stream_status *status;
8047 int i, inst;
8048
8049 /* Notify device removals. */
8050 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8051 if (old_con_state->crtc != new_con_state->crtc) {
8052 /* CRTC changes require notification. */
8053 goto notify;
8054 }
8055
8056 if (!new_con_state->crtc)
8057 continue;
8058
8059 new_crtc_state = drm_atomic_get_new_crtc_state(
8060 state, new_con_state->crtc);
8061
8062 if (!new_crtc_state)
8063 continue;
8064
8065 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8066 continue;
8067
8068 notify:
8069 aconnector = to_amdgpu_dm_connector(connector);
8070
8071 mutex_lock(&adev->dm.audio_lock);
8072 inst = aconnector->audio_inst;
8073 aconnector->audio_inst = -1;
8074 mutex_unlock(&adev->dm.audio_lock);
8075
8076 amdgpu_dm_audio_eld_notify(adev, inst);
8077 }
8078
8079 /* Notify audio device additions. */
8080 for_each_new_connector_in_state(state, connector, new_con_state, i) {
8081 if (!new_con_state->crtc)
8082 continue;
8083
8084 new_crtc_state = drm_atomic_get_new_crtc_state(
8085 state, new_con_state->crtc);
8086
8087 if (!new_crtc_state)
8088 continue;
8089
8090 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8091 continue;
8092
8093 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8094 if (!new_dm_crtc_state->stream)
8095 continue;
8096
8097 status = dc_stream_get_status(new_dm_crtc_state->stream);
8098 if (!status)
8099 continue;
8100
8101 aconnector = to_amdgpu_dm_connector(connector);
8102
8103 mutex_lock(&adev->dm.audio_lock);
8104 inst = status->audio_inst;
8105 aconnector->audio_inst = inst;
8106 mutex_unlock(&adev->dm.audio_lock);
8107
8108 amdgpu_dm_audio_eld_notify(adev, inst);
8109 }
8110}
8111
1f6010a9 8112/*
27b3f4fc
LSL
8113 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8114 * @crtc_state: the DRM CRTC state
8115 * @stream_state: the DC stream state.
8116 *
8117 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
8118 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8119 */
8120static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8121 struct dc_stream_state *stream_state)
8122{
b9952f93 8123 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
27b3f4fc 8124}
e7b07cee 8125
b8592b48
LL
8126/**
8127 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8128 * @state: The atomic state to commit
8129 *
8130 * This will tell DC to commit the constructed DC state from atomic_check,
8131 * programming the hardware. Any failures here implies a hardware failure, since
8132 * atomic check should have filtered anything non-kosher.
8133 */
7578ecda 8134static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
e7b07cee
HW
8135{
8136 struct drm_device *dev = state->dev;
1348969a 8137 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
8138 struct amdgpu_display_manager *dm = &adev->dm;
8139 struct dm_atomic_state *dm_state;
eb3dc897 8140 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
e7b07cee 8141 uint32_t i, j;
5cc6dcbd 8142 struct drm_crtc *crtc;
0bc9706d 8143 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
e7b07cee
HW
8144 unsigned long flags;
8145 bool wait_for_vblank = true;
8146 struct drm_connector *connector;
c2cea706 8147 struct drm_connector_state *old_con_state, *new_con_state;
54d76575 8148 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
fe2a1965 8149 int crtc_disable_count = 0;
6ee90e88 8150 bool mode_set_reset_required = false;
e7b07cee 8151
e8a98235
RS
8152 trace_amdgpu_dm_atomic_commit_tail_begin(state);
8153
e7b07cee
HW
8154 drm_atomic_helper_update_legacy_modeset_state(dev, state);
8155
eb3dc897
NK
8156 dm_state = dm_atomic_get_new_state(state);
8157 if (dm_state && dm_state->context) {
8158 dc_state = dm_state->context;
8159 } else {
8160 /* No state changes, retain current state. */
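		/*
		 * dc_create_state() returns an empty state; copy-constructing
		 * from the current resource state below means committing it is
		 * effectively a no-op for resources that did not change.
		 */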
813d20dc 8161 dc_state_temp = dc_create_state(dm->dc);
eb3dc897
NK
8162 ASSERT(dc_state_temp);
8163 dc_state = dc_state_temp;
8164 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8165 }
e7b07cee 8166
6d90a208
AP
8167 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8168 new_crtc_state, i) {
8169 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8170
8171 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8172
8173 if (old_crtc_state->active &&
8174 (!new_crtc_state->active ||
8175 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8176 manage_dm_interrupts(adev, acrtc, false);
8177 dc_stream_release(dm_old_crtc_state->stream);
8178 }
8179 }
8180
8976f73b
RS
8181 drm_atomic_helper_calc_timestamping_constants(state);
8182
e7b07cee 8183 /* update changed items */
0bc9706d 8184 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
e7b07cee 8185 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 8186
54d76575
LSL
8187 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8188 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
e7b07cee 8189
f1ad2f5e 8190 DRM_DEBUG_DRIVER(
e7b07cee
HW
8191 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8192 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8193 "connectors_changed:%d\n",
8194 acrtc->crtc_id,
0bc9706d
LSL
8195 new_crtc_state->enable,
8196 new_crtc_state->active,
8197 new_crtc_state->planes_changed,
8198 new_crtc_state->mode_changed,
8199 new_crtc_state->active_changed,
8200 new_crtc_state->connectors_changed);
e7b07cee 8201
5c68c652
VL
8202 /* Disable cursor if disabling crtc */
8203 if (old_crtc_state->active && !new_crtc_state->active) {
8204 struct dc_cursor_position position;
8205
8206 memset(&position, 0, sizeof(position));
8207 mutex_lock(&dm->dc_lock);
8208 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8209 mutex_unlock(&dm->dc_lock);
8210 }
8211
27b3f4fc
LSL
8212 /* Copy all transient state flags into dc state */
8213 if (dm_new_crtc_state->stream) {
8214 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8215 dm_new_crtc_state->stream);
8216 }
8217
e7b07cee
HW
8218 /* handles headless hotplug case, updating new_state and
8219 * aconnector as needed
8220 */
8221
54d76575 8222 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
e7b07cee 8223
f1ad2f5e 8224 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 8225
54d76575 8226 if (!dm_new_crtc_state->stream) {
e7b07cee 8227 /*
b830ebc9
HW
8228 * This could happen because of issues with
8229 * userspace notification delivery.
8230 * In this case userspace tries to set a mode on a
1f6010a9
DF
8231 * display which is in fact disconnected.
8232 * dc_sink is NULL in this case on aconnector.
b830ebc9
HW
8233 * We expect a mode reset to come soon.
8234 *
8235 * This can also happen when an unplug is done
8236 * during the resume sequence.
8237 *
8238 * In this case, we want to pretend we still
8239 * have a sink to keep the pipe running so that
8240 * hw state is consistent with the sw state.
8241 */
f1ad2f5e 8242 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
e7b07cee
HW
8243 __func__, acrtc->base.base.id);
8244 continue;
8245 }
8246
54d76575
LSL
8247 if (dm_old_crtc_state->stream)
8248 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
e7b07cee 8249
97028037
LP
8250 pm_runtime_get_noresume(dev->dev);
8251
e7b07cee 8252 acrtc->enabled = true;
0bc9706d
LSL
8253 acrtc->hw_mode = new_crtc_state->mode;
8254 crtc->hwmode = new_crtc_state->mode;
6ee90e88 8255 mode_set_reset_required = true;
0bc9706d 8256 } else if (modereset_required(new_crtc_state)) {
f1ad2f5e 8257 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 8258 /* i.e. reset mode */
6ee90e88 8259 if (dm_old_crtc_state->stream)
54d76575 8260 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
6ee90e88 8261 mode_set_reset_required = true;
e7b07cee
HW
8262 }
8263 } /* for_each_crtc_in_state() */
8264
eb3dc897 8265 if (dc_state) {
6ee90e88 8266	/* if there is a mode set or reset, disable eDP PSR */
8267 if (mode_set_reset_required)
8268 amdgpu_dm_psr_disable_all(dm);
8269
eb3dc897 8270 dm_enable_per_frame_crtc_master_sync(dc_state);
674e78ac 8271 mutex_lock(&dm->dc_lock);
eb3dc897 8272 WARN_ON(!dc_commit_state(dm->dc, dc_state));
674e78ac 8273 mutex_unlock(&dm->dc_lock);
fa2123db 8274 }
e7b07cee 8275
0bc9706d 8276 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 8277 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 8278
54d76575 8279 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 8280
54d76575 8281 if (dm_new_crtc_state->stream != NULL) {
e7b07cee 8282 const struct dc_stream_status *status =
54d76575 8283 dc_stream_get_status(dm_new_crtc_state->stream);
e7b07cee 8284
eb3dc897 8285 if (!status)
09f609c3
LL
8286 status = dc_stream_get_status_from_state(dc_state,
8287 dm_new_crtc_state->stream);
e7b07cee 8288 if (!status)
54d76575 8289 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
e7b07cee
HW
8290 else
8291 acrtc->otg_inst = status->primary_otg_inst;
8292 }
8293 }
0c8620d6
BL
8294#ifdef CONFIG_DRM_AMD_DC_HDCP
8295 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8296 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8297 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8298 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8299
8300 new_crtc_state = NULL;
8301
8302 if (acrtc)
8303 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8304
8305 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8306
8307 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8308 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8309 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8310 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
97f6c917 8311 dm_new_con_state->update_hdcp = true;
0c8620d6
BL
8312 continue;
8313 }
8314
8315 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
b1abe558
BL
8316 hdcp_update_display(
8317 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
23eb4191 8318 new_con_state->hdcp_content_type,
b1abe558
BL
8319 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
8320 : false);
0c8620d6
BL
8321 }
8322#endif
e7b07cee 8323
02d6a6fc 8324 /* Handle connector state changes */
c2cea706 8325 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
54d76575
LSL
8326 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8327 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8328 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
efc8278e 8329 struct dc_surface_update dummy_updates[MAX_SURFACES];
19afd799 8330 struct dc_stream_update stream_update;
b232d4ed 8331 struct dc_info_packet hdr_packet;
e7b07cee 8332 struct dc_stream_status *status = NULL;
b232d4ed 8333 bool abm_changed, hdr_changed, scaling_changed;
e7b07cee 8334
efc8278e 8335 memset(&dummy_updates, 0, sizeof(dummy_updates));
19afd799
NC
8336 memset(&stream_update, 0, sizeof(stream_update));
8337
44d09c6a 8338 if (acrtc) {
0bc9706d 8339 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
44d09c6a
HW
8340 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8341 }
0bc9706d 8342
e7b07cee 8343 /* Skip any modesets/resets */
0bc9706d 8344 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
e7b07cee
HW
8345 continue;
8346
54d76575 8347 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
c1ee92f9
DF
8348 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8349
b232d4ed
NK
8350 scaling_changed = is_scaling_state_different(dm_new_con_state,
8351 dm_old_con_state);
8352
8353 abm_changed = dm_new_crtc_state->abm_level !=
8354 dm_old_crtc_state->abm_level;
8355
8356 hdr_changed =
8357 is_hdr_metadata_different(old_con_state, new_con_state);
8358
8359 if (!scaling_changed && !abm_changed && !hdr_changed)
c1ee92f9 8360 continue;
e7b07cee 8361
b6e881c9 8362 stream_update.stream = dm_new_crtc_state->stream;
b232d4ed 8363 if (scaling_changed) {
02d6a6fc 8364 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
b6e881c9 8365 dm_new_con_state, dm_new_crtc_state->stream);
e7b07cee 8366
02d6a6fc
DF
8367 stream_update.src = dm_new_crtc_state->stream->src;
8368 stream_update.dst = dm_new_crtc_state->stream->dst;
8369 }
8370
b232d4ed 8371 if (abm_changed) {
02d6a6fc
DF
8372 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8373
8374 stream_update.abm_level = &dm_new_crtc_state->abm_level;
8375 }
70e8ffc5 8376
b232d4ed
NK
8377 if (hdr_changed) {
8378 fill_hdr_info_packet(new_con_state, &hdr_packet);
8379 stream_update.hdr_static_metadata = &hdr_packet;
8380 }
8381
54d76575 8382 status = dc_stream_get_status(dm_new_crtc_state->stream);
e7b07cee 8383 WARN_ON(!status);
3be5262e 8384 WARN_ON(!status->plane_count);
e7b07cee 8385
02d6a6fc
DF
8386 /*
8387 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8388 * Here we create an empty update on each plane.
8389 * To fix this, DC should permit updating only stream properties.
8390 */
8391 for (j = 0; j < status->plane_count; j++)
efc8278e 8392 dummy_updates[j].surface = status->plane_states[0];
02d6a6fc
DF
8393
8394
8395 mutex_lock(&dm->dc_lock);
8396 dc_commit_updates_for_stream(dm->dc,
efc8278e 8397 dummy_updates,
02d6a6fc
DF
8398 status->plane_count,
8399 dm_new_crtc_state->stream,
efc8278e
AJ
8400 &stream_update,
8401 dc_state);
02d6a6fc 8402 mutex_unlock(&dm->dc_lock);
e7b07cee
HW
8403 }
8404
b5e83f6f 8405 /* Count number of newly disabled CRTCs for dropping PM refs later. */
e1fc2dca 8406 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
057be086 8407 new_crtc_state, i) {
fe2a1965
LP
8408 if (old_crtc_state->active && !new_crtc_state->active)
8409 crtc_disable_count++;
8410
54d76575 8411 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e1fc2dca 8412 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
66b0c973 8413
585d450c
AP
8414		/* Update freesync config on the crtc state and the params for the irq */
8415 update_stream_irq_parameters(dm, dm_new_crtc_state);
057be086 8416
66b0c973
MK
8417 /* Handle vrr on->off / off->on transitions */
8418 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8419 dm_new_crtc_state);
e7b07cee
HW
8420 }
8421
8fe684e9
NK
8422	/*
8423	 * Enable interrupts for CRTCs that are newly enabled or went through
8424	 * a modeset. This is intentionally deferred until after the front end
8425	 * state is modified so that the OTG is already on when the IRQ
8426	 * handlers run and they don't access stale or invalid state.
8427 */
8428 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8429 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8430
585d450c
AP
8431 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8432
8fe684e9
NK
8433 if (new_crtc_state->active &&
8434 (!old_crtc_state->active ||
8435 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
585d450c
AP
8436 dc_stream_retain(dm_new_crtc_state->stream);
8437 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8fe684e9 8438 manage_dm_interrupts(adev, acrtc, true);
e2881d6d 8439
24eb9374 8440#ifdef CONFIG_DEBUG_FS
8fe684e9
NK
8441			/*
8442 * Frontend may have changed so reapply the CRC capture
8443 * settings for the stream.
8444 */
8445 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
c920888c 8446
e2881d6d 8447 if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
c920888c 8448 amdgpu_dm_crtc_configure_crc_source(
e2881d6d
RS
8449 crtc, dm_new_crtc_state,
8450 dm_new_crtc_state->crc_src);
8451 }
24eb9374 8452#endif
8fe684e9
NK
8453 }
8454 }
e7b07cee 8455
420cd472 8456 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
4d85f45c 8457 if (new_crtc_state->async_flip)
420cd472
DF
8458 wait_for_vblank = false;
8459
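	/*
	 * An async (tearing) flip on any CRTC means the plane programming
	 * below must not stall waiting for vblank.
	 */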
e7b07cee 8460	/* update planes when needed per crtc */
5cc6dcbd 8461 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
54d76575 8462 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 8463
54d76575 8464 if (dm_new_crtc_state->stream)
eb3dc897 8465 amdgpu_dm_commit_planes(state, dc_state, dev,
420cd472 8466 dm, crtc, wait_for_vblank);
e7b07cee
HW
8467 }
8468
6ce8f316
NK
8469 /* Update audio instances for each connector. */
8470 amdgpu_dm_commit_audio(dev, state);
8471
e7b07cee
HW
8472 /*
8473 * send vblank event on all events not handled in flip and
8474 * mark consumed event for drm_atomic_helper_commit_hw_done
8475 */
4a580877 8476 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
0bc9706d 8477 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 8478
0bc9706d
LSL
8479 if (new_crtc_state->event)
8480 drm_send_event_locked(dev, &new_crtc_state->event->base);
e7b07cee 8481
0bc9706d 8482 new_crtc_state->event = NULL;
e7b07cee 8483 }
4a580877 8484 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
e7b07cee 8485
29c8f234
LL
8486 /* Signal HW programming completion */
8487 drm_atomic_helper_commit_hw_done(state);
e7b07cee
HW
8488
8489 if (wait_for_vblank)
320a1274 8490 drm_atomic_helper_wait_for_flip_done(dev, state);
e7b07cee
HW
8491
8492 drm_atomic_helper_cleanup_planes(dev, state);
97028037 8493
5f6fab24
AD
8494 /* return the stolen vga memory back to VRAM */
8495 if (!adev->mman.keep_stolen_vga_memory)
8496 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8497 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8498
1f6010a9
DF
8499 /*
8500 * Finally, drop a runtime PM reference for each newly disabled CRTC,
97028037
LP
8501 * so we can put the GPU into runtime suspend if we're not driving any
8502 * displays anymore
8503 */
fe2a1965
LP
8504 for (i = 0; i < crtc_disable_count; i++)
8505 pm_runtime_put_autosuspend(dev->dev);
97028037 8506 pm_runtime_mark_last_busy(dev->dev);
eb3dc897
NK
8507
8508 if (dc_state_temp)
8509 dc_release_state(dc_state_temp);
e7b07cee
HW
8510}
8511
8512
8513static int dm_force_atomic_commit(struct drm_connector *connector)
8514{
8515 int ret = 0;
8516 struct drm_device *ddev = connector->dev;
8517 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8518 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8519 struct drm_plane *plane = disconnected_acrtc->base.primary;
8520 struct drm_connector_state *conn_state;
8521 struct drm_crtc_state *crtc_state;
8522 struct drm_plane_state *plane_state;
8523
8524 if (!state)
8525 return -ENOMEM;
8526
8527 state->acquire_ctx = ddev->mode_config.acquire_ctx;
8528
8529 /* Construct an atomic state to restore previous display setting */
8530
8531 /*
8532 * Attach connectors to drm_atomic_state
8533 */
8534 conn_state = drm_atomic_get_connector_state(state, connector);
8535
8536 ret = PTR_ERR_OR_ZERO(conn_state);
8537 if (ret)
2dc39051 8538 goto out;
e7b07cee
HW
8539
8540 /* Attach crtc to drm_atomic_state*/
8541 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8542
8543 ret = PTR_ERR_OR_ZERO(crtc_state);
8544 if (ret)
2dc39051 8545 goto out;
e7b07cee
HW
8546
8547 /* force a restore */
8548 crtc_state->mode_changed = true;
8549
8550 /* Attach plane to drm_atomic_state */
8551 plane_state = drm_atomic_get_plane_state(state, plane);
8552
8553 ret = PTR_ERR_OR_ZERO(plane_state);
8554 if (ret)
2dc39051 8555 goto out;
e7b07cee
HW
8556
8557 /* Call commit internally with the state we just constructed */
8558 ret = drm_atomic_commit(state);
e7b07cee 8559
2dc39051 8560out:
e7b07cee 8561 drm_atomic_state_put(state);
2dc39051
VL
8562 if (ret)
8563 DRM_ERROR("Restoring old state failed with %i\n", ret);
e7b07cee
HW
8564
8565 return ret;
8566}
8567
8568/*
1f6010a9
DF
8569 * This function handles all cases when set mode does not come upon hotplug.
8570 * This includes when a display is unplugged then plugged back into the
8571 * same port and when running without usermode desktop manager support.
e7b07cee 8572 */
3ee6b26b
AD
8573void dm_restore_drm_connector_state(struct drm_device *dev,
8574 struct drm_connector *connector)
e7b07cee 8575{
c84dec2f 8576 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
8577 struct amdgpu_crtc *disconnected_acrtc;
8578 struct dm_crtc_state *acrtc_state;
8579
8580 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8581 return;
8582
8583 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
70e8ffc5
HW
8584 if (!disconnected_acrtc)
8585 return;
e7b07cee 8586
70e8ffc5
HW
8587 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8588 if (!acrtc_state->stream)
e7b07cee
HW
8589 return;
8590
8591 /*
8592 * If the previous sink is not released and different from the current,
8593	 * we deduce we are in a state where we cannot rely on a usermode call
8594	 * to turn on the display, so we do it here
8595 */
8596 if (acrtc_state->stream->sink != aconnector->dc_sink)
8597 dm_force_atomic_commit(&aconnector->base);
8598}
8599
1f6010a9 8600/*
e7b07cee
HW
8601 * Grabs all modesetting locks to serialize against any blocking commits,
8602 * and waits for completion of all non-blocking commits.
8603 */
3ee6b26b
AD
8604static int do_aquire_global_lock(struct drm_device *dev,
8605 struct drm_atomic_state *state)
e7b07cee
HW
8606{
8607 struct drm_crtc *crtc;
8608 struct drm_crtc_commit *commit;
8609 long ret;
8610
1f6010a9
DF
8611 /*
8612	 * Adding all modeset locks to acquire_ctx will
e7b07cee
HW
8613	 * ensure that when the framework releases it, the
8614	 * extra locks we are locking here will get released too.
8615 */
8616 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8617 if (ret)
8618 return ret;
8619
8620 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8621 spin_lock(&crtc->commit_lock);
8622 commit = list_first_entry_or_null(&crtc->commit_list,
8623 struct drm_crtc_commit, commit_entry);
8624 if (commit)
8625 drm_crtc_commit_get(commit);
8626 spin_unlock(&crtc->commit_lock);
8627
8628 if (!commit)
8629 continue;
8630
1f6010a9
DF
8631 /*
8632		 * Make sure all pending HW programming has completed and
e7b07cee
HW
8633		 * page flips are done
8634 */
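		/*
		 * Note: 10*HZ is ten seconds regardless of CONFIG_HZ. These
		 * waits return 0 on timeout, a negative error if interrupted
		 * and the remaining jiffies otherwise.
		 */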
8635 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8636
8637 if (ret > 0)
8638 ret = wait_for_completion_interruptible_timeout(
8639 &commit->flip_done, 10*HZ);
8640
8641 if (ret == 0)
8642 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
b830ebc9 8643 "timed out\n", crtc->base.id, crtc->name);
e7b07cee
HW
8644
8645 drm_crtc_commit_put(commit);
8646 }
8647
8648 return ret < 0 ? ret : 0;
8649}
8650
bb47de73
NK
8651static void get_freesync_config_for_crtc(
8652 struct dm_crtc_state *new_crtc_state,
8653 struct dm_connector_state *new_con_state)
98e6436d
AK
8654{
8655 struct mod_freesync_config config = {0};
98e6436d
AK
8656 struct amdgpu_dm_connector *aconnector =
8657 to_amdgpu_dm_connector(new_con_state->base.connector);
a057ec46 8658 struct drm_display_mode *mode = &new_crtc_state->base.mode;
0ab925d3 8659 int vrefresh = drm_mode_vrefresh(mode);
98e6436d 8660
a057ec46 8661 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
0ab925d3
NK
8662 vrefresh >= aconnector->min_vfreq &&
8663 vrefresh <= aconnector->max_vfreq;
bb47de73 8664
a057ec46
IB
8665 if (new_crtc_state->vrr_supported) {
8666 new_crtc_state->stream->ignore_msa_timing_param = true;
bb47de73 8667 config.state = new_crtc_state->base.vrr_enabled ?
98e6436d
AK
8668 VRR_STATE_ACTIVE_VARIABLE :
8669 VRR_STATE_INACTIVE;
8670 config.min_refresh_in_uhz =
8671 aconnector->min_vfreq * 1000000;
8672 config.max_refresh_in_uhz =
8673 aconnector->max_vfreq * 1000000;
69ff8845 8674 config.vsif_supported = true;
180db303 8675 config.btr = true;
98e6436d
AK
8676 }
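	/*
	 * Illustrative example: a 48-144 Hz FreeSync panel driven with a
	 * 60 Hz base mode yields vrr_supported == true and a configured
	 * range of 48,000,000-144,000,000 uHz.
	 */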
8677
bb47de73
NK
8678 new_crtc_state->freesync_config = config;
8679}
98e6436d 8680
bb47de73
NK
8681static void reset_freesync_config_for_crtc(
8682 struct dm_crtc_state *new_crtc_state)
8683{
8684 new_crtc_state->vrr_supported = false;
98e6436d 8685
bb47de73
NK
8686 memset(&new_crtc_state->vrr_infopacket, 0,
8687 sizeof(new_crtc_state->vrr_infopacket));
98e6436d
AK
8688}
8689
4b9674e5
LL
8690static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8691 struct drm_atomic_state *state,
8692 struct drm_crtc *crtc,
8693 struct drm_crtc_state *old_crtc_state,
8694 struct drm_crtc_state *new_crtc_state,
8695 bool enable,
8696 bool *lock_and_validation_needed)
e7b07cee 8697{
eb3dc897 8698 struct dm_atomic_state *dm_state = NULL;
54d76575 8699 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9635b754 8700 struct dc_stream_state *new_stream;
62f55537 8701 int ret = 0;
d4d4a645 8702
1f6010a9
DF
8703 /*
8704 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
8705 * update changed items
8706 */
4b9674e5
LL
8707 struct amdgpu_crtc *acrtc = NULL;
8708 struct amdgpu_dm_connector *aconnector = NULL;
8709 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8710 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
e7b07cee 8711
4b9674e5 8712 new_stream = NULL;
9635b754 8713
4b9674e5
LL
8714 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8715 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8716 acrtc = to_amdgpu_crtc(crtc);
4b9674e5 8717 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
19f89e23 8718
4b9674e5
LL
8719 /* TODO This hack should go away */
8720 if (aconnector && enable) {
8721 /* Make sure fake sink is created in plug-in scenario */
8722 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8723 &aconnector->base);
8724 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8725 &aconnector->base);
19f89e23 8726
4b9674e5
LL
8727 if (IS_ERR(drm_new_conn_state)) {
8728 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8729 goto fail;
8730 }
19f89e23 8731
4b9674e5
LL
8732 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8733 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
19f89e23 8734
02d35a67
JFZ
8735 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8736 goto skip_modeset;
8737
cbd14ae7
SW
8738 new_stream = create_validate_stream_for_sink(aconnector,
8739 &new_crtc_state->mode,
8740 dm_new_conn_state,
8741 dm_old_crtc_state->stream);
19f89e23 8742
4b9674e5
LL
8743 /*
8744		 * We can have no stream on ACTION_SET if a display
8745		 * was disconnected during S3; in this case it is not an
8746		 * error, the OS will be updated after detection, and
8747		 * will do the right thing on the next atomic commit.
8748 */
19f89e23 8749
4b9674e5
LL
8750 if (!new_stream) {
8751 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8752 __func__, acrtc->base.base.id);
8753 ret = -ENOMEM;
8754 goto fail;
8755 }
e7b07cee 8756
3d4e52d0
VL
8757 /*
8758 * TODO: Check VSDB bits to decide whether this should
8759 * be enabled or not.
8760 */
8761 new_stream->triggered_crtc_reset.enabled =
8762 dm->force_timing_sync;
8763
4b9674e5 8764 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
98e6436d 8765
88694af9
NK
8766 ret = fill_hdr_info_packet(drm_new_conn_state,
8767 &new_stream->hdr_static_metadata);
8768 if (ret)
8769 goto fail;
8770
7e930949
NK
8771 /*
8772 * If we already removed the old stream from the context
8773 * (and set the new stream to NULL) then we can't reuse
8774 * the old stream even if the stream and scaling are unchanged.
8775 * We'll hit the BUG_ON and black screen.
8776 *
8777 * TODO: Refactor this function to allow this check to work
8778 * in all conditions.
8779 */
8780 if (dm_new_crtc_state->stream &&
8781 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
4b9674e5
LL
8782 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8783 new_crtc_state->mode_changed = false;
8784 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
8785 new_crtc_state->mode_changed);
62f55537 8786 }
4b9674e5 8787 }
b830ebc9 8788
02d35a67 8789 /* mode_changed flag may get updated above, need to check again */
4b9674e5
LL
8790 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8791 goto skip_modeset;
e7b07cee 8792
4b9674e5
LL
8793 DRM_DEBUG_DRIVER(
8794 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8795 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8796 "connectors_changed:%d\n",
8797 acrtc->crtc_id,
8798 new_crtc_state->enable,
8799 new_crtc_state->active,
8800 new_crtc_state->planes_changed,
8801 new_crtc_state->mode_changed,
8802 new_crtc_state->active_changed,
8803 new_crtc_state->connectors_changed);
62f55537 8804
4b9674e5
LL
8805 /* Remove stream for any changed/disabled CRTC */
8806 if (!enable) {
62f55537 8807
4b9674e5
LL
8808 if (!dm_old_crtc_state->stream)
8809 goto skip_modeset;
eb3dc897 8810
4b9674e5
LL
8811 ret = dm_atomic_get_state(state, &dm_state);
8812 if (ret)
8813 goto fail;
e7b07cee 8814
4b9674e5
LL
8815 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8816 crtc->base.id);
62f55537 8817
4b9674e5
LL
8818 /* i.e. reset mode */
8819 if (dc_remove_stream_from_ctx(
8820 dm->dc,
8821 dm_state->context,
8822 dm_old_crtc_state->stream) != DC_OK) {
8823 ret = -EINVAL;
8824 goto fail;
8825 }
62f55537 8826
4b9674e5
LL
8827 dc_stream_release(dm_old_crtc_state->stream);
8828 dm_new_crtc_state->stream = NULL;
bb47de73 8829
4b9674e5 8830 reset_freesync_config_for_crtc(dm_new_crtc_state);
62f55537 8831
4b9674e5 8832 *lock_and_validation_needed = true;
62f55537 8833
4b9674e5
LL
8834 } else {/* Add stream for any updated/enabled CRTC */
8835 /*
8836 * Quick fix to prevent NULL pointer on new_stream when
8837 * added MST connectors not found in existing crtc_state in the chained mode
8838 * TODO: need to dig out the root cause of that
8839 */
8840 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8841 goto skip_modeset;
62f55537 8842
4b9674e5
LL
8843 if (modereset_required(new_crtc_state))
8844 goto skip_modeset;
62f55537 8845
4b9674e5
LL
8846 if (modeset_required(new_crtc_state, new_stream,
8847 dm_old_crtc_state->stream)) {
62f55537 8848
4b9674e5 8849 WARN_ON(dm_new_crtc_state->stream);
eb3dc897 8850
4b9674e5
LL
8851 ret = dm_atomic_get_state(state, &dm_state);
8852 if (ret)
8853 goto fail;
27b3f4fc 8854
4b9674e5 8855 dm_new_crtc_state->stream = new_stream;
62f55537 8856
4b9674e5 8857 dc_stream_retain(new_stream);
1dc90497 8858
4b9674e5
LL
8859 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8860 crtc->base.id);
1dc90497 8861
4b9674e5
LL
8862 if (dc_add_stream_to_ctx(
8863 dm->dc,
8864 dm_state->context,
8865 dm_new_crtc_state->stream) != DC_OK) {
8866 ret = -EINVAL;
8867 goto fail;
9b690ef3
BL
8868 }
8869
4b9674e5
LL
8870 *lock_and_validation_needed = true;
8871 }
8872 }
e277adc5 8873
4b9674e5
LL
8874skip_modeset:
8875 /* Release extra reference */
8876 if (new_stream)
8877 dc_stream_release(new_stream);
e277adc5 8878
4b9674e5
LL
8879 /*
8880 * We want to do dc stream updates that do not require a
8881 * full modeset below.
8882 */
2afda735 8883 if (!(enable && aconnector && new_crtc_state->active))
4b9674e5
LL
8884 return 0;
8885 /*
8886 * Given above conditions, the dc state cannot be NULL because:
8887 * 1. We're in the process of enabling CRTCs (just been added
8888 * to the dc context, or already is on the context)
8889 * 2. Has a valid connector attached, and
8890 * 3. Is currently active and enabled.
8891 * => The dc stream state currently exists.
8892 */
8893 BUG_ON(dm_new_crtc_state->stream == NULL);
a9e8d275 8894
4b9674e5
LL
8895 /* Scaling or underscan settings */
8896 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8897 update_stream_scaling_settings(
8898 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
98e6436d 8899
b05e2c5e
DF
8900 /* ABM settings */
8901 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8902
4b9674e5
LL
8903 /*
8904 * Color management settings. We also update color properties
8905 * when a modeset is needed, to ensure it gets reprogrammed.
8906 */
8907 if (dm_new_crtc_state->base.color_mgmt_changed ||
8908 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
cf020d49 8909 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
4b9674e5
LL
8910 if (ret)
8911 goto fail;
62f55537 8912 }
e7b07cee 8913
4b9674e5
LL
8914 /* Update Freesync settings. */
8915 get_freesync_config_for_crtc(dm_new_crtc_state,
8916 dm_new_conn_state);
8917
62f55537 8918 return ret;
9635b754
DS
8919
8920fail:
8921 if (new_stream)
8922 dc_stream_release(new_stream);
8923 return ret;
62f55537 8924}
9b690ef3 8925
f6ff2a08
NK
8926static bool should_reset_plane(struct drm_atomic_state *state,
8927 struct drm_plane *plane,
8928 struct drm_plane_state *old_plane_state,
8929 struct drm_plane_state *new_plane_state)
8930{
8931 struct drm_plane *other;
8932 struct drm_plane_state *old_other_state, *new_other_state;
8933 struct drm_crtc_state *new_crtc_state;
8934 int i;
8935
70a1efac
NK
8936 /*
8937	 * TODO: Remove this hack once the checks below are sufficient
8938	 * to determine when we need to reset all the planes on
8939 * the stream.
8940 */
8941 if (state->allow_modeset)
8942 return true;
8943
f6ff2a08
NK
8944 /* Exit early if we know that we're adding or removing the plane. */
8945 if (old_plane_state->crtc != new_plane_state->crtc)
8946 return true;
8947
8948 /* old crtc == new_crtc == NULL, plane not in context. */
8949 if (!new_plane_state->crtc)
8950 return false;
8951
8952 new_crtc_state =
8953 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8954
8955 if (!new_crtc_state)
8956 return true;
8957
7316c4ad
NK
8958 /* CRTC Degamma changes currently require us to recreate planes. */
8959 if (new_crtc_state->color_mgmt_changed)
8960 return true;
8961
f6ff2a08
NK
8962 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8963 return true;
8964
8965 /*
8966 * If there are any new primary or overlay planes being added or
8967 * removed then the z-order can potentially change. To ensure
8968 * correct z-order and pipe acquisition the current DC architecture
8969 * requires us to remove and recreate all existing planes.
8970 *
8971 * TODO: Come up with a more elegant solution for this.
8972 */
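	/*
	 * Illustrative example: enabling an overlay plane on a CRTC whose
	 * primary is already active forces the primary to be removed and
	 * re-added too, so DC can redo pipe acquisition and blending order.
	 */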
8973 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
6eed95b0 8974 struct amdgpu_framebuffer *old_afb, *new_afb;
f6ff2a08
NK
8975 if (other->type == DRM_PLANE_TYPE_CURSOR)
8976 continue;
8977
8978 if (old_other_state->crtc != new_plane_state->crtc &&
8979 new_other_state->crtc != new_plane_state->crtc)
8980 continue;
8981
8982 if (old_other_state->crtc != new_other_state->crtc)
8983 return true;
8984
dc4cb30d
NK
8985 /* Src/dst size and scaling updates. */
8986 if (old_other_state->src_w != new_other_state->src_w ||
8987 old_other_state->src_h != new_other_state->src_h ||
8988 old_other_state->crtc_w != new_other_state->crtc_w ||
8989 old_other_state->crtc_h != new_other_state->crtc_h)
8990 return true;
8991
8992 /* Rotation / mirroring updates. */
8993 if (old_other_state->rotation != new_other_state->rotation)
8994 return true;
8995
8996 /* Blending updates. */
8997 if (old_other_state->pixel_blend_mode !=
8998 new_other_state->pixel_blend_mode)
8999 return true;
9000
9001 /* Alpha updates. */
9002 if (old_other_state->alpha != new_other_state->alpha)
9003 return true;
9004
9005 /* Colorspace changes. */
9006 if (old_other_state->color_range != new_other_state->color_range ||
9007 old_other_state->color_encoding != new_other_state->color_encoding)
9008 return true;
9009
9a81cc60
NK
9010 /* Framebuffer checks fall at the end. */
9011 if (!old_other_state->fb || !new_other_state->fb)
9012 continue;
9013
9014 /* Pixel format changes can require bandwidth updates. */
9015 if (old_other_state->fb->format != new_other_state->fb->format)
9016 return true;
9017
6eed95b0
BN
9018 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9019 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9a81cc60
NK
9020
9021 /* Tiling and DCC changes also require bandwidth updates. */
37384b3f
BN
9022 if (old_afb->tiling_flags != new_afb->tiling_flags ||
9023 old_afb->base.modifier != new_afb->base.modifier)
f6ff2a08
NK
9024 return true;
9025 }
9026
9027 return false;
9028}
9029
b0455fda
SS
9030static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9031 struct drm_plane_state *new_plane_state,
9032 struct drm_framebuffer *fb)
9033{
e72868c4
SS
9034 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9035 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
b0455fda 9036 unsigned int pitch;
e72868c4 9037 bool linear;
b0455fda
SS
9038
9039 if (fb->width > new_acrtc->max_cursor_width ||
9040 fb->height > new_acrtc->max_cursor_height) {
9041 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9042 new_plane_state->fb->width,
9043 new_plane_state->fb->height);
9044 return -EINVAL;
9045 }
9046 if (new_plane_state->src_w != fb->width << 16 ||
9047 new_plane_state->src_h != fb->height << 16) {
9048 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9049 return -EINVAL;
9050 }
9051
9052 /* Pitch in pixels */
9053 pitch = fb->pitches[0] / fb->format->cpp[0];
9054
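	/*
	 * Illustrative example: a 64x64 ARGB8888 cursor FB has
	 * pitches[0] = 256 bytes and cpp[0] = 4, so pitch = 64 px, which
	 * passes both the width == pitch check and the switch below.
	 */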
9055 if (fb->width != pitch) {
9056 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9057 fb->width, pitch);
9058 return -EINVAL;
9059 }
9060
9061 switch (pitch) {
9062 case 64:
9063 case 128:
9064 case 256:
9065 /* FB pitch is supported by cursor plane */
9066 break;
9067 default:
9068 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9069 return -EINVAL;
9070 }
9071
e72868c4
SS
9072 /* Core DRM takes care of checking FB modifiers, so we only need to
9073 * check tiling flags when the FB doesn't have a modifier. */
9074 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9075 if (adev->family < AMDGPU_FAMILY_AI) {
9076 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9077 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9078 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9079 } else {
9080 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9081 }
9082 if (!linear) {
9083 DRM_DEBUG_ATOMIC("Cursor FB not linear");
9084 return -EINVAL;
9085 }
9086 }
9087
b0455fda
SS
9088 return 0;
9089}
9090
9e869063
LL
9091static int dm_update_plane_state(struct dc *dc,
9092 struct drm_atomic_state *state,
9093 struct drm_plane *plane,
9094 struct drm_plane_state *old_plane_state,
9095 struct drm_plane_state *new_plane_state,
9096 bool enable,
9097 bool *lock_and_validation_needed)
62f55537 9098{
eb3dc897
NK
9099
9100 struct dm_atomic_state *dm_state = NULL;
62f55537 9101 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
0bc9706d 9102 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
54d76575 9103 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
54d76575 9104 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
626bf90f 9105 struct amdgpu_crtc *new_acrtc;
f6ff2a08 9106 bool needs_reset;
62f55537 9107 int ret = 0;
e7b07cee 9108
9b690ef3 9109
9e869063
LL
9110 new_plane_crtc = new_plane_state->crtc;
9111 old_plane_crtc = old_plane_state->crtc;
9112 dm_new_plane_state = to_dm_plane_state(new_plane_state);
9113 dm_old_plane_state = to_dm_plane_state(old_plane_state);
62f55537 9114
626bf90f
SS
9115 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9116 if (!enable || !new_plane_crtc ||
9117 drm_atomic_plane_disabling(plane->state, new_plane_state))
9118 return 0;
9119
9120 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9121
5f581248
SS
9122 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9123 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9124 return -EINVAL;
9125 }
9126
24f99d2b 9127 if (new_plane_state->fb) {
b0455fda
SS
9128 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9129 new_plane_state->fb);
9130 if (ret)
9131 return ret;
24f99d2b
SS
9132 }
9133
9e869063 9134 return 0;
626bf90f 9135 }
9b690ef3 9136
f6ff2a08
NK
9137 needs_reset = should_reset_plane(state, plane, old_plane_state,
9138 new_plane_state);
9139
9e869063
LL
9140 /* Remove any changed/removed planes */
9141 if (!enable) {
f6ff2a08 9142 if (!needs_reset)
9e869063 9143 return 0;
a7b06724 9144
9e869063
LL
9145 if (!old_plane_crtc)
9146 return 0;
62f55537 9147
9e869063
LL
9148 old_crtc_state = drm_atomic_get_old_crtc_state(
9149 state, old_plane_crtc);
9150 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9b690ef3 9151
9e869063
LL
9152 if (!dm_old_crtc_state->stream)
9153 return 0;
62f55537 9154
9e869063
LL
9155 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9156 plane->base.id, old_plane_crtc->base.id);
9b690ef3 9157
9e869063
LL
9158 ret = dm_atomic_get_state(state, &dm_state);
9159 if (ret)
9160 return ret;
eb3dc897 9161
9e869063
LL
9162 if (!dc_remove_plane_from_context(
9163 dc,
9164 dm_old_crtc_state->stream,
9165 dm_old_plane_state->dc_state,
9166 dm_state->context)) {
62f55537 9167
c3537613 9168 return -EINVAL;
9e869063 9169 }
e7b07cee 9170
9b690ef3 9171
9e869063
LL
9172 dc_plane_state_release(dm_old_plane_state->dc_state);
9173 dm_new_plane_state->dc_state = NULL;
1dc90497 9174
9e869063 9175 *lock_and_validation_needed = true;
1dc90497 9176
9e869063
LL
9177 } else { /* Add new planes */
9178 struct dc_plane_state *dc_new_plane_state;
1dc90497 9179
9e869063
LL
9180 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9181 return 0;
e7b07cee 9182
9e869063
LL
9183 if (!new_plane_crtc)
9184 return 0;
e7b07cee 9185
9e869063
LL
9186 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9187 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1dc90497 9188
9e869063
LL
9189 if (!dm_new_crtc_state->stream)
9190 return 0;
62f55537 9191
f6ff2a08 9192 if (!needs_reset)
9e869063 9193 return 0;
62f55537 9194
8c44515b
AP
9195 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9196 if (ret)
9197 return ret;
9198
9e869063 9199 WARN_ON(dm_new_plane_state->dc_state);
9b690ef3 9200
9e869063
LL
9201 dc_new_plane_state = dc_create_plane_state(dc);
9202 if (!dc_new_plane_state)
9203 return -ENOMEM;
62f55537 9204
9e869063
LL
9205 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
9206 plane->base.id, new_plane_crtc->base.id);
8c45c5db 9207
695af5f9 9208 ret = fill_dc_plane_attributes(
1348969a 9209 drm_to_adev(new_plane_crtc->dev),
9e869063
LL
9210 dc_new_plane_state,
9211 new_plane_state,
9212 new_crtc_state);
9213 if (ret) {
9214 dc_plane_state_release(dc_new_plane_state);
9215 return ret;
9216 }
62f55537 9217
9e869063
LL
9218 ret = dm_atomic_get_state(state, &dm_state);
9219 if (ret) {
9220 dc_plane_state_release(dc_new_plane_state);
9221 return ret;
9222 }
eb3dc897 9223
9e869063
LL
9224 /*
9225 * Any atomic check errors that occur after this will
9226 * not need a release. The plane state will be attached
9227 * to the stream, and therefore part of the atomic
9228 * state. It'll be released when the atomic state is
9229 * cleaned.
9230 */
9231 if (!dc_add_plane_to_context(
9232 dc,
9233 dm_new_crtc_state->stream,
9234 dc_new_plane_state,
9235 dm_state->context)) {
62f55537 9236
9e869063
LL
9237 dc_plane_state_release(dc_new_plane_state);
9238 return -EINVAL;
9239 }
8c45c5db 9240
9e869063 9241 dm_new_plane_state->dc_state = dc_new_plane_state;
000b59ea 9242
9e869063
LL
9243 /* Tell DC to do a full surface update every time there
9244 * is a plane change. Inefficient, but works for now.
9245 */
9246 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9247
9248 *lock_and_validation_needed = true;
62f55537 9249 }
e7b07cee
HW
9250
9251
62f55537
AG
9252 return ret;
9253}
a87fa993 9254
12f4849a
SS
9255static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9256 struct drm_crtc *crtc,
9257 struct drm_crtc_state *new_crtc_state)
9258{
9259 struct drm_plane_state *new_cursor_state, *new_primary_state;
9260 int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9261
9262	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9263	 * cursor per pipe but it's going to inherit the scaling and
9264	 * positioning from the underlying pipe. Check that the cursor plane's
9265	 * scaling matches the primary plane's. */
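	/*
	 * Illustrative example: a 64x64 cursor FB shown at 64x64 scales by
	 * 64 * 1000 / 64 = 1000 (1.0x); a primary scanning a 1920x1080 FB
	 * out at 3840x2160 scales by 2000 (2.0x), and the mismatch below
	 * rejects the commit.
	 */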
9266
9267 new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9268 new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9269 if (!new_cursor_state || !new_primary_state || !new_cursor_state->fb) {
9270 return 0;
9271 }
9272
9273 cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9274 (new_cursor_state->src_w >> 16);
9275 cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9276 (new_cursor_state->src_h >> 16);
9277
9278 primary_scale_w = new_primary_state->crtc_w * 1000 /
9279 (new_primary_state->src_w >> 16);
9280 primary_scale_h = new_primary_state->crtc_h * 1000 /
9281 (new_primary_state->src_h >> 16);
9282
9283 if (cursor_scale_w != primary_scale_w ||
9284 cursor_scale_h != primary_scale_h) {
9285 DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9286 return -EINVAL;
9287 }
9288
9289 return 0;
9290}
9291
e10517b3 9292#if defined(CONFIG_DRM_AMD_DC_DCN)
44be939f
ML
9293static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9294{
9295 struct drm_connector *connector;
9296 struct drm_connector_state *conn_state;
9297 struct amdgpu_dm_connector *aconnector = NULL;
9298 int i;
9299 for_each_new_connector_in_state(state, connector, conn_state, i) {
9300 if (conn_state->crtc != crtc)
9301 continue;
9302
9303 aconnector = to_amdgpu_dm_connector(connector);
9304 if (!aconnector->port || !aconnector->mst_port)
9305 aconnector = NULL;
9306 else
9307 break;
9308 }
9309
9310 if (!aconnector)
9311 return 0;
9312
9313 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9314}
e10517b3 9315#endif
44be939f 9316
b8592b48
LL
9317/**
9318 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9319 * @dev: The DRM device
9320 * @state: The atomic state to commit
9321 *
9322 * Validate that the given atomic state is programmable by DC into hardware.
9323 * This involves constructing a &struct dc_state reflecting the new hardware
9324 * state we wish to commit, then querying DC to see if it is programmable. It's
9325 * important not to modify the existing DC state. Otherwise, atomic_check
9326 * may unexpectedly commit hardware changes.
9327 *
9328 * When validating the DC state, it's important that the right locks are
9329 * acquired. For full updates case which removes/adds/updates streams on one
9330 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
9331 * that any such full update commit will wait for completion of any outstanding
f6d7c7fa 9332 * flip using DRMs synchronization events.
b8592b48
LL
9333 *
9334 * Note that DM adds the affected connectors for all CRTCs in state, when that
9335 * might not seem necessary. This is because DC stream creation requires the
9336 * DC sink, which is tied to the DRM connector state. Cleaning this up should
9337 * be possible but non-trivial - a possible TODO item.
9338 *
9339 * Return: -Error code if validation failed.
9340 */
7578ecda
AD
9341static int amdgpu_dm_atomic_check(struct drm_device *dev,
9342 struct drm_atomic_state *state)
62f55537 9343{
1348969a 9344 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897 9345 struct dm_atomic_state *dm_state = NULL;
62f55537 9346 struct dc *dc = adev->dm.dc;
62f55537 9347 struct drm_connector *connector;
c2cea706 9348 struct drm_connector_state *old_con_state, *new_con_state;
62f55537 9349 struct drm_crtc *crtc;
fc9e9920 9350 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9e869063
LL
9351 struct drm_plane *plane;
9352 struct drm_plane_state *old_plane_state, *new_plane_state;
74a16675 9353 enum dc_status status;
1e88ad0a 9354 int ret, i;
62f55537 9355 bool lock_and_validation_needed = false;
886876ec 9356 struct dm_crtc_state *dm_old_crtc_state;
62f55537 9357
e8a98235 9358 trace_amdgpu_dm_atomic_check_begin(state);
c44a22b3 9359
62f55537 9360 ret = drm_atomic_helper_check_modeset(dev, state);
01e28f9c
MD
9361 if (ret)
9362 goto fail;
62f55537 9363
c5892a10
SW
9364 /* Check connector changes */
9365 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9366 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9367 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9368
9369 /* Skip connectors that are disabled or part of modeset already. */
9370 if (!old_con_state->crtc && !new_con_state->crtc)
9371 continue;
9372
9373 if (!new_con_state->crtc)
9374 continue;
9375
9376 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9377 if (IS_ERR(new_crtc_state)) {
9378 ret = PTR_ERR(new_crtc_state);
9379 goto fail;
9380 }
9381
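		/*
		 * Marking connectors_changed makes
		 * drm_atomic_crtc_needs_modeset() return true for this CRTC,
		 * steering the ABM level change through the full modeset path.
		 */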
9382 if (dm_old_con_state->abm_level !=
9383 dm_new_con_state->abm_level)
9384 new_crtc_state->connectors_changed = true;
9385 }
9386
e10517b3 9387#if defined(CONFIG_DRM_AMD_DC_DCN)
44be939f
ML
9388 if (adev->asic_type >= CHIP_NAVI10) {
9389 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9390 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9391 ret = add_affected_mst_dsc_crtcs(state, crtc);
9392 if (ret)
9393 goto fail;
9394 }
9395 }
9396 }
e10517b3 9397#endif
1e88ad0a 9398 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
886876ec
EB
9399 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9400
1e88ad0a 9401 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
98e6436d 9402 !new_crtc_state->color_mgmt_changed &&
886876ec
EB
9403 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
9404 dm_old_crtc_state->dsc_force_changed == false)
1e88ad0a 9405 continue;
7bef1af3 9406
1e88ad0a
S
9407 if (!new_crtc_state->enable)
9408 continue;
fc9e9920 9409
1e88ad0a
S
9410 ret = drm_atomic_add_affected_connectors(state, crtc);
9411 if (ret)
9412 return ret;
fc9e9920 9413
1e88ad0a
S
9414 ret = drm_atomic_add_affected_planes(state, crtc);
9415 if (ret)
9416 goto fail;
115a385c 9417
cbac53f7 9418 if (dm_old_crtc_state->dsc_force_changed)
115a385c 9419 new_crtc_state->mode_changed = true;
e7b07cee
HW
9420 }
9421
2d9e6431
NK
9422 /*
9423 * Add all primary and overlay planes on the CRTC to the state
9424 * whenever a plane is enabled to maintain correct z-ordering
9425 * and to enable fast surface updates.
9426 */
9427 drm_for_each_crtc(crtc, dev) {
9428 bool modified = false;
9429
9430 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9431 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9432 continue;
9433
9434 if (new_plane_state->crtc == crtc ||
9435 old_plane_state->crtc == crtc) {
9436 modified = true;
9437 break;
9438 }
9439 }
9440
9441 if (!modified)
9442 continue;
9443
9444 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9445 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9446 continue;
9447
9448 new_plane_state =
9449 drm_atomic_get_plane_state(state, plane);
9450
9451 if (IS_ERR(new_plane_state)) {
9452 ret = PTR_ERR(new_plane_state);
9453 goto fail;
9454 }
9455 }
9456 }
9457
62f55537 9458	/* Remove existing planes if they are modified */
9e869063
LL
9459 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9460 ret = dm_update_plane_state(dc, state, plane,
9461 old_plane_state,
9462 new_plane_state,
9463 false,
9464 &lock_and_validation_needed);
9465 if (ret)
9466 goto fail;
62f55537
AG
9467 }
9468
9469 /* Disable all crtcs which require disable */
4b9674e5
LL
9470 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9471 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9472 old_crtc_state,
9473 new_crtc_state,
9474 false,
9475 &lock_and_validation_needed);
9476 if (ret)
9477 goto fail;
62f55537
AG
9478 }
9479
9480 /* Enable all crtcs which require enable */
4b9674e5
LL
9481 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9482 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9483 old_crtc_state,
9484 new_crtc_state,
9485 true,
9486 &lock_and_validation_needed);
9487 if (ret)
9488 goto fail;
62f55537
AG
9489 }
9490
9491 /* Add new/modified planes */
9e869063
LL
9492 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9493 ret = dm_update_plane_state(dc, state, plane,
9494 old_plane_state,
9495 new_plane_state,
9496 true,
9497 &lock_and_validation_needed);
9498 if (ret)
9499 goto fail;
62f55537
AG
9500 }
9501
b349f76e
ES
9502 /* Run this here since we want to validate the streams we created */
9503 ret = drm_atomic_helper_check_planes(dev, state);
9504 if (ret)
9505 goto fail;
62f55537 9506
12f4849a
SS
9507 /* Check cursor planes scaling */
9508 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9509 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
9510 if (ret)
9511 goto fail;
9512 }
9513
43d10d30
NK
9514 if (state->legacy_cursor_update) {
9515 /*
9516 * This is a fast cursor update coming from the plane update
9517 * helper, check if it can be done asynchronously for better
9518 * performance.
9519 */
9520 state->async_update =
9521 !drm_atomic_helper_async_check(dev, state);
9522
9523 /*
9524 * Skip the remaining global validation if this is an async
9525 * update. Cursor updates can be done without affecting
9526 * state or bandwidth calcs and this avoids the performance
9527 * penalty of locking the private state object and
9528 * allocating a new dc_state.
9529 */
9530 if (state->async_update)
9531 return 0;
9532 }
9533
ebdd27e1 9534	/* Check scaling and underscan changes */
1f6010a9 9535 /* TODO Removed scaling changes validation due to inability to commit
e7b07cee
HW
9536	 * new stream into context w/o causing a full reset. Need to
9537 * decide how to handle.
9538 */
c2cea706 9539 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
54d76575
LSL
9540 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9541 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9542 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
e7b07cee
HW
9543
9544 /* Skip any modesets/resets */
0bc9706d
LSL
9545 if (!acrtc || drm_atomic_crtc_needs_modeset(
9546 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
e7b07cee
HW
9547 continue;
9548
b830ebc9 9549		/* Skip anything that is not a scale or underscan change */
54d76575 9550 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
e7b07cee
HW
9551 continue;
9552
9553 lock_and_validation_needed = true;
9554 }
9555
f6d7c7fa
NK
9556	/*
9557 * Streams and planes are reset when there are changes that affect
9558 * bandwidth. Anything that affects bandwidth needs to go through
9559 * DC global validation to ensure that the configuration can be applied
9560 * to hardware.
9561 *
9562 * We have to currently stall out here in atomic_check for outstanding
9563 * commits to finish in this case because our IRQ handlers reference
9564 * DRM state directly - we can end up disabling interrupts too early
9565 * if we don't.
9566 *
9567 * TODO: Remove this stall and drop DM state private objects.
a87fa993 9568 */
f6d7c7fa 9569 if (lock_and_validation_needed) {
eb3dc897
NK
9570 ret = dm_atomic_get_state(state, &dm_state);
9571 if (ret)
9572 goto fail;
e7b07cee
HW
9573
9574 ret = do_aquire_global_lock(dev, state);
9575 if (ret)
9576 goto fail;
1dc90497 9577
d9fe1a4c 9578#if defined(CONFIG_DRM_AMD_DC_DCN)
8c20a1ed
DF
9579 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
9580 goto fail;
9581
29b9ba74
ML
9582 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
9583 if (ret)
9584 goto fail;
d9fe1a4c 9585#endif
29b9ba74 9586
ded58c7b
ZL
9587 /*
9588 * Perform validation of MST topology in the state:
9589 * We need to perform MST atomic check before calling
9590 * dc_validate_global_state(), or there is a chance
9591 * to get stuck in an infinite loop and hang eventually.
9592 */
9593 ret = drm_dp_mst_atomic_check(state);
9594 if (ret)
9595 goto fail;
74a16675
RS
9596 status = dc_validate_global_state(dc, dm_state->context, false);
9597 if (status != DC_OK) {
9598 DC_LOG_WARNING("DC global validation failure: %s (%d)",
9599 dc_status_to_str(status), status);
e7b07cee
HW
9600 ret = -EINVAL;
9601 goto fail;
9602 }
bd200d19 9603 } else {
674e78ac 9604 /*
bd200d19
NK
9605 * The commit is a fast update. Fast updates shouldn't change
9606 * the DC context, affect global validation, and can have their
9607 * commit work done in parallel with other commits not touching
9608 * the same resource. If we have a new DC context as part of
9609 * the DM atomic state from validation we need to free it and
9610 * retain the existing one instead.
fde9f39a
MR
9611 *
9612 * Furthermore, since the DM atomic state only contains the DC
9613 * context and can safely be annulled, we can free the state
9614 * and clear the associated private object now to free
9615 * some memory and avoid a possible use-after-free later.
674e78ac 9616 */
bd200d19 9617
fde9f39a
MR
9618 for (i = 0; i < state->num_private_objs; i++) {
9619 struct drm_private_obj *obj = state->private_objs[i].ptr;
bd200d19 9620
fde9f39a
MR
9621 if (obj->funcs == adev->dm.atomic_obj.funcs) {
9622 int j = state->num_private_objs-1;
bd200d19 9623
fde9f39a
MR
9624 dm_atomic_destroy_state(obj,
9625 state->private_objs[i].state);
9626
9627 /* If i is not at the end of the array then the
9628 * last element needs to be moved to where i was
9629 * before the array can safely be truncated.
9630 */
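				/* Illustrative example: with num_private_objs == 3
				 * and i == 0, element 2 is copied into slot 0 and
				 * the count drops to 2. */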
9631 if (i != j)
9632 state->private_objs[i] =
9633 state->private_objs[j];
bd200d19 9634
fde9f39a
MR
9635 state->private_objs[j].ptr = NULL;
9636 state->private_objs[j].state = NULL;
9637 state->private_objs[j].old_state = NULL;
9638 state->private_objs[j].new_state = NULL;
9639
9640 state->num_private_objs = j;
9641 break;
9642 }
bd200d19 9643 }
e7b07cee
HW
9644 }
9645
caff0e66
NK
9646 /* Store the overall update type for use later in atomic check. */
9647 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
9648 struct dm_crtc_state *dm_new_crtc_state =
9649 to_dm_crtc_state(new_crtc_state);
9650
f6d7c7fa
NK
9651 dm_new_crtc_state->update_type = lock_and_validation_needed ?
9652 UPDATE_TYPE_FULL :
9653 UPDATE_TYPE_FAST;
e7b07cee
HW
9654 }
9655
9656 /* Must be success */
9657 WARN_ON(ret);
e8a98235
RS
9658
9659 trace_amdgpu_dm_atomic_check_finish(state, ret);
9660
e7b07cee
HW
9661 return ret;
9662
9663fail:
9664 if (ret == -EDEADLK)
01e28f9c 9665 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
e7b07cee 9666 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
01e28f9c 9667 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
e7b07cee 9668 else
01e28f9c 9669		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
e7b07cee 9670
e8a98235
RS
9671 trace_amdgpu_dm_atomic_check_finish(state, ret);
9672
e7b07cee
HW
9673 return ret;
9674}
9675
3ee6b26b
AD
9676static bool is_dp_capable_without_timing_msa(struct dc *dc,
9677 struct amdgpu_dm_connector *amdgpu_dm_connector)
e7b07cee
HW
9678{
9679 uint8_t dpcd_data;
9680 bool capable = false;
9681
c84dec2f 9682 if (amdgpu_dm_connector->dc_link &&
e7b07cee
HW
9683 dm_helpers_dp_read_dpcd(
9684 NULL,
c84dec2f 9685 amdgpu_dm_connector->dc_link,
e7b07cee
HW
9686 DP_DOWN_STREAM_PORT_COUNT,
9687 &dpcd_data,
9688 sizeof(dpcd_data))) {
9689 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
9690 }
9691
9692 return capable;
9693}
98e6436d
AK
9694void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
9695 struct edid *edid)
e7b07cee
HW
9696{
9697 int i;
e7b07cee
HW
9698 bool edid_check_required;
9699 struct detailed_timing *timing;
9700 struct detailed_non_pixel *data;
9701 struct detailed_data_monitor_range *range;
c84dec2f
HW
9702 struct amdgpu_dm_connector *amdgpu_dm_connector =
9703 to_amdgpu_dm_connector(connector);
bb47de73 9704 struct dm_connector_state *dm_con_state = NULL;
e7b07cee
HW
9705
9706 struct drm_device *dev = connector->dev;
1348969a 9707 struct amdgpu_device *adev = drm_to_adev(dev);
bb47de73 9708 bool freesync_capable = false;
b830ebc9 9709
8218d7f1
HW
9710 if (!connector->state) {
9711		DRM_ERROR("%s - Connector has no state\n", __func__);
bb47de73 9712 goto update;
8218d7f1
HW
9713 }
9714
98e6436d
AK
9715 if (!edid) {
9716 dm_con_state = to_dm_connector_state(connector->state);
9717
9718 amdgpu_dm_connector->min_vfreq = 0;
9719 amdgpu_dm_connector->max_vfreq = 0;
9720 amdgpu_dm_connector->pixel_clock_mhz = 0;
9721
bb47de73 9722 goto update;
98e6436d
AK
9723 }
9724
8218d7f1
HW
9725 dm_con_state = to_dm_connector_state(connector->state);
9726
e7b07cee 9727 edid_check_required = false;
c84dec2f 9728 if (!amdgpu_dm_connector->dc_sink) {
e7b07cee 9729		DRM_ERROR("dc_sink is NULL, cannot enable FreeSync.\n");
bb47de73 9730 goto update;
e7b07cee
HW
9731 }
9732 if (!adev->dm.freesync_module)
bb47de73 9733 goto update;
e7b07cee
HW
9734	/*
9735	 * If the EDID is non-NULL, restrict FreeSync support to DP and eDP.
9736	 */
9737 if (edid) {
c84dec2f
HW
9738 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
9739 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
e7b07cee
HW
9740 edid_check_required = is_dp_capable_without_timing_msa(
9741 adev->dm.dc,
c84dec2f 9742 amdgpu_dm_connector);
e7b07cee
HW
9743 }
9744 }
e7b07cee
HW
9745	if (edid_check_required && (edid->version > 1 ||
9746 (edid->version == 1 && edid->revision > 1))) {
9747 for (i = 0; i < 4; i++) {
9748
9749 timing = &edid->detailed_timings[i];
9750 data = &timing->data.other_data;
9751 range = &data->data.range;
9752 /*
9753 * Check if monitor has continuous frequency mode
9754 */
9755 if (data->type != EDID_DETAIL_MONITOR_RANGE)
9756 continue;
9757			/*
9758			 * Check for the range-limits-only flag. flags == 1
9759			 * means no additional timing information is provided;
9760			 * default GTF, GTF secondary curve and CVT are not
9761			 * supported.
9762			 */
9763 if (range->flags != 1)
9764 continue;
9765
c84dec2f
HW
9766 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
9767 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
9768 amdgpu_dm_connector->pixel_clock_mhz =
e7b07cee 9769 range->pixel_clock_mhz * 10;
a0ffc3fd
SW
9770
9771 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
9772 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
9773
e7b07cee
HW
9774 break;
9775 }
9776
c84dec2f 9777 if (amdgpu_dm_connector->max_vfreq -
98e6436d
AK
9778 amdgpu_dm_connector->min_vfreq > 10) {
9779
bb47de73 9780 freesync_capable = true;
e7b07cee
HW
9781 }
9782 }
bb47de73
NK
9783
9784update:
9785 if (dm_con_state)
9786 dm_con_state->freesync_capable = freesync_capable;
9787
9788 if (connector->vrr_capable_property)
9789 drm_connector_set_vrr_capable_property(connector,
9790 freesync_capable);
e7b07cee
HW
9791}
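The capability test above reduces to simple arithmetic on the EDID monitor range
descriptor. A userspace-style sketch (hypothetical struct name; the "more than
10 Hz of spread" requirement is taken from the code above):

	#include <stdbool.h>

	/* Reduced, hypothetical view of an EDID monitor range descriptor. */
	struct monitor_range {
		unsigned int min_vfreq;	/* Hz */
		unsigned int max_vfreq;	/* Hz */
	};

	/*
	 * Mirrors the check above: treat the sink as FreeSync-capable
	 * only when its continuous refresh range spans more than 10 Hz.
	 */
	static bool range_is_freesync_capable(const struct monitor_range *r)
	{
		return r->max_vfreq - r->min_vfreq > 10;
	}

For example, a 48-144 Hz panel qualifies (96 Hz of spread), while a nominally
fixed 59-61 Hz range does not.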
9792
8c322309
RL
9793static void amdgpu_dm_set_psr_caps(struct dc_link *link)
9794{
9795 uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
9796
9797 if (!(link->connector_signal & SIGNAL_TYPE_EDP))
9798 return;
9799 if (link->type == dc_connection_none)
9800 return;
9801 if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
9802 dpcd_data, sizeof(dpcd_data))) {
d1ebfdd8
WW
9803 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
9804
9805 if (dpcd_data[0] == 0) {
1cfbbdde 9806 link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
d1ebfdd8
WW
9807 link->psr_settings.psr_feature_enabled = false;
9808 } else {
1cfbbdde 9809 link->psr_settings.psr_version = DC_PSR_VERSION_1;
d1ebfdd8
WW
9810 link->psr_settings.psr_feature_enabled = true;
9811 }
9812
9813		DRM_INFO("PSR support: %d\n", link->psr_settings.psr_feature_enabled);
8c322309
RL
9814 }
9815}
9816
9817/*
9818 * amdgpu_dm_link_setup_psr() - configure psr link
9819 * @stream: stream state
9820 *
9821 * Return: true on success
9822 */
9823static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9824{
9825 struct dc_link *link = NULL;
9826 struct psr_config psr_config = {0};
9827 struct psr_context psr_context = {0};
8c322309
RL
9828 bool ret = false;
9829
9830 if (stream == NULL)
9831 return false;
9832
9833 link = stream->link;
8c322309 9834
d1ebfdd8 9835 psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
8c322309
RL
9836
9837 if (psr_config.psr_version > 0) {
9838 psr_config.psr_exit_link_training_required = 0x1;
9839 psr_config.psr_frame_capture_indication_req = 0;
9840 psr_config.psr_rfb_setup_time = 0x37;
9841 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9842 psr_config.allow_smu_optimizations = 0x0;
9843
9844 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
9845
9846 }
d1ebfdd8 9847 DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
8c322309
RL
9848
9849 return ret;
9850}
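Setup and enable are distinct steps. A plausible, simplified call sequence
(hypothetical wrapper, with error handling elided; in the driver these calls
are issued around commit time, not shown here):

	static bool psr_bringup(struct dc_stream_state *stream)
	{
		/*
		 * Program the link side first; this returns false when the
		 * DPCD-reported PSR version was 0 (unsupported) or setup failed.
		 */
		if (!amdgpu_dm_link_setup_psr(stream))
			return false;

		/* Then arm the firmware-side static-screen detection. */
		return amdgpu_dm_psr_enable(stream);
	}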
9851
9852/*
9853 * amdgpu_dm_psr_enable() - enable psr f/w
9854 * @stream: stream state
9855 *
9856 * Return: true on success
9857 */
9858bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9859{
9860 struct dc_link *link = stream->link;
5b5abe95
AK
9861 unsigned int vsync_rate_hz = 0;
9862 struct dc_static_screen_params params = {0};
9863 /* Calculate number of static frames before generating interrupt to
9864 * enter PSR.
9865 */
5b5abe95
AK
9866	/* Initial fail-safe of 2 static frames */
9867 unsigned int num_frames_static = 2;
8c322309
RL
9868
9869 DRM_DEBUG_DRIVER("Enabling psr...\n");
9870
5b5abe95
AK
9871 vsync_rate_hz = div64_u64(div64_u64((
9872 stream->timing.pix_clk_100hz * 100),
9873 stream->timing.v_total),
9874 stream->timing.h_total);
9875
9876	/*
9877	 * Round up: pick the number of frames so that at least 30 ms of
9878	 * static screen time has passed.
9879	 */
7aa62404
RL
9880 if (vsync_rate_hz != 0) {
9881 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
5b5abe95 9882 num_frames_static = (30000 / frame_time_microsec) + 1;
7aa62404 9883 }
5b5abe95
AK
9884
9885 params.triggers.cursor_update = true;
9886 params.triggers.overlay_update = true;
9887 params.triggers.surface_update = true;
9888 params.num_frames = num_frames_static;
8c322309 9889
5b5abe95 9890 dc_stream_set_static_screen_params(link->ctx->dc,
8c322309 9891 &stream, 1,
5b5abe95 9892 &params);
8c322309 9893
1d496907 9894 return dc_link_set_psr_allow_active(link, true, false, false);
8c322309
RL
9895}
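The static-frame arithmetic above is easy to check by hand. A small sketch
(hypothetical helper name; the 30 ms threshold, the +1 round-up and the
2-frame fail-safe all come from the code above):

	/*
	 * For a 60 Hz stream: frame_time = 1000000 / 60 = 16666 us, so
	 * num_frames = 30000 / 16666 + 1 = 2 -- the fail-safe default.
	 * For a 144 Hz stream: frame_time = 6944 us, num_frames = 5.
	 */
	static unsigned int psr_static_frames(unsigned int vsync_rate_hz)
	{
		unsigned int frame_time_microsec;

		if (vsync_rate_hz == 0)
			return 2;	/* fail-safe, as above */

		frame_time_microsec = 1000000 / vsync_rate_hz;
		return 30000 / frame_time_microsec + 1;
	}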
9896
9897/*
9898 * amdgpu_dm_psr_disable() - disable psr f/w
9899 * @stream: stream state
9900 *
9901 * Return: true on success
9902 */
9903static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9904{
9905
9906 DRM_DEBUG_DRIVER("Disabling psr...\n");
9907
1d496907 9908 return dc_link_set_psr_allow_active(stream->link, false, true, false);
8c322309 9909}
3d4e52d0 9910
6ee90e88 9911/*
9912 * amdgpu_dm_psr_disable_all() - disable psr f/w
9913 * if psr is enabled on any stream
9914 *
9915 * Return: true on success
9916 */
9917static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
9918{
9919 DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
9920 return dc_set_psr_allow_active(dm->dc, false);
9921}
9922
3d4e52d0
VL
9923void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
9924{
1348969a 9925 struct amdgpu_device *adev = drm_to_adev(dev);
3d4e52d0
VL
9926 struct dc *dc = adev->dm.dc;
9927 int i;
9928
9929 mutex_lock(&adev->dm.dc_lock);
9930 if (dc->current_state) {
9931 for (i = 0; i < dc->current_state->stream_count; ++i)
9932 dc->current_state->streams[i]
9933 ->triggered_crtc_reset.enabled =
9934 adev->dm.force_timing_sync;
9935
9936 dm_enable_per_frame_crtc_master_sync(dc->current_state);
9937 dc_trigger_sync(dc, dc->current_state);
9938 }
9939 mutex_unlock(&adev->dm.dc_lock);
9940}
9d83722d
RS
9941
9942void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
9943 uint32_t value, const char *func_name)
9944{
9945#ifdef DM_CHECK_ADDR_0
9946 if (address == 0) {
9947		DC_ERR("invalid register write; address = 0\n");
9948 return;
9949 }
9950#endif
9951 cgs_write_register(ctx->cgs_device, address, value);
9952 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
9953}
9954
9955uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
9956 const char *func_name)
9957{
9958 uint32_t value;
9959#ifdef DM_CHECK_ADDR_0
9960 if (address == 0) {
9961 DC_ERR("invalid register read; address = 0\n");
9962 return 0;
9963 }
9964#endif
9965
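	/*
	 * Register reads are not valid while the DMUB register helper is
	 * gathering offloaded commands (and not burst-flushing writes):
	 * the offloaded writes have not executed yet, so any value read
	 * here could be stale. Flag it and return 0.
	 */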
9966 if (ctx->dmub_srv &&
9967 ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
9968 !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
9969 ASSERT(false);
9970 return 0;
9971 }
9972
9973 value = cgs_read_register(ctx->cgs_device, address);
9974
9975 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
9976
9977 return value;
9978}
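Both helpers above share a guard / access / trace shape. A compact userspace
sketch of that pattern (hypothetical mmio_read() and trace_read() backends
standing in for cgs_read_register() and the tracepoint):

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical MMIO backend; only the overall shape matters here. */
	static uint32_t mmio_read(uint32_t address)
	{
		return address ^ 0xA5A5A5A5u;
	}

	static void trace_read(uint32_t address, uint32_t value)
	{
		printf("rreg 0x%08x -> 0x%08x\n", address, value);
	}

	static uint32_t read_reg_checked(uint32_t address)
	{
		uint32_t value;

		/* Mirrors the DM_CHECK_ADDR_0 guard above. */
		if (address == 0) {
			fprintf(stderr, "invalid register read; address = 0\n");
			return 0;
		}

		value = mmio_read(address);
		trace_read(address, value);
		return value;
	}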