drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

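/*
 * A rough sketch of the conversion flow, inferred from the handlers declared
 * below (illustrative, not a normative description): a DRM atomic commit is
 * validated by amdgpu_dm_atomic_check(), then programmed to hardware by
 * amdgpu_dm_atomic_commit_tail(), which maps DRM CRTCs, planes and
 * connectors onto DC streams, surfaces and links.
 */
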
/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

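/*
 * Map the DP dongle type reported in the DPCD caps onto the DRM
 * "subconnector" property values, so userspace can tell e.g. a DP-to-HDMI
 * dongle from a native DP sink.
 */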
static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * Initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder and drm_mode_config.
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
	}
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

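/*
 * Look up the amdgpu_crtc that scans out on the given OTG (output timing
 * generator) instance; falls back to the first CRTC, with a warning, when
 * the instance is unknown.
 */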
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

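/*
 * Whether variable refresh rate is currently active for this CRTC, i.e. the
 * freesync state is "active variable" or "active fixed". The _irq variant
 * reads the interrupt-safe copy kept in dm_irq_params.
 */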
static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	if (!e)
		WARN_ON(1);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}

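/**
 * dm_vupdate_high_irq() - Handle VUPDATE interrupt
 * @interrupt_params: used for determining the VUPDATE instance
 *
 * Summary drawn from the code below: in VRR mode, core vblank handling is
 * deferred to this handler, which runs after the end of the front-porch,
 * where vblank timestamping gives valid results. BTR processing for
 * pre-DCE12 ASICs also happens here.
 */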
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
					adev->dm.freesync_module,
					acrtc->dm_irq_params.stream,
					&acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
					adev->dm.dc,
					acrtc->dm_irq_params.stream,
					&acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/**
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/**
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

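/*
 * FBC is only set up for eDP panels here: the compressor buffer is sized for
 * the largest mode in the connector's mode list (4 bytes per pixel) and
 * allocated from GTT.
 */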
/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}
	}
}

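/*
 * Audio component glue: these callbacks let the HDA audio driver query the
 * ELD (EDID-like data) for a given port and get notified on hotplug, via
 * the shared drm_audio_component interface.
 */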
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

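/*
 * Copy the DMUB firmware and VBIOS into the framebuffer regions reserved
 * for the DMUB service, reset its mailbox/tracebuffer/fw-state windows,
 * then hand the hardware parameters to dmub_srv and wait for the firmware
 * auto-load to finish.
 */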
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

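/*
 * Gather the system aperture, AGP and GART page-table addresses from the
 * GMC so DC can program the DCN address space (see the call from
 * amdgpu_dm_init() below).
 */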
#if defined(CONFIG_DRM_AMD_DC_DCN)
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increase system aperture high address (add 1)
		 * to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;
}
#endif

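/*
 * Create the custom properties that let userspace restrict CRC computation
 * to a sub-window of the CRTC (debugfs CRC support).
 */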
#ifdef CONFIG_DEBUG_FS
static int create_crtc_crc_properties(struct amdgpu_display_manager *dm)
{
	dm->crc_win_x_start_property =
		drm_property_create_range(adev_to_drm(dm->adev),
					  DRM_MODE_PROP_ATOMIC,
					  "AMD_CRC_WIN_X_START", 0, U16_MAX);
	if (!dm->crc_win_x_start_property)
		return -ENOMEM;

	dm->crc_win_y_start_property =
		drm_property_create_range(adev_to_drm(dm->adev),
					  DRM_MODE_PROP_ATOMIC,
					  "AMD_CRC_WIN_Y_START", 0, U16_MAX);
	if (!dm->crc_win_y_start_property)
		return -ENOMEM;

	dm->crc_win_x_end_property =
		drm_property_create_range(adev_to_drm(dm->adev),
					  DRM_MODE_PROP_ATOMIC,
					  "AMD_CRC_WIN_X_END", 0, U16_MAX);
	if (!dm->crc_win_x_end_property)
		return -ENOMEM;

	dm->crc_win_y_end_property =
		drm_property_create_range(adev_to_drm(dm->adev),
					  DRM_MODE_PROP_ATOMIC,
					  "AMD_CRC_WIN_Y_END", 0, U16_MAX);
	if (!dm->crc_win_y_end_property)
		return -ENOMEM;

	return 0;
}
#endif

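/*
 * Bring up DM: wire DC to the base driver (IRQ handling, cgs device,
 * asic-specific dc flags), create the DC core and DMUB, then register all
 * DRM display structures. On any failure everything is torn down again
 * through amdgpu_dm_fini().
 */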
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			init_data.flags.disable_dmcu = true;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_VANGOGH:
		init_data.flags.gpu_vm_support = true;
		break;
#endif
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	init_data.soc_bounding_box = adev->dm.soc_bounding_box;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->apu_flags) {
		struct dc_phy_addr_space_config pa_config;

		mmhub_read_system_context(adev, &pa_config);

		// Call the DC init_memory func
		dc_setup_system_context(adev->dm.dc, &pa_config);
	}
#endif

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
#ifdef CONFIG_DEBUG_FS
	if (create_crtc_crc_properties(&adev->dm))
		DRM_ERROR("amdgpu: failed to create crc property.\n");
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif
	if (adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);

	return;
}

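/*
 * Request the DMCU microcode for ASICs that need it and register it with
 * the PSP loader; ASICs without a separate DMCU image simply return 0.
 */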
static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_VANGOGH:
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

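/*
 * DMUB software setup: register accessors for the service plus
 * dm_dmub_sw_init(), which requests and validates the DMUB firmware,
 * creates the dmub_srv instance and carves its regions out of a VRAM
 * allocation (see the TODO about moving this into GART).
 */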
static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}

static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
		break;
	case CHIP_SIENNA_CICHLID:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
		break;
	case CHIP_NAVY_FLOUNDER:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
		break;
	case CHIP_VANGOGH:
		dmub_asic = DMUB_ASIC_DCN301;
		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
		break;
	case CHIP_DIMGREY_CAVEFISH:
		dmub_asic = DMUB_ASIC_DCN302;
		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
		break;

	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

	/* Read the firmware version before it is logged below. */
	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data = region_params.bss_data_size ?
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes) : NULL;
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}

static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	release_firmware(adev->dm.dmub_fw);
	adev->dm.dmub_fw = NULL;

	release_firmware(adev->dm.fw_dmcu);
	adev->dm.fw_dmcu = NULL;

	return 0;
}

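/*
 * Enable MST topology management on every connector that detected an MST
 * branch device, downgrading the link to a single display on failure.
 */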
static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}

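/*
 * Late init: program the ABM (adaptive backlight management) iram tables,
 * either through the legacy DMCU or through DMUB when ABM runs there, then
 * kick off MST link detection.
 */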
static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;
	bool ret = true;

	dmcu = adev->dm.dc->res_pool->dmcu;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction, Don't allow below 1%
	 * 0xFFFF x 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;

	/* In the case where abm is implemented on dmcub,
	 * dmcu object will be null.
	 * ABM 2.4 and up are implemented on dmcub.
	 */
	if (dmcu)
		ret = dmcu_load_iram(dmcu, params);
	else if (adev->dm.dc->ctx->dmub_srv)
		ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);

	if (!ret)
		return -EINVAL;

	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
}

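/*
 * Suspend or resume the DP MST topology managers for all MST root
 * connectors; a failed resume forces a hotplug event so userspace
 * re-probes the topology.
 */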
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}

static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver dc implementation.
	 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
	 * should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculates dcn watermark clock settings within dc_create,
	 * dcn20_resource_construct,
	 * then calls pplib functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, clock settings of dcn watermarks are also fixed values.
	 * dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not Renoir
	 */
	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		break;
	default:
		return 0;
	}

	ret = smu_write_watermarks_table(smu);
	if (ret) {
		DRM_ERROR("Failed to update WMTABLE!\n");
		return ret;
	}

	return 0;
}

/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}

/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}


static int dm_enable_vblank(struct drm_crtc *crtc);
static void dm_disable_vblank(struct drm_crtc *crtc);

static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
					  struct dc_state *state, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc;
	int rc = -EBUSY;
	int i = 0;

	for (i = 0; i < state->stream_count; i++) {
		acrtc = get_crtc_by_otg_inst(
				adev, state->stream_status[i].primary_otg_inst);

		if (acrtc && state->stream_status[i].plane_count != 0) {
			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG("crtc %d - pflip irq %sabling: r=%d\n",
				  acrtc->crtc_id, enable ? "en" : "dis", rc);
			if (rc)
				DRM_WARN("Failed to %s pflip interrupts\n",
					 enable ? "enable" : "disable");

			if (enable) {
				rc = dm_enable_vblank(&acrtc->base);
				if (rc)
					DRM_WARN("Failed to enable vblank interrupts\n");
			} else {
				dm_disable_vblank(&acrtc->base);
			}
		}
	}
}

static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
{
	struct dc_state *context = NULL;
	enum dc_status res = DC_ERROR_UNEXPECTED;
	int i;
	struct dc_stream_state *del_streams[MAX_PIPES];
	int del_streams_count = 0;

	memset(del_streams, 0, sizeof(del_streams));

	context = dc_create_state(dc);
	if (context == NULL)
		goto context_alloc_fail;

	dc_resource_state_copy_construct_current(dc, context);

	/* First remove from context all streams */
	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		del_streams[del_streams_count++] = stream;
	}

	/* Remove all planes for removed streams and then remove the streams */
	for (i = 0; i < del_streams_count; i++) {
		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
			res = DC_FAIL_DETACH_SURFACES;
			goto fail;
		}

		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
		if (res != DC_OK)
			goto fail;
	}

	res = dc_validate_global_state(dc, context, false);

	if (res != DC_OK) {
		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
		goto fail;
	}

	res = dc_commit_state(dc, context);

fail:
	dc_release_state(context);

context_alloc_fail:
	return res;
}

static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	if (amdgpu_in_reset(adev)) {
		mutex_lock(&dm->dc_lock);
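		/* Note: dc_lock is deliberately kept held across the GPU
		 * reset; the matching mutex_unlock() is in the
		 * amdgpu_in_reset() branch of dm_resume().
		 */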
		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);

		amdgpu_dm_commit_zero_streams(dm->dc);

		amdgpu_dm_irq_suspend(adev);

		return ret;
	}

	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));

	s3_handle_mst(adev_to_drm(adev), true);

	amdgpu_dm_irq_suspend(adev);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return 0;
}

static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc)
{
	uint32_t i;
	struct drm_connector_state *new_con_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		crtc_from_state = new_con_state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_dm_connector(connector);
	}

	return NULL;
}

static void emulated_link_detect(struct dc_link *link)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct display_sink_capability sink_caps = { 0 };
	enum dc_edid_status edid_status;
	struct dc_context *dc_ctx = link->ctx;
	struct dc_sink *sink = NULL;
	struct dc_sink *prev_sink = NULL;

	link->type = dc_connection_none;
	prev_sink = link->local_sink;

	if (prev_sink != NULL)
		dc_sink_retain(prev_sink);

	switch (link->connector_signal) {
	case SIGNAL_TYPE_HDMI_TYPE_A: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
		break;
	}

	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
		break;
	}

	case SIGNAL_TYPE_DVI_DUAL_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
		break;
	}

	case SIGNAL_TYPE_LVDS: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_LVDS;
		break;
	}

	case SIGNAL_TYPE_EDP: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_EDP;
		break;
	}

	case SIGNAL_TYPE_DISPLAY_PORT: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
		break;
	}

	default:
		DC_ERROR("Invalid connector type! signal:%d\n",
			 link->connector_signal);
		return;
	}

	sink_init_data.link = link;
	sink_init_data.sink_signal = sink_caps.signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DC_ERROR("Failed to create sink!\n");
		return;
	}

	/* dc_sink_create returns a new reference */
	link->local_sink = sink;

	edid_status = dm_helpers_read_local_edid(
			link->ctx,
			link,
			sink);

	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID");
}

static void dm_gpureset_commit_state(struct dc_state *dc_state,
				     struct amdgpu_display_manager *dm)
{
	struct {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} *bundle;
	int k, m;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		dm_error("Failed to allocate update bundle\n");
		goto cleanup;
	}

	for (k = 0; k < dc_state->stream_count; k++) {
		bundle->stream_update.stream = dc_state->streams[k];

		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
			bundle->surface_updates[m].surface =
				dc_state->stream_status->plane_states[m];
			bundle->surface_updates[m].surface->force_full_update =
				true;
		}
		dc_commit_updates_for_stream(
			dm->dc, bundle->surface_updates,
			dc_state->stream_status->plane_count,
			dc_state->streams[k], &bundle->stream_update, dc_state);
	}

cleanup:
	kfree(bundle);
}

static void dm_set_dpms_off(struct dc_link *link)
{
	struct dc_stream_state *stream_state;
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
	struct dc_stream_update stream_update;
	bool dpms_off = true;

	memset(&stream_update, 0, sizeof(stream_update));
	stream_update.dpms_off = &dpms_off;

	mutex_lock(&adev->dm.dc_lock);
	stream_state = dc_stream_find_from_link(link);

	if (stream_state == NULL) {
		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
		mutex_unlock(&adev->dm.dc_lock);
		return;
	}

	stream_update.stream = stream_state;
	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
				     stream_state, &stream_update,
				     stream_state->ctx->dc->current_state);
	mutex_unlock(&adev->dm.dc_lock);
}

static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct drm_device *ddev = adev_to_drm(adev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct dm_plane_state *dm_new_plane_state;
	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
	enum dc_connection_type new_connection_type = dc_connection_none;
	struct dc_state *dc_state;
	int i, r, j;

	if (amdgpu_in_reset(adev)) {
		dc_state = dm->cached_dc_state;

		r = dm_dmub_hw_init(adev);
		if (r)
			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
		dc_resume(dm->dc);

		amdgpu_dm_irq_resume_early(adev);

		for (i = 0; i < dc_state->stream_count; i++) {
			dc_state->streams[i]->mode_changed = true;
			for (j = 0; j < dc_state->stream_status->plane_count; j++) {
				dc_state->stream_status->plane_states[j]->update_flags.raw
					= 0xffffffff;
			}
		}

		WARN_ON(!dc_commit_state(dm->dc, dc_state));

		dm_gpureset_commit_state(dm->cached_dc_state, dm);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);

		dc_release_state(dm->cached_dc_state);
		dm->cached_dc_state = NULL;

		amdgpu_dm_irq_resume_late(adev);

		mutex_unlock(&dm->dc_lock);

		return 0;
	}
	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
	dc_release_state(dm_state->context);
	dm_state->context = dc_create_state(dm->dc);
	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
	dc_resource_state_construct(dm->dc, dm_state->context);

	/* Before powering on DC we need to re-initialize DMUB. */
	r = dm_dmub_hw_init(adev);
	if (r)
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

	/* power on hardware */
	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

	/* program HPD filter */
	dc_resume(dm->dc);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);

	/* Do detection */
	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none)
			emulated_link_detect(aconnector->dc_link);
		else
			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
			aconnector->fake_enable = false;

		if (aconnector->dc_sink)
			dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}
	drm_connector_list_iter_end(&iter);

	/* Force mode set in atomic commit */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
		new_crtc_state->active_changed = true;

	/*
	 * atomic_check is expected to create the dc states. We need to release
	 * them here, since they were duplicated as part of the suspend
	 * procedure.
	 */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->stream) {
			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;
		}
	}

	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		if (dm_new_plane_state->dc_state) {
			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
			dc_plane_state_release(dm_new_plane_state->dc_state);
			dm_new_plane_state->dc_state = NULL;
		}
	}

	drm_atomic_helper_resume(ddev, dm->cached_state);

	dm->cached_state = NULL;

	amdgpu_dm_irq_resume_late(adev);

	amdgpu_dm_smu_write_watermarks_table(adev);

	return 0;
}

/**
 * DOC: DM Lifecycle
 *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
 * the base driver's device list to be initialized and torn down accordingly.
 *
 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
 */

static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};

const struct amdgpu_ip_block_version dm_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};


/**
 * DOC: atomic
 *
 * *WIP*
 */

static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.get_format_info = amd_get_format_info,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
};

static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};

static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
{
	u32 max_cll, min_cll, max, min, q, r;
	struct amdgpu_dm_backlight_caps *caps;
	struct amdgpu_display_manager *dm;
	struct drm_connector *conn_base;
	struct amdgpu_device *adev;
	struct dc_link *link = NULL;
	static const u8 pre_computed_values[] = {
		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};

	if (!aconnector || !aconnector->dc_link)
		return;

	link = aconnector->dc_link;
	if (link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	conn_base = &aconnector->base;
	adev = drm_to_adev(conn_base->dev);
	dm = &adev->dm;
	caps = &dm->backlight_caps;
	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
	caps->aux_support = false;
	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;

	if (caps->ext_caps->bits.oled == 1 ||
	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
		caps->aux_support = true;

	/* From the specification (CTA-861-G), the maximum luminance is
	 * calculated as:
	 * Luminance = 50 * 2**(CV/32)
	 * where CV is a one-byte value.
	 * Evaluating this expression directly would require floating-point
	 * arithmetic; to avoid that complexity, we take advantage of the fact
	 * that CV is divided by a constant. By Euclid's division algorithm,
	 * CV can be written as CV = 32*q + r. Substituting into the luminance
	 * expression gives 50*(2**q)*(2**(r/32)), so we only need to
	 * pre-compute the values of 2**(r/32). The pre-computed values were
	 * generated with the following Ruby one-liner:
	 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * and the results can be verified against pre_computed_values.
	 */
	q = max_cll >> 5;
	r = max_cll % 32;
	max = (1 << q) * pre_computed_values[r];
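	/* Worked example (illustrative value, not from the spec): for
	 * max_cll = 65 we get q = 65 >> 5 = 2 and r = 65 % 32 = 1, so
	 * max = (1 << 2) * pre_computed_values[1] = 4 * 51 = 204 nits,
	 * matching 50 * 2**(65/32) ~= 204.4 under integer rounding.
	 */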

	// min luminance: maxLum * (CV/255)^2 / 100
	q = DIV_ROUND_CLOSEST(min_cll, 255);
	min = max * DIV_ROUND_CLOSEST((q * q), 100);

	caps->aux_max_input_signal = max;
	caps->aux_min_input_signal = min;
}

void amdgpu_dm_update_connector_after_detect(
		struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;

	sink = aconnector->dc_link->local_sink;
	if (sink)
		dc_sink_retain(sink);

	/*
	 * The EDID-management connector gets its first update only in the
	 * mode_valid hook; after that, the connector sink is set to either a
	 * fake or a physical sink, depending on link status.
	 * Skip if already done during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/*
		 * For headless S3 resume, use the emulated sink (dc_em_sink)
		 * to fake the stream, because on resume connector->sink is
		 * set to NULL
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * The retain and release below bump up the
				 * sink's refcount because the link no longer
				 * points to it after disconnect; otherwise,
				 * the next crtc-to-connector reshuffle by UMD
				 * would trigger an unwanted dc_sink release
				 */
				dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			dc_sink_retain(aconnector->dc_sink);
			amdgpu_dm_update_freesync_caps(connector,
					aconnector->edid);
		} else {
			amdgpu_dm_update_freesync_caps(connector, NULL);
			if (!aconnector->dc_sink) {
				aconnector->dc_sink = aconnector->dc_em_sink;
				dc_sink_retain(aconnector->dc_sink);
			}
		}

		mutex_unlock(&dev->mode_config.mutex);

		if (sink)
			dc_sink_release(sink);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		dc_sink_release(sink);
		return;
	}

	if (aconnector->dc_sink == sink) {
		/*
		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!!
		 */
		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				 aconnector->connector_id);
		if (sink)
			dc_sink_release(sink);
		return;
	}

	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
			 aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/*
	 * 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do
	 */
	if (sink) {
		/*
		 * TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here.
		 */
		if (aconnector->dc_sink)
			amdgpu_dm_update_freesync_caps(connector, NULL);

		aconnector->dc_sink = sink;
		dc_sink_retain(aconnector->dc_sink);
		if (sink->dc_edid.length == 0) {
			aconnector->edid = NULL;
			if (aconnector->dc_link->aux_mode) {
				drm_dp_cec_unset_edid(
					&aconnector->dm_dp_aux.aux);
			}
		} else {
			aconnector->edid =
				(struct edid *)sink->dc_edid.raw_edid;

			drm_connector_update_edid_property(connector,
							   aconnector->edid);
			drm_add_edid_modes(connector, aconnector->edid);

			if (aconnector->dc_link->aux_mode)
				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
						    aconnector->edid);
		}

		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
		update_connector_ext_caps(aconnector);
	} else {
		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		amdgpu_dm_update_freesync_caps(connector, NULL);
		drm_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		aconnector->edid = NULL;
#ifdef CONFIG_DRM_AMD_DC_HDCP
		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
#endif
	}

	mutex_unlock(&dev->mode_config.mutex);

	update_subconnector_property(aconnector);

	if (sink)
		dc_sink_release(sink);
}

static void handle_hpd_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	enum dc_connection_type new_connection_type = dc_connection_none;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
#endif

	/*
	 * In case of failure or MST, there is no need to update the connector
	 * status or notify the OS, since (in the MST case) MST does this in
	 * its own context.
	 */
	mutex_lock(&aconnector->hpd_lock);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
		dm_con_state->update_hdcp = true;
	}
#endif
	if (aconnector->fake_enable)
		aconnector->fake_enable = false;

	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");

	if (aconnector->base.force && new_connection_type == dc_connection_none) {
		emulated_link_detect(aconnector->dc_link);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);

	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
		if (new_connection_type == dc_connection_none &&
		    aconnector->dc_link->type == dc_connection_none)
			dm_set_dpms_off(aconnector->dc_link);

		amdgpu_dm_update_connector_after_detect(aconnector);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);
}

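/* Summary of the loop below: read the ESI (event status indicator) bytes
 * from the sink's DPCD, let the MST topology manager process them, ACK the
 * handled bits back to the DPCD, then re-read in case a new IRQ arrived in
 * the meantime, bounded by max_process_count iterations.
 */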
static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;

	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

	while (dret == dpcd_bytes_to_read &&
	       process_count < max_process_count) {
		uint8_t retry;
		dret = 0;

		process_count++;

		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify down stream */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else {
			break;
		}
	}

	if (process_count == max_process_count)
		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}

static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
	bool result = false;
	enum dc_connection_type new_connection_type = dc_connection_none;
	struct amdgpu_device *adev = drm_to_adev(dev);
	union hpd_irq_data hpd_irq_data;

	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));

	/*
	 * TODO: Temporarily take a mutex so the HPD interrupt does not cause
	 * a GPIO conflict; once an i2c helper is implemented, this mutex
	 * should be retired.
	 */
	if (dc_link->type != dc_connection_mst_branch)
		mutex_lock(&aconnector->hpd_lock);

	read_hpd_rx_irq_data(dc_link, &hpd_irq_data);

	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
	    (dc_link->type == dc_connection_mst_branch)) {
		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
			result = true;
			dm_handle_hpd_rx_irq(aconnector);
			goto out;
		} else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
			result = false;
			dm_handle_hpd_rx_irq(aconnector);
			goto out;
		}
	}

	mutex_lock(&adev->dm.dc_lock);
#ifdef CONFIG_DRM_AMD_DC_HDCP
	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
#else
	result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
#endif
	mutex_unlock(&adev->dm.dc_lock);

out:
	if (result && !is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (!dc_link_detect_sink(dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(dc_link);

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		}
	}
#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
		if (adev->dm.hdcp_workqueue)
			hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
	}
#endif

	if (dc_link->type != dc_connection_mst_branch) {
		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
		mutex_unlock(&aconnector->hpd_lock);
	}
}

static void register_hpd_handlers(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct amdgpu_dm_connector *aconnector;
	const struct dc_link *dc_link;
	struct dc_interrupt_params int_params = {0};

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {

		aconnector = to_amdgpu_dm_connector(connector);
		dc_link = aconnector->dc_link;

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_irq,
					(void *) aconnector);
		}

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {

			/* Also register for DP short pulse (hpd_rx). */
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_rx_irq,
					(void *) aconnector);
		}
	}
}

#if defined(CONFIG_DRM_AMD_DC_SI)
/* Register IRQ sources and initialize IRQ callbacks */
static int dce60_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VBLANK interrupt */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i + 1, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
	     i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
#endif

/* Register IRQ sources and initialize IRQ callbacks */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

	if (adev->asic_type >= CHIP_VEGA10)
		client_id = SOC15_IH_CLIENTID_DCE;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VBLANK interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use VUPDATE interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_vupdate_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
	     i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
/* Register IRQ sources and initialize IRQ callbacks */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VSTARTUP interrupt */
	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
	     i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
	     i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);

		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(
			adev, &int_params, dm_crtc_high_irq, c_irq_params);
	}

	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
	 * to trigger at end of each vblank, regardless of state of the lock,
	 * matching DCE behaviour.
	 */
	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
	     i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);

		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_vupdate_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
	     i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
	     i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
			&adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
#endif

/*
 * Acquires the lock for the atomic state object and returns
 * the new atomic state.
 *
 * This should only be called during atomic check.
 */
static int dm_atomic_get_state(struct drm_atomic_state *state,
			       struct dm_atomic_state **dm_state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_state *priv_state;

	if (*dm_state)
		return 0;

	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	*dm_state = to_dm_atomic_state(priv_state);

	return 0;
}

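/* Illustrative (hypothetical) call pattern from an atomic-check path; the
 * early-out on a non-NULL *dm_state makes repeated calls within one check
 * cheap:
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *	// dm_state->context may now be modified for this commit.
 */
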
static struct dm_atomic_state *
dm_atomic_get_new_state(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_obj *obj;
	struct drm_private_state *new_obj_state;
	int i;

	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
		if (obj->funcs == dm->atomic_obj.funcs)
			return to_dm_atomic_state(new_obj_state);
	}

	return NULL;
}

static struct drm_private_state *
dm_atomic_duplicate_state(struct drm_private_obj *obj)
{
	struct dm_atomic_state *old_state, *new_state;

	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
	if (!new_state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);

	old_state = to_dm_atomic_state(obj->state);

	if (old_state && old_state->context)
		new_state->context = dc_copy_state(old_state->context);

	if (!new_state->context) {
		kfree(new_state);
		return NULL;
	}

	return &new_state->base;
}

static void dm_atomic_destroy_state(struct drm_private_obj *obj,
				    struct drm_private_state *state)
{
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);

	if (dm_state && dm_state->context)
		dc_release_state(dm_state->context);

	kfree(dm_state);
}

static struct drm_private_state_funcs dm_atomic_state_funcs = {
	.atomic_duplicate_state = dm_atomic_duplicate_state,
	.atomic_destroy_state = dm_atomic_destroy_state,
};

static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
{
	struct dm_atomic_state *state;
	int r;

	adev->mode_info.mode_config_initialized = true;

	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;

	adev_to_drm(adev)->mode_config.max_width = 16384;
	adev_to_drm(adev)->mode_config.max_height = 16384;

	adev_to_drm(adev)->mode_config.preferred_depth = 24;
	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
	/* indicates support for immediate flip */
	adev_to_drm(adev)->mode_config.async_page_flip = true;

	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	state->context = dc_create_state(adev->dm.dc);
	if (!state->context) {
		kfree(state);
		return -ENOMEM;
	}

	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);

	drm_atomic_private_obj_init(adev_to_drm(adev),
				    &adev->dm.atomic_obj,
				    &state->base,
				    &dm_atomic_state_funcs);

	r = amdgpu_display_modeset_create_props(adev);
	if (r) {
		dc_release_state(state->context);
		kfree(state);
		return r;
	}

	r = amdgpu_dm_audio_init(adev);
	if (r) {
		dc_release_state(state->context);
		kfree(state);
		return r;
	}

	return 0;
}

#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
{
#if defined(CONFIG_ACPI)
	struct amdgpu_dm_backlight_caps caps;

	memset(&caps, 0, sizeof(caps));

	if (dm->backlight_caps.caps_valid)
		return;

	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
	if (caps.caps_valid) {
		dm->backlight_caps.caps_valid = true;
		if (caps.aux_support)
			return;
		dm->backlight_caps.min_input_signal = caps.min_input_signal;
		dm->backlight_caps.max_input_signal = caps.max_input_signal;
	} else {
		dm->backlight_caps.min_input_signal =
			AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
		dm->backlight_caps.max_input_signal =
			AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
	}
#else
	if (dm->backlight_caps.aux_support)
		return;

	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
#endif
}

static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
{
	bool rc;

	if (!link)
		return 1;

	rc = dc_link_set_backlight_level_nits(link, true, brightness,
					      AUX_BL_DEFAULT_TRANSITION_TIME_MS);

	return rc ? 0 : 1;
}

static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
				unsigned *min, unsigned *max)
{
	if (!caps)
		return 0;

	if (caps->aux_support) {
		// Firmware limits are in nits, DC API wants millinits.
		*max = 1000 * caps->aux_max_input_signal;
		*min = 1000 * caps->aux_min_input_signal;
	} else {
		// Firmware limits are 8-bit, PWM control is 16-bit.
		*max = 0x101 * caps->max_input_signal;
		*min = 0x101 * caps->min_input_signal;
	}
	return 1;
}

static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
					uint32_t brightness)
{
	unsigned min, max;

	if (!get_brightness_range(caps, &min, &max))
		return brightness;

	// Rescale 0..255 to min..max
	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
				       AMDGPU_MAX_BL_LEVEL);
}

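/* Worked example (illustrative numbers only): on the PWM path, with the
 * default firmware limits min_input_signal = 12 and max_input_signal = 255,
 * the 16-bit range becomes min = 0x101 * 12 = 3084 and
 * max = 0x101 * 255 = 65535. A user brightness of 128 then maps to
 * 3084 + DIV_ROUND_CLOSEST((65535 - 3084) * 128, 255) = 3084 + 31348 = 34432,
 * roughly mid-scale as expected.
 */
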
static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
				      uint32_t brightness)
{
	unsigned min, max;

	if (!get_brightness_range(caps, &min, &max))
		return brightness;

	if (brightness < min)
		return 0;
	// Rescale min..max to 0..255
	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
				 max - min);
}

static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);
	struct amdgpu_dm_backlight_caps caps;
	struct dc_link *link = NULL;
	u32 brightness;
	bool rc;

	amdgpu_dm_update_backlight_caps(dm);
	caps = dm->backlight_caps;

	link = (struct dc_link *)dm->backlight_link;

	brightness = convert_brightness_from_user(&caps, bd->props.brightness);
	// Change brightness based on AUX property
	if (caps.aux_support)
		return set_backlight_via_aux(link, brightness);

	rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);

	return rc ? 0 : 1;
}

static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);
	int ret = dc_link_get_backlight_level(dm->backlight_link);

	if (ret == DC_ERROR_UNEXPECTED)
		return bd->props.brightness;
	return convert_brightness_to_user(&dm->backlight_caps, ret);
}

static const struct backlight_ops amdgpu_dm_backlight_ops = {
	.options = BL_CORE_SUSPENDRESUME,
	.get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status = amdgpu_dm_backlight_update_status,
};

3265
7578ecda
AD
3266static void
3267amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4562236b
HW
3268{
3269 char bl_name[16];
3270 struct backlight_properties props = { 0 };
3271
206bbafe
DF
3272 amdgpu_dm_update_backlight_caps(dm);
3273
4562236b 3274 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
53a53f86 3275 props.brightness = AMDGPU_MAX_BL_LEVEL;
4562236b
HW
3276 props.type = BACKLIGHT_RAW;
3277
3278 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4a580877 3279 adev_to_drm(dm->adev)->primary->index);
4562236b
HW
3280
3281 dm->backlight_dev = backlight_device_register(bl_name,
4a580877
LT
3282 adev_to_drm(dm->adev)->dev,
3283 dm,
3284 &amdgpu_dm_backlight_ops,
3285 &props);
4562236b 3286
74baea42 3287 if (IS_ERR(dm->backlight_dev))
4562236b
HW
3288 DRM_ERROR("DM: Backlight registration failed!\n");
3289 else
f1ad2f5e 3290 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4562236b
HW
3291}
3292
3293#endif
3294
static int initialize_plane(struct amdgpu_display_manager *dm,
			    struct amdgpu_mode_info *mode_info, int plane_id,
			    enum drm_plane_type plane_type,
			    const struct dc_plane_cap *plane_cap)
{
	struct drm_plane *plane;
	unsigned long possible_crtcs;
	int ret = 0;

	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
	if (!plane) {
		DRM_ERROR("KMS: Failed to allocate plane\n");
		return -ENOMEM;
	}
	plane->type = plane_type;

	/*
	 * HACK: IGT tests expect that the primary plane for a CRTC
	 * can only have one possible CRTC. Only expose support for
	 * any CRTC if they're not going to be used as a primary plane
	 * for a CRTC - like overlay or underlay planes.
	 */
	possible_crtcs = 1 << plane_id;
	if (plane_id >= dm->dc->caps.max_streams)
		possible_crtcs = 0xff;
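	/* Example (assuming a hypothetical ASIC with max_streams = 4):
	 * primary plane 2 advertises possible_crtcs = 1 << 2 = 0x4, i.e.
	 * only CRTC 2, while a plane with plane_id >= 4 (overlay/underlay)
	 * advertises 0xff, i.e. every CRTC.
	 */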

	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);

	if (ret) {
		DRM_ERROR("KMS: Failed to initialize plane\n");
		kfree(plane);
		return ret;
	}

	if (mode_info)
		mode_info->planes[plane_id] = plane;

	return ret;
}

static void register_backlight_device(struct amdgpu_display_manager *dm,
				      struct dc_link *link)
{
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none) {
		/*
		 * Even if registration failed, we should continue with
		 * DM initialization because not having a backlight control
		 * is better than a black screen.
		 */
		amdgpu_dm_register_backlight_device(dm);

		if (dm->backlight_dev)
			dm->backlight_link = link;
	}
#endif
}

/*
 * In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with DAL component
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	int32_t i;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct amdgpu_encoder *aencoder = NULL;
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	uint32_t link_cnt;
	int32_t primary_planes;
	enum dc_connection_type new_connection_type = dc_connection_none;
	const struct dc_plane_cap *plane;

	dm->display_indexes_num = dm->dc->caps.max_streams;
	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	link_cnt = dm->dc->caps.max_links;
	if (amdgpu_dm_mode_config_init(dm->adev)) {
		DRM_ERROR("DM: Failed to initialize mode config\n");
		return -EINVAL;
	}

	/* There is one primary plane per CRTC */
	primary_planes = dm->dc->caps.max_streams;
	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);

	/*
	 * Initialize primary planes, implicit planes for legacy IOCTLS.
	 * Order is reversed to match iteration order in atomic check.
	 */
	for (i = (primary_planes - 1); i >= 0; i--) {
		plane = &dm->dc->caps.planes[i];

		if (initialize_plane(dm, mode_info, i,
				     DRM_PLANE_TYPE_PRIMARY, plane)) {
			DRM_ERROR("KMS: Failed to initialize primary plane\n");
			goto fail;
		}
	}

	/*
	 * Initialize overlay planes, index starting after primary planes.
	 * These planes have a higher DRM index than the primary planes since
	 * they should be considered as having a higher z-order.
	 * Order is reversed to match iteration order in atomic check.
	 *
	 * Only support DCN for now, and only expose one so we don't encourage
	 * userspace to use up all the pipes.
	 */
	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];

		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
			continue;

		if (!plane->blends_with_above || !plane->blends_with_below)
			continue;

		if (!plane->pixel_format_support.argb8888)
			continue;

		if (initialize_plane(dm, NULL, primary_planes + i,
				     DRM_PLANE_TYPE_OVERLAY, plane)) {
			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
			goto fail;
		}

		/* Only create one overlay plane. */
		break;
	}

	for (i = 0; i < dm->dc->caps.max_streams; i++)
		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
			DRM_ERROR("KMS: Failed to initialize crtc\n");
			goto fail;
		}

	/* loops over all connectors on the board */
	for (i = 0; i < link_cnt; i++) {
		struct dc_link *link = NULL;

		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
			DRM_ERROR(
				"KMS: Cannot support more than %d display indexes\n",
					AMDGPU_DM_MAX_DISPLAY_INDEX);
			continue;
		}

		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
		if (!aconnector)
			goto fail;

		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
		if (!aencoder)
			goto fail;

		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
			DRM_ERROR("KMS: Failed to initialize encoder\n");
			goto fail;
		}

		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
			DRM_ERROR("KMS: Failed to initialize connector\n");
			goto fail;
		}

		link = dc_get_link_at_index(dm->dc, i);

		if (!dc_link_detect_sink(link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(link);
			amdgpu_dm_update_connector_after_detect(aconnector);

		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
			amdgpu_dm_update_connector_after_detect(aconnector);
			register_backlight_device(dm, link);
			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
				amdgpu_dm_set_psr_caps(link);
		}
	}

	/* Software is initialized. Now we can register interrupt handlers. */
	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
		if (dce60_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		if (dce110_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_RAVEN:
	case CHIP_NAVI12:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_VANGOGH:
		if (dcn10_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#endif
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		goto fail;
	}

	return 0;
fail:
	kfree(aencoder);
	kfree(aconnector);

	return -EINVAL;
}

static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
	drm_mode_config_cleanup(dm->ddev);
	drm_atomic_private_obj_fini(&dm->atomic_obj);
	return;
}

/******************************************************************************
 * amdgpu_display_funcs functions
 *****************************************************************************/

/*
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
}

static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
	.backlight_set_level = NULL, /* never called for DC */
	.backlight_get_level = NULL, /* never called for DC */
	.hpd_sense = NULL, /* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos, /* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
};

#if defined(CONFIG_DEBUG_KERNEL_DC)

static ssize_t s3_debug_store(struct device *device,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t count)
{
	int ret;
	int s3_state;
	struct drm_device *drm_dev = dev_get_drvdata(device);
	struct amdgpu_device *adev = drm_to_adev(drm_dev);

	ret = kstrtoint(buf, 0, &s3_state);

	if (ret == 0) {
		if (s3_state) {
			dm_resume(adev);
			drm_kms_helper_hotplug_event(adev_to_drm(adev));
		} else
			dm_suspend(adev);
	}

	return ret == 0 ? count : 0;
}

DEVICE_ATTR_WO(s3_debug);

#endif

static int dm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_OLAND:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 2;
		adev->mode_info.num_dig = 2;
		break;
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_KAVERI:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_CARRIZO:
		adev->mode_info.num_crtc = 3;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_STONEY:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
	case CHIP_POLARIS10:
	case CHIP_VEGAM:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_RAVEN:
	case CHIP_RENOIR:
	case CHIP_VANGOGH:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 4;
		adev->mode_info.num_dig = 4;
		break;
	case CHIP_NAVI10:
	case CHIP_NAVI12:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_NAVI14:
	case CHIP_DIMGREY_CAVEFISH:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
#endif
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	amdgpu_dm_set_irq_funcs(adev);

	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dm_display_funcs;

	/*
	 * Note: Do NOT change adev->audio_endpt_rreg and
	 * adev->audio_endpt_wreg because they are initialised in
	 * amdgpu_device_init()
	 */
#if defined(CONFIG_DEBUG_KERNEL_DC)
	device_create_file(
		adev_to_drm(adev)->dev,
		&dev_attr_s3_debug);
#endif

	return 0;
}

static bool modeset_required(struct drm_crtc_state *crtc_state,
			     struct dc_stream_state *new_stream,
			     struct dc_stream_state *old_stream)
{
	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
}

static bool modereset_required(struct drm_crtc_state *crtc_state)
{
	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
}

static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}

static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};

static void get_min_max_dc_plane_scaling(struct drm_device *dev,
					 struct drm_framebuffer *fb,
					 int *min_downscale, int *max_upscale)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];

	switch (fb->format->format) {
	case DRM_FORMAT_P010:
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV21:
		*max_upscale = plane_cap->max_upscale_factor.nv12;
		*min_downscale = plane_cap->max_downscale_factor.nv12;
		break;

	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		*max_upscale = plane_cap->max_upscale_factor.fp16;
		*min_downscale = plane_cap->max_downscale_factor.fp16;
		break;

	default:
		*max_upscale = plane_cap->max_upscale_factor.argb8888;
		*min_downscale = plane_cap->max_downscale_factor.argb8888;
		break;
	}

	/*
	 * A factor of 1 in the plane_cap means to not allow scaling, ie. use a
	 * scaling factor of 1.0 == 1000 units.
	 */
	if (*max_upscale == 1)
		*max_upscale = 1000;

	if (*min_downscale == 1)
		*min_downscale = 1000;
}

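/*
 * Illustrative note on the fixed-point scale units used above and in
 * fill_dc_scaling_info(): 1000 units == a 1.0 scaling factor, so a
 * min_downscale of 250 allows shrinking a plane to 1/4 of its source
 * size and a max_upscale of 16000 allows growing it 16x. The 250/16000
 * pair used as a fallback below is an assumed default, not a queried cap.
 */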

static int fill_dc_scaling_info(const struct drm_plane_state *state,
				struct dc_scaling_info *scaling_info)
{
	int scale_w, scale_h, min_downscale, max_upscale;

	memset(scaling_info, 0, sizeof(*scaling_info));

	/* Source is fixed 16.16 but we ignore mantissa for now... */
	scaling_info->src_rect.x = state->src_x >> 16;
	scaling_info->src_rect.y = state->src_y >> 16;
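	/*
	 * e.g. a DRM src_x of 0x00058000 encodes 5.5 in 16.16 fixed point;
	 * the shift truncates the fractional part and yields a source x of 5.
	 */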

	scaling_info->src_rect.width = state->src_w >> 16;
	if (scaling_info->src_rect.width == 0)
		return -EINVAL;

	scaling_info->src_rect.height = state->src_h >> 16;
	if (scaling_info->src_rect.height == 0)
		return -EINVAL;

	scaling_info->dst_rect.x = state->crtc_x;
	scaling_info->dst_rect.y = state->crtc_y;

	if (state->crtc_w == 0)
		return -EINVAL;

	scaling_info->dst_rect.width = state->crtc_w;

	if (state->crtc_h == 0)
		return -EINVAL;

	scaling_info->dst_rect.height = state->crtc_h;

	/* DRM doesn't specify clipping on destination output. */
	scaling_info->clip_rect = scaling_info->dst_rect;

	/* Validate scaling per-format with DC plane caps */
	if (state->plane && state->plane->dev && state->fb) {
		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
					     &min_downscale, &max_upscale);
	} else {
		min_downscale = 250;
		max_upscale = 16000;
	}

	scale_w = scaling_info->dst_rect.width * 1000 /
		  scaling_info->src_rect.width;

	if (scale_w < min_downscale || scale_w > max_upscale)
		return -EINVAL;

	scale_h = scaling_info->dst_rect.height * 1000 /
		  scaling_info->src_rect.height;

	if (scale_h < min_downscale || scale_h > max_upscale)
		return -EINVAL;

	/*
	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
	 * assume reasonable defaults based on the format.
	 */

	return 0;
}

static void
fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
				 uint64_t tiling_flags)
{
	/* Fill GFX8 params */
	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		/* XXX fix me for VI */
		tiling_info->gfx8.num_banks = num_banks;
		tiling_info->gfx8.array_mode =
				DC_ARRAY_2D_TILED_THIN1;
		tiling_info->gfx8.tile_split = tile_split;
		tiling_info->gfx8.bank_width = bankw;
		tiling_info->gfx8.bank_height = bankh;
		tiling_info->gfx8.tile_aspect = mtaspect;
		tiling_info->gfx8.tile_mode =
				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
			== DC_ARRAY_1D_TILED_THIN1) {
		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
	}

	tiling_info->gfx8.pipe_config =
			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
}

static void
fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
				  union dc_tiling_info *tiling_info)
{
	tiling_info->gfx9.num_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;
	tiling_info->gfx9.num_banks =
		adev->gfx.config.gb_addr_config_fields.num_banks;
	tiling_info->gfx9.pipe_interleave =
		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
	tiling_info->gfx9.num_shader_engines =
		adev->gfx.config.gb_addr_config_fields.num_se;
	tiling_info->gfx9.max_compressed_frags =
		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
	tiling_info->gfx9.num_rb_per_se =
		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
	tiling_info->gfx9.shaderEnable = 1;
	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
	    adev->asic_type == CHIP_NAVY_FLOUNDER ||
	    adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
	    adev->asic_type == CHIP_VANGOGH)
		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
}

static int
validate_dcc(struct amdgpu_device *adev,
	     const enum surface_pixel_format format,
	     const enum dc_rotation_angle rotation,
	     const union dc_tiling_info *tiling_info,
	     const struct dc_plane_dcc_param *dcc,
	     const struct dc_plane_address *address,
	     const struct plane_size *plane_size)
{
	struct dc *dc = adev->dm.dc;
	struct dc_dcc_surface_param input;
	struct dc_surface_dcc_cap output;

	memset(&input, 0, sizeof(input));
	memset(&output, 0, sizeof(output));

	if (!dcc->enable)
		return 0;

	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
	    !dc->cap_funcs.get_dcc_compression_cap)
		return -EINVAL;

	input.format = format;
	input.surface_size.width = plane_size->surface_size.width;
	input.surface_size.height = plane_size->surface_size.height;
	input.swizzle_mode = tiling_info->gfx9.swizzle;

	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
		input.scan = SCAN_DIRECTION_HORIZONTAL;
	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
		input.scan = SCAN_DIRECTION_VERTICAL;

	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
		return -EINVAL;

	if (!output.capable)
		return -EINVAL;

	if (dcc->independent_64b_blks == 0 &&
	    output.grph.rgb.independent_64b_blks != 0)
		return -EINVAL;

	return 0;
}

static bool
modifier_has_dcc(uint64_t modifier)
{
	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
}

static unsigned
modifier_gfx9_swizzle_mode(uint64_t modifier)
{
	if (modifier == DRM_FORMAT_MOD_LINEAR)
		return 0;

	return AMD_FMT_MOD_GET(TILE, modifier);
}

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
{
	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
}

static void
fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
				    union dc_tiling_info *tiling_info,
				    uint64_t modifier)
{
	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);

	fill_gfx9_tiling_info_from_device(adev, tiling_info);

	if (!IS_AMD_FMT_MOD(modifier))
		return;

	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);

	if (adev->family >= AMDGPU_FAMILY_NV) {
		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
	} else {
		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;

		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
	}
}

enum dm_micro_swizzle {
	MICRO_SWIZZLE_Z = 0,
	MICRO_SWIZZLE_S = 1,
	MICRO_SWIZZLE_D = 2,
	MICRO_SWIZZLE_R = 3
};

static bool dm_plane_format_mod_supported(struct drm_plane *plane,
					  uint32_t format,
					  uint64_t modifier)
{
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	const struct drm_format_info *info = drm_format_info(format);

	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;

	if (!info)
		return false;

	/*
	 * We always have to allow this modifier, because core DRM still
	 * checks LINEAR support if userspace does not provide modifiers.
	 */
	if (modifier == DRM_FORMAT_MOD_LINEAR)
		return true;

	/*
	 * The arbitrary tiling support for multiplane formats has not been hooked
	 * up.
	 */
	if (info->num_planes > 1)
		return false;

	/*
	 * For D swizzle the canonical modifier depends on the bpp, so check
	 * it here.
	 */
	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
	    adev->family >= AMDGPU_FAMILY_NV) {
		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
			return false;
	}

	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
	    info->cpp[0] < 8)
		return false;

	if (modifier_has_dcc(modifier)) {
		/* Per radeonsi comments 16/64 bpp are more complicated. */
		if (info->cpp[0] != 4)
			return false;
	}

	return true;
}

static void
add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
{
	if (!*mods)
		return;

	if (*cap - *size < 1) {
		uint64_t new_cap = *cap * 2;
		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);

		if (!new_mods) {
			kfree(*mods);
			*mods = NULL;
			return;
		}

		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
		kfree(*mods);
		*mods = new_mods;
		*cap = new_cap;
	}

	(*mods)[*size] = mod;
	*size += 1;
}

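/*
 * Sketch of the growth behaviour above: the list starts at the capacity
 * chosen by get_plane_modifiers() (128 entries) and doubles whenever it
 * fills up. On allocation failure the whole list is freed and *mods
 * becomes NULL, which later add_modifier() calls treat as a no-op and
 * which get_plane_modifiers() ultimately turns into -ENOMEM.
 */
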
static void
add_gfx9_modifiers(const struct amdgpu_device *adev,
		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
	int pipe_xor_bits = min(8, pipes +
				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
	int bank_xor_bits = min(8 - pipe_xor_bits,
				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);


	if (adev->family == AMDGPU_FAMILY_RV) {
		/* Raven2 and later */
		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;

		/*
		 * No _D DCC swizzles yet because we only allow 32bpp, which
		 * doesn't support _D on DCN
		 */

		if (has_constant_encode) {
			add_modifier(mods, size, capacity, AMD_FMT_MOD |
				     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
				     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
				     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
				     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
				     AMD_FMT_MOD_SET(DCC, 1) |
				     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
				     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
				     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
		}

		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
			     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
			     AMD_FMT_MOD_SET(DCC, 1) |
			     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
			     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
			     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));

		if (has_constant_encode) {
			add_modifier(mods, size, capacity, AMD_FMT_MOD |
				     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
				     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
				     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
				     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
				     AMD_FMT_MOD_SET(DCC, 1) |
				     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
				     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
				     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
				     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
				     AMD_FMT_MOD_SET(RB, rb) |
				     AMD_FMT_MOD_SET(PIPE, pipes));
		}

		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
			     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
			     AMD_FMT_MOD_SET(DCC, 1) |
			     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
			     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
			     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
			     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
			     AMD_FMT_MOD_SET(RB, rb) |
			     AMD_FMT_MOD_SET(PIPE, pipes));
	}

	/*
	 * Only supported for 64bpp on Raven, will be filtered on format in
	 * dm_plane_format_mod_supported.
	 */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));

	if (adev->family == AMDGPU_FAMILY_RV) {
		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
			     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
	}

	/*
	 * Only supported for 64bpp on Raven, will be filtered on format in
	 * dm_plane_format_mod_supported.
	 */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	if (adev->family == AMDGPU_FAMILY_RV) {
		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
			     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
	}
}

static void
add_gfx10_1_modifiers(const struct amdgpu_device *adev,
		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		     AMD_FMT_MOD_SET(DCC, 1) |
		     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		     AMD_FMT_MOD_SET(DCC, 1) |
		     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
		     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));


	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
}

static void
add_gfx10_3_modifiers(const struct amdgpu_device *adev,
		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		     AMD_FMT_MOD_SET(PACKERS, pkrs) |
		     AMD_FMT_MOD_SET(DCC, 1) |
		     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
		     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		     AMD_FMT_MOD_SET(PACKERS, pkrs) |
		     AMD_FMT_MOD_SET(DCC, 1) |
		     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
		     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
		     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		     AMD_FMT_MOD_SET(PACKERS, pkrs));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		     AMD_FMT_MOD_SET(PACKERS, pkrs));

	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
}

static int
get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
{
	uint64_t size = 0, capacity = 128;
	*mods = NULL;

	/* We have not hooked up any pre-GFX9 modifiers. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return 0;

	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);

	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
		return *mods ? 0 : -ENOMEM;
	}

	switch (adev->family) {
	case AMDGPU_FAMILY_AI:
	case AMDGPU_FAMILY_RV:
		add_gfx9_modifiers(adev, mods, &size, &capacity);
		break;
	case AMDGPU_FAMILY_NV:
	case AMDGPU_FAMILY_VGH:
		if (adev->asic_type >= CHIP_SIENNA_CICHLID)
			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
		else
			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
		break;
	}

	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);

	/* INVALID marks the end of the list. */
	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);

	if (!*mods)
		return -ENOMEM;

	return 0;
}

static int
fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
					  const struct amdgpu_framebuffer *afb,
					  const enum surface_pixel_format format,
					  const enum dc_rotation_angle rotation,
					  const struct plane_size *plane_size,
					  union dc_tiling_info *tiling_info,
					  struct dc_plane_dcc_param *dcc,
					  struct dc_plane_address *address,
					  const bool force_disable_dcc)
{
	const uint64_t modifier = afb->base.modifier;
	int ret;

	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);

	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
		uint64_t dcc_address = afb->address + afb->base.offsets[1];

		dcc->enable = 1;
		dcc->meta_pitch = afb->base.pitches[1];
		dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);

		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
	}

	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
	if (ret)
		return ret;

	return 0;
}

static int
fill_plane_buffer_attributes(struct amdgpu_device *adev,
			     const struct amdgpu_framebuffer *afb,
			     const enum surface_pixel_format format,
			     const enum dc_rotation_angle rotation,
			     const uint64_t tiling_flags,
			     union dc_tiling_info *tiling_info,
			     struct plane_size *plane_size,
			     struct dc_plane_dcc_param *dcc,
			     struct dc_plane_address *address,
			     bool tmz_surface,
			     bool force_disable_dcc)
{
	const struct drm_framebuffer *fb = &afb->base;
	int ret;

	memset(tiling_info, 0, sizeof(*tiling_info));
	memset(plane_size, 0, sizeof(*plane_size));
	memset(dcc, 0, sizeof(*dcc));
	memset(address, 0, sizeof(*address));

	address->tmz_surface = tmz_surface;

	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
		uint64_t addr = afb->address + fb->offsets[0];

		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		address->type = PLN_ADDR_TYPE_GRAPHICS;
		address->grph.addr.low_part = lower_32_bits(addr);
		address->grph.addr.high_part = upper_32_bits(addr);
	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
		uint64_t luma_addr = afb->address + fb->offsets[0];
		uint64_t chroma_addr = afb->address + fb->offsets[1];

		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		plane_size->chroma_size.x = 0;
		plane_size->chroma_size.y = 0;
		/* TODO: set these based on surface format */
		plane_size->chroma_size.width = fb->width / 2;
		plane_size->chroma_size.height = fb->height / 2;

		plane_size->chroma_pitch =
			fb->pitches[1] / fb->format->cpp[1];

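		/*
		 * e.g. for a 1920x1080 NV12 framebuffer the luma plane is
		 * 1920x1080 while the interleaved CbCr plane holds 960x540
		 * samples, hence the fixed /2 subsampling assumed above.
		 */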
		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
		address->video_progressive.luma_addr.low_part =
			lower_32_bits(luma_addr);
		address->video_progressive.luma_addr.high_part =
			upper_32_bits(luma_addr);
		address->video_progressive.chroma_addr.low_part =
			lower_32_bits(chroma_addr);
		address->video_progressive.chroma_addr.high_part =
			upper_32_bits(chroma_addr);
	}

	if (adev->family >= AMDGPU_FAMILY_AI) {
		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
								rotation, plane_size,
								tiling_info, dcc,
								address,
								force_disable_dcc);
		if (ret)
			return ret;
	} else {
		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
	}

	return 0;
}

static void
fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
			       bool *per_pixel_alpha, bool *global_alpha,
			       int *global_alpha_value)
{
	*per_pixel_alpha = false;
	*global_alpha = false;
	*global_alpha_value = 0xff;

	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
		return;

	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
		static const uint32_t alpha_formats[] = {
			DRM_FORMAT_ARGB8888,
			DRM_FORMAT_RGBA8888,
			DRM_FORMAT_ABGR8888,
		};
		uint32_t format = plane_state->fb->format->format;
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
			if (format == alpha_formats[i]) {
				*per_pixel_alpha = true;
				break;
			}
		}
	}

	if (plane_state->alpha < 0xffff) {
		*global_alpha = true;
		*global_alpha_value = plane_state->alpha >> 8;
	}
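	/*
	 * Note: DRM carries plane alpha as 16 bits (0xffff == opaque) while
	 * DC takes 8 bits, so e.g. a DRM alpha of 0x8000 (~50%) becomes a
	 * DC global alpha value of 0x80 after the shift above.
	 */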
}

static int
fill_plane_color_attributes(const struct drm_plane_state *plane_state,
			    const enum surface_pixel_format format,
			    enum dc_color_space *color_space)
{
	bool full_range;

	*color_space = COLOR_SPACE_SRGB;

	/* DRM color properties only affect non-RGB formats. */
	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
		return 0;

	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);

	switch (plane_state->color_encoding) {
	case DRM_COLOR_YCBCR_BT601:
		if (full_range)
			*color_space = COLOR_SPACE_YCBCR601;
		else
			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
		break;

	case DRM_COLOR_YCBCR_BT709:
		if (full_range)
			*color_space = COLOR_SPACE_YCBCR709;
		else
			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
		break;

	case DRM_COLOR_YCBCR_BT2020:
		if (full_range)
			*color_space = COLOR_SPACE_2020_YCBCR;
		else
			return -EINVAL;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static int
fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
			    const struct drm_plane_state *plane_state,
			    const uint64_t tiling_flags,
			    struct dc_plane_info *plane_info,
			    struct dc_plane_address *address,
			    bool tmz_surface,
			    bool force_disable_dcc)
{
	const struct drm_framebuffer *fb = plane_state->fb;
	const struct amdgpu_framebuffer *afb =
		to_amdgpu_framebuffer(plane_state->fb);
	struct drm_format_name_buf format_name;
	int ret;

	memset(plane_info, 0, sizeof(*plane_info));

	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		plane_info->format =
			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
		break;
	case DRM_FORMAT_RGB565:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
		break;
	case DRM_FORMAT_NV21:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
		break;
	case DRM_FORMAT_NV12:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
		break;
	case DRM_FORMAT_P010:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
		break;
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
		break;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
		break;
	default:
		DRM_ERROR(
			"Unsupported screen format %s\n",
			drm_get_format_name(fb->format->format, &format_name));
		return -EINVAL;
	}

	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
	case DRM_MODE_ROTATE_0:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	case DRM_MODE_ROTATE_90:
		plane_info->rotation = ROTATION_ANGLE_90;
		break;
	case DRM_MODE_ROTATE_180:
		plane_info->rotation = ROTATION_ANGLE_180;
		break;
	case DRM_MODE_ROTATE_270:
		plane_info->rotation = ROTATION_ANGLE_270;
		break;
	default:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	}

	plane_info->visible = true;
	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;

	plane_info->layer_index = 0;

	ret = fill_plane_color_attributes(plane_state, plane_info->format,
					  &plane_info->color_space);
	if (ret)
		return ret;

	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
					   plane_info->rotation, tiling_flags,
					   &plane_info->tiling_info,
					   &plane_info->plane_size,
					   &plane_info->dcc, address, tmz_surface,
					   force_disable_dcc);
	if (ret)
		return ret;

	fill_blending_from_plane_state(
		plane_state, &plane_info->per_pixel_alpha,
		&plane_info->global_alpha, &plane_info->global_alpha_value);

	return 0;
}

static int fill_dc_plane_attributes(struct amdgpu_device *adev,
				    struct dc_plane_state *dc_plane_state,
				    struct drm_plane_state *plane_state,
				    struct drm_crtc_state *crtc_state)
{
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
	struct dc_scaling_info scaling_info;
	struct dc_plane_info plane_info;
	int ret;
	bool force_disable_dcc = false;

	ret = fill_dc_scaling_info(plane_state, &scaling_info);
	if (ret)
		return ret;

	dc_plane_state->src_rect = scaling_info.src_rect;
	dc_plane_state->dst_rect = scaling_info.dst_rect;
	dc_plane_state->clip_rect = scaling_info.clip_rect;
	dc_plane_state->scaling_quality = scaling_info.scaling_quality;

	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
	ret = fill_dc_plane_info_and_addr(adev, plane_state,
					  afb->tiling_flags,
					  &plane_info,
					  &dc_plane_state->address,
					  afb->tmz_surface,
					  force_disable_dcc);
	if (ret)
		return ret;

	dc_plane_state->format = plane_info.format;
	dc_plane_state->color_space = plane_info.color_space;
	dc_plane_state->plane_size = plane_info.plane_size;
	dc_plane_state->rotation = plane_info.rotation;
	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
	dc_plane_state->stereo_format = plane_info.stereo_format;
	dc_plane_state->tiling_info = plane_info.tiling_info;
	dc_plane_state->visible = plane_info.visible;
	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
	dc_plane_state->global_alpha = plane_info.global_alpha;
	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
	dc_plane_state->dcc = plane_info.dcc;
	dc_plane_state->layer_index = plane_info.layer_index; /* Always returns 0 */

	/*
	 * Always set input transfer function, since plane state is refreshed
	 * every time.
	 */
	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
	if (ret)
		return ret;

	return 0;
}

static void update_stream_scaling_settings(const struct drm_display_mode *mode,
					   const struct dm_connector_state *dm_state,
					   struct dc_stream_state *stream)
{
	enum amdgpu_rmx_type rmx_type;

	struct rect src = { 0 }; /* viewport in composition space */
	struct rect dst = { 0 }; /* stream addressable area */

	/* no mode. nothing to be done */
	if (!mode)
		return;

	/* Full screen scaling by default */
	src.width = mode->hdisplay;
	src.height = mode->vdisplay;
	dst.width = stream->timing.h_addressable;
	dst.height = stream->timing.v_addressable;

	if (dm_state) {
		rmx_type = dm_state->scaling;
		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
			if (src.width * dst.height <
					src.height * dst.width) {
				/* height needs less upscaling/more downscaling */
				dst.width = src.width *
						dst.height / src.height;
			} else {
				/* width needs less upscaling/more downscaling */
				dst.height = src.height *
						dst.width / src.width;
			}
		} else if (rmx_type == RMX_CENTER) {
			dst = src;
		}

		dst.x = (stream->timing.h_addressable - dst.width) / 2;
		dst.y = (stream->timing.v_addressable - dst.height) / 2;

		if (dm_state->underscan_enable) {
			dst.x += dm_state->underscan_hborder / 2;
			dst.y += dm_state->underscan_vborder / 2;
			dst.width -= dm_state->underscan_hborder;
			dst.height -= dm_state->underscan_vborder;
		}
	}
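	/*
	 * Worked example of the aspect-preserving branch above: a 1920x1080
	 * mode on a 1680x1050 stream compares 1920*1050 against 1080*1680;
	 * the else branch wins, so dst becomes 1680x945 and is then
	 * letterboxed at y = (1050 - 945) / 2 = 52.
	 */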

	stream->src = src;
	stream->dst = dst;

	DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
			dst.x, dst.y, dst.width, dst.height);

}

static enum dc_color_depth
convert_color_depth_from_display_info(const struct drm_connector *connector,
				      bool is_y420, int requested_bpc)
{
	uint8_t bpc;

	if (is_y420) {
		bpc = 8;

		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
			bpc = 16;
		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
			bpc = 12;
		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
			bpc = 10;
	} else {
		bpc = (uint8_t)connector->display_info.bpc;
		/* Assume 8 bpc by default if no bpc is specified. */
		bpc = bpc ? bpc : 8;
	}

	if (requested_bpc > 0) {
		/*
		 * Cap display bpc based on the user requested value.
		 *
		 * The value for state->max_bpc may not be correctly updated
		 * depending on when the connector gets added to the state
		 * or if this was called outside of atomic check, so it
		 * can't be used directly.
		 */
		bpc = min_t(u8, bpc, requested_bpc);

		/* Round down to the nearest even number. */
		bpc = bpc - (bpc & 1);
	}
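	/*
	 * e.g. a panel reporting 12 bpc with a requested max of 11 caps to
	 * 11 via min_t(), then (11 & 1) rounds it down to an even 10 bpc.
	 */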

	switch (bpc) {
	case 0:
		/*
		 * Temporary Work around, DRM doesn't parse color depth for
		 * EDID revision before 1.4
		 * TODO: Fix edid parsing
		 */
		return COLOR_DEPTH_888;
	case 6:
		return COLOR_DEPTH_666;
	case 8:
		return COLOR_DEPTH_888;
	case 10:
		return COLOR_DEPTH_101010;
	case 12:
		return COLOR_DEPTH_121212;
	case 14:
		return COLOR_DEPTH_141414;
	case 16:
		return COLOR_DEPTH_161616;
	default:
		return COLOR_DEPTH_UNDEFINED;
	}
}

static enum dc_aspect_ratio
get_aspect_ratio(const struct drm_display_mode *mode_in)
{
	/* 1-1 mapping, since both enums follow the HDMI spec. */
	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
}

static enum dc_color_space
get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
{
	enum dc_color_space color_space = COLOR_SPACE_SRGB;

	switch (dc_crtc_timing->pixel_encoding) {
	case PIXEL_ENCODING_YCBCR422:
	case PIXEL_ENCODING_YCBCR444:
	case PIXEL_ENCODING_YCBCR420:
	{
		/*
		 * 27030khz is the separation point between HDTV and SDTV
		 * according to HDMI spec, we use YCbCr709 and YCbCr601
		 * respectively
		 */
		if (dc_crtc_timing->pix_clk_100hz > 270300) {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR709_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR709;
		} else {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR601_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR601;
		}

	}
	break;
	case PIXEL_ENCODING_RGB:
		color_space = COLOR_SPACE_SRGB;
		break;

	default:
		WARN_ON(1);
		break;
	}

	return color_space;
}

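/*
 * Illustrative check of the 27.03 MHz cutoff above: pix_clk_100hz is in
 * units of 100 Hz, so 270300 == 27.03 MHz. A 1080p60 timing at 148.5 MHz
 * (pix_clk_100hz == 1485000) therefore selects BT.709, while 480p at
 * 27 MHz (270000) stays on BT.601.
 */
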
static bool adjust_colour_depth_from_display_info(
	struct dc_crtc_timing *timing_out,
	const struct drm_display_info *info)
{
	enum dc_color_depth depth = timing_out->display_color_depth;
	int normalized_clk;
	do {
		normalized_clk = timing_out->pix_clk_100hz / 10;
		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
			normalized_clk /= 2;
		/* Adjusting pix clock following on HDMI spec based on colour depth */
		switch (depth) {
		case COLOR_DEPTH_888:
			break;
		case COLOR_DEPTH_101010:
			normalized_clk = (normalized_clk * 30) / 24;
			break;
		case COLOR_DEPTH_121212:
			normalized_clk = (normalized_clk * 36) / 24;
			break;
		case COLOR_DEPTH_161616:
			normalized_clk = (normalized_clk * 48) / 24;
			break;
		default:
			/* The above depths are the only ones valid for HDMI. */
			return false;
		}
		if (normalized_clk <= info->max_tmds_clock) {
			timing_out->display_color_depth = depth;
			return true;
		}
	} while (--depth > COLOR_DEPTH_666);
	return false;
}

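/*
 * Worked example (assumed numbers): 4K60 has a 594 MHz pixel clock
 * (pix_clk_100hz == 5940000, normalized to 594000 kHz). As YCbCr 4:2:0
 * that halves to 297000 kHz, and at 10 bpc becomes 297000 * 30 / 24 =
 * 371250 kHz, which fits an HDMI 2.0 sink's 600000 kHz max_tmds_clock,
 * so 10-bit is kept; on a 300000 kHz sink the loop steps down to 8-bit.
 */
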
42ba01fc
NK
4914static void fill_stream_properties_from_drm_display_mode(
4915 struct dc_stream_state *stream,
4916 const struct drm_display_mode *mode_in,
4917 const struct drm_connector *connector,
4918 const struct drm_connector_state *connector_state,
cbd14ae7
SW
4919 const struct dc_stream_state *old_stream,
4920 int requested_bpc)
e7b07cee
HW
4921{
4922 struct dc_crtc_timing *timing_out = &stream->timing;
fe61a2f1 4923 const struct drm_display_info *info = &connector->display_info;
d4252eee 4924 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
1cb1d477
WL
4925 struct hdmi_vendor_infoframe hv_frame;
4926 struct hdmi_avi_infoframe avi_frame;
e7b07cee 4927
acf83f86
WL
4928 memset(&hv_frame, 0, sizeof(hv_frame));
4929 memset(&avi_frame, 0, sizeof(avi_frame));
4930
e7b07cee
HW
4931 timing_out->h_border_left = 0;
4932 timing_out->h_border_right = 0;
4933 timing_out->v_border_top = 0;
4934 timing_out->v_border_bottom = 0;
4935 /* TODO: un-hardcode */
fe61a2f1 4936 if (drm_mode_is_420_only(info, mode_in)
ceb3dbb4 4937 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
fe61a2f1 4938 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
d4252eee
SW
4939 else if (drm_mode_is_420_also(info, mode_in)
4940 && aconnector->force_yuv420_output)
4941 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
fe61a2f1 4942 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
ceb3dbb4 4943 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
e7b07cee
HW
4944 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4945 else
4946 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4947
4948 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4949 timing_out->display_color_depth = convert_color_depth_from_display_info(
4950 connector,
4951 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4952 requested_bpc);
4953 timing_out->scan_type = SCANNING_TYPE_NODATA;
4954 timing_out->hdmi_vic = 0;
4955
4956 if (old_stream) {
4957 timing_out->vic = old_stream->timing.vic;
4958 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4959 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4960 } else {
4961 timing_out->vic = drm_match_cea_mode(mode_in);
4962 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4963 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4964 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4965 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4966 }
e7b07cee 4967
4968 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4969 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4970 timing_out->vic = avi_frame.video_code;
4971 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4972 timing_out->hdmi_vic = hv_frame.vic;
4973 }
4974
4975 timing_out->h_addressable = mode_in->crtc_hdisplay;
4976 timing_out->h_total = mode_in->crtc_htotal;
4977 timing_out->h_sync_width =
4978 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4979 timing_out->h_front_porch =
4980 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4981 timing_out->v_total = mode_in->crtc_vtotal;
4982 timing_out->v_addressable = mode_in->crtc_vdisplay;
4983 timing_out->v_front_porch =
4984 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4985 timing_out->v_sync_width =
4986 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
380604e2 4987 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
e7b07cee 4988 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4989
4990 stream->output_color_space = get_output_color_space(timing_out);
4991
4992 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4993 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4994 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4995 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4996 drm_mode_is_420_also(info, mode_in) &&
4997 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4998 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4999 adjust_colour_depth_from_display_info(timing_out, info);
5000 }
5001 }
5002}
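/*
 * Editor's note, a worked example of the crtc_* mapping above using a
 * hypothetical 1920x1080 CEA timing (hsync_start 2008, hsync_end 2052,
 * htotal 2200, clock 148500 kHz):
 *
 *   h_front_porch = 2008 - 1920 = 88
 *   h_sync_width  = 2052 - 2008 = 44
 *   pix_clk_100hz = 148500 * 10 = 1485000 (units of 100 Hz)
 */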
5003
5004static void fill_audio_info(struct audio_info *audio_info,
5005 const struct drm_connector *drm_connector,
5006 const struct dc_sink *dc_sink)
5007{
5008 int i = 0;
5009 int cea_revision = 0;
5010 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5011
5012 audio_info->manufacture_id = edid_caps->manufacturer_id;
5013 audio_info->product_id = edid_caps->product_id;
5014
5015 cea_revision = drm_connector->display_info.cea_rev;
5016
090afc1e 5017 strscpy(audio_info->display_name,
d2b2562c 5018 edid_caps->display_name,
090afc1e 5019 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
e7b07cee 5020
b830ebc9 5021 if (cea_revision >= 3) {
5022 audio_info->mode_count = edid_caps->audio_mode_count;
5023
5024 for (i = 0; i < audio_info->mode_count; ++i) {
5025 audio_info->modes[i].format_code =
5026 (enum audio_format_code)
5027 (edid_caps->audio_modes[i].format_code);
5028 audio_info->modes[i].channel_count =
5029 edid_caps->audio_modes[i].channel_count;
5030 audio_info->modes[i].sample_rates.all =
5031 edid_caps->audio_modes[i].sample_rate;
5032 audio_info->modes[i].sample_size =
5033 edid_caps->audio_modes[i].sample_size;
5034 }
5035 }
5036
5037 audio_info->flags.all = edid_caps->speaker_flags;
5038
5039 /* TODO: We only check for the progressive mode, check for interlace mode too */
b830ebc9 5040 if (drm_connector->latency_present[0]) {
e7b07cee
HW
5041 audio_info->video_latency = drm_connector->video_latency[0];
5042 audio_info->audio_latency = drm_connector->audio_latency[0];
5043 }
5044
5045 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5046
5047}
5048
5049static void
5050copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5051 struct drm_display_mode *dst_mode)
5052{
5053 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5054 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5055 dst_mode->crtc_clock = src_mode->crtc_clock;
5056 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5057 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
b830ebc9 5058 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
5059 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5060 dst_mode->crtc_htotal = src_mode->crtc_htotal;
5061 dst_mode->crtc_hskew = src_mode->crtc_hskew;
5062 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5063 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5064 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5065 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5066 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5067}
5068
5069static void
5070decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5071 const struct drm_display_mode *native_mode,
5072 bool scale_enabled)
5073{
5074 if (scale_enabled) {
5075 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5076 } else if (native_mode->clock == drm_mode->clock &&
5077 native_mode->htotal == drm_mode->htotal &&
5078 native_mode->vtotal == drm_mode->vtotal) {
5079 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5080 } else {
5081 /* neither scaling nor an amdgpu-inserted mode, so nothing to patch */
5082 }
5083}
5084
5085static struct dc_sink *
5086create_fake_sink(struct amdgpu_dm_connector *aconnector)
2e0ac3d6 5087{
2e0ac3d6 5088 struct dc_sink_init_data sink_init_data = { 0 };
aed15309 5089 struct dc_sink *sink = NULL;
5090 sink_init_data.link = aconnector->dc_link;
5091 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5092
5093 sink = dc_sink_create(&sink_init_data);
423788c7 5094 if (!sink) {
2e0ac3d6 5095 DRM_ERROR("Failed to create sink!\n");
aed15309 5096 return NULL;
423788c7 5097 }
2e0ac3d6 5098 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
423788c7 5099
aed15309 5100 return sink;
5101}
5102
5103static void set_multisync_trigger_params(
5104 struct dc_stream_state *stream)
5105{
5106 if (stream->triggered_crtc_reset.enabled) {
5107 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
5108 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
5109 }
5110}
5111
5112static void set_master_stream(struct dc_stream_state *stream_set[],
5113 int stream_count)
5114{
5115 int j, highest_rfr = 0, master_stream = 0;
5116
5117 for (j = 0; j < stream_count; j++) {
5118 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5119 int refresh_rate = 0;
5120
380604e2 5121 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5122 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5123 if (refresh_rate > highest_rfr) {
5124 highest_rfr = refresh_rate;
5125 master_stream = j;
5126 }
5127 }
5128 }
5129 for (j = 0; j < stream_count; j++) {
03736f4c 5130 if (stream_set[j])
5131 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5132 }
5133}
5134
5135static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5136{
5137 int i = 0;
5138
5139 if (context->stream_count < 2)
5140 return;
5141 for (i = 0; i < context->stream_count ; i++) {
5142 if (!context->streams[i])
5143 continue;
5144 /*
5145 * TODO: add a function to read AMD VSDB bits and set
fa2123db 5146 * crtc_sync_master.multi_sync_enabled flag
1f6010a9 5147 * For now it's set to false
5148 */
5149 set_multisync_trigger_params(context->streams[i]);
5150 }
5151 set_master_stream(context->streams, context->stream_count);
5152}
5153
5154static struct dc_stream_state *
5155create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5156 const struct drm_display_mode *drm_mode,
b333730d 5157 const struct dm_connector_state *dm_state,
5158 const struct dc_stream_state *old_stream,
5159 int requested_bpc)
5160{
5161 struct drm_display_mode *preferred_mode = NULL;
391ef035 5162 struct drm_connector *drm_connector;
5163 const struct drm_connector_state *con_state =
5164 dm_state ? &dm_state->base : NULL;
0971c40e 5165 struct dc_stream_state *stream = NULL;
5166 struct drm_display_mode mode = *drm_mode;
5167 bool native_mode_found = false;
5168 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5169 int mode_refresh;
58124bf8 5170 int preferred_refresh = 0;
defeb878 5171#if defined(CONFIG_DRM_AMD_DC_DCN)
5172 struct dsc_dec_dpcd_caps dsc_caps;
5173 uint32_t link_bandwidth_kbps;
7c431455 5174#endif
aed15309 5175 struct dc_sink *sink = NULL;
b830ebc9 5176 if (aconnector == NULL) {
e7b07cee 5177 DRM_ERROR("aconnector is NULL!\n");
64245fa7 5178 return stream;
5179 }
5180
e7b07cee 5181 drm_connector = &aconnector->base;
2e0ac3d6 5182
f4ac176e 5183 if (!aconnector->dc_sink) {
5184 sink = create_fake_sink(aconnector);
5185 if (!sink)
5186 return stream;
5187 } else {
5188 sink = aconnector->dc_sink;
dcd5fb82 5189 dc_sink_retain(sink);
f4ac176e 5190 }
2e0ac3d6 5191
aed15309 5192 stream = dc_create_stream_for_sink(sink);
4562236b 5193
b830ebc9 5194 if (stream == NULL) {
e7b07cee 5195 DRM_ERROR("Failed to create stream for sink!\n");
aed15309 5196 goto finish;
5197 }
5198
5199 stream->dm_stream_context = aconnector;
5200
5201 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5202 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5203
5204 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5205 /* Search for preferred mode */
5206 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5207 native_mode_found = true;
5208 break;
5209 }
5210 }
5211 if (!native_mode_found)
5212 preferred_mode = list_first_entry_or_null(
5213 &aconnector->base.modes,
5214 struct drm_display_mode,
5215 head);
5216
5217 mode_refresh = drm_mode_vrefresh(&mode);
5218
b830ebc9 5219 if (preferred_mode == NULL) {
5220 /*
5221 * This may not be an error. The use case is when we have no
5222 * usermode calls to reset and set mode upon hotplug. In this
5223 * case, we call set mode ourselves to restore the previous mode,
5224 * and the mode list may not be filled in yet.
5225 */
f1ad2f5e 5226 DRM_DEBUG_DRIVER("No preferred mode found\n");
5227 } else {
5228 decide_crtc_timing_for_drm_display_mode(
5229 &mode, preferred_mode,
f4791779 5230 dm_state ? (dm_state->scaling != RMX_OFF) : false);
58124bf8 5231 preferred_refresh = drm_mode_vrefresh(preferred_mode);
5232 }
5233
5234 if (!dm_state)
5235 drm_mode_set_crtcinfo(&mode, 0);
5236
5237 /*
5238 * If scaling is enabled and refresh rate didn't change
5239 * we copy the vic and polarities of the old timings
5240 */
5241 if (!scale || mode_refresh != preferred_refresh)
5242 fill_stream_properties_from_drm_display_mode(stream,
cbd14ae7 5243 &mode, &aconnector->base, con_state, NULL, requested_bpc);
5244 else
5245 fill_stream_properties_from_drm_display_mode(stream,
cbd14ae7 5246 &mode, &aconnector->base, con_state, old_stream, requested_bpc);
b333730d 5247
5248 stream->timing.flags.DSC = 0;
5249
5250 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
defeb878 5251#if defined(CONFIG_DRM_AMD_DC_DCN)
5252 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5253 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6d824ed5 5254 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5255 &dsc_caps);
5256 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5257 dc_link_get_link_cap(aconnector->dc_link));
5258
0749ddeb 5259 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
bcc6aa61 5260 /* Set DSC policy according to dsc_clock_en */
5261 dc_dsc_policy_set_enable_dsc_when_not_needed(
5262 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
bcc6aa61 5263
0417df16 5264 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
df2f1015 5265 &dsc_caps,
0417df16 5266 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
9abdf392 5267 0,
5268 link_bandwidth_kbps,
5269 &stream->timing,
5270 &stream->timing.dsc_cfg))
5271 stream->timing.flags.DSC = 1;
27e84dd7 5272 /* Overwrite the stream flag if DSC is enabled through debugfs */
0749ddeb 5273 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
097e6d98 5274 stream->timing.flags.DSC = 1;
734e4c97 5275
5276 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5277 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
734e4c97 5278
5279 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5280 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5281
5282 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5283 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
097e6d98 5284 }
39a4eb85 5285#endif
df2f1015 5286 }
39a4eb85 5287
5288 update_stream_scaling_settings(&mode, dm_state, stream);
5289
5290 fill_audio_info(
5291 &stream->audio_info,
5292 drm_connector,
aed15309 5293 sink);
e7b07cee 5294
ceb3dbb4 5295 update_stream_signal(stream, sink);
9182b4cb 5296
d832fc3b 5297 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5298 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5299
5300 if (stream->link->psr_settings.psr_feature_enabled) {
5301 //
5302 // should decide stream support vsc sdp colorimetry capability
5303 // before building vsc info packet
5304 //
5305 stream->use_vsc_sdp_for_colorimetry = false;
5306 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5307 stream->use_vsc_sdp_for_colorimetry =
5308 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5309 } else {
5310 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5311 stream->use_vsc_sdp_for_colorimetry = true;
8c322309 5312 }
8a488f5d 5313 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
8c322309 5314 }
aed15309 5315finish:
dcd5fb82 5316 dc_sink_release(sink);
9e3efe3e 5317
5318 return stream;
5319}
5320
7578ecda 5321static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5322{
5323 drm_crtc_cleanup(crtc);
5324 kfree(crtc);
5325}
5326
5327static void dm_crtc_destroy_state(struct drm_crtc *crtc,
3ee6b26b 5328 struct drm_crtc_state *state)
5329{
5330 struct dm_crtc_state *cur = to_dm_crtc_state(state);
5331
5332 /* TODO: Destroy dc_stream objects once the stream object is flattened */
5333 if (cur->stream)
5334 dc_stream_release(cur->stream);
5335
5337 __drm_atomic_helper_crtc_destroy_state(state);
5338
5340 kfree(state);
5341}
5342
5343static void dm_crtc_reset_state(struct drm_crtc *crtc)
5344{
5345 struct dm_crtc_state *state;
5346
5347 if (crtc->state)
5348 dm_crtc_destroy_state(crtc, crtc->state);
5349
5350 state = kzalloc(sizeof(*state), GFP_KERNEL);
5351 if (WARN_ON(!state))
5352 return;
5353
1f8a52ec 5354 __drm_atomic_helper_crtc_reset(crtc, &state->base);
5355}
5356
5357static struct drm_crtc_state *
5358dm_crtc_duplicate_state(struct drm_crtc *crtc)
5359{
5360 struct dm_crtc_state *state, *cur;
5361
5362 cur = to_dm_crtc_state(crtc->state);
5363
5364 if (WARN_ON(!crtc->state))
5365 return NULL;
5366
2004f45e 5367 state = kzalloc(sizeof(*state), GFP_KERNEL);
5368 if (!state)
5369 return NULL;
5370
5371 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5372
5373 if (cur->stream) {
5374 state->stream = cur->stream;
5375 dc_stream_retain(state->stream);
5376 }
5377
d6ef9b41 5378 state->active_planes = cur->active_planes;
98e6436d 5379 state->vrr_infopacket = cur->vrr_infopacket;
c1ee92f9 5380 state->abm_level = cur->abm_level;
5381 state->vrr_supported = cur->vrr_supported;
5382 state->freesync_config = cur->freesync_config;
14b25846 5383 state->crc_src = cur->crc_src;
5384 state->cm_has_degamma = cur->cm_has_degamma;
5385 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5386#ifdef CONFIG_DEBUG_FS
5387 state->crc_window = cur->crc_window;
5388#endif
5389 /* TODO: Duplicate dc_stream after the stream object is flattened */
5390
5391 return &state->base;
5392}
5393
c920888c 5394#ifdef CONFIG_DEBUG_FS
8ccbfdf0 5395static int amdgpu_dm_crtc_atomic_set_property(struct drm_crtc *crtc,
5396 struct drm_crtc_state *crtc_state,
5397 struct drm_property *property,
5398 uint64_t val)
5399{
5400 struct drm_device *dev = crtc->dev;
5401 struct amdgpu_device *adev = drm_to_adev(dev);
5402 struct dm_crtc_state *dm_new_state =
5403 to_dm_crtc_state(crtc_state);
5404
5405 if (property == adev->dm.crc_win_x_start_property)
5406 dm_new_state->crc_window.x_start = val;
5407 else if (property == adev->dm.crc_win_y_start_property)
5408 dm_new_state->crc_window.y_start = val;
5409 else if (property == adev->dm.crc_win_x_end_property)
5410 dm_new_state->crc_window.x_end = val;
5411 else if (property == adev->dm.crc_win_y_end_property)
5412 dm_new_state->crc_window.y_end = val;
5413 else
5414 return -EINVAL;
5415
5416 return 0;
5417}
5418
8ccbfdf0 5419static int amdgpu_dm_crtc_atomic_get_property(struct drm_crtc *crtc,
5420 const struct drm_crtc_state *state,
5421 struct drm_property *property,
5422 uint64_t *val)
5423{
5424 struct drm_device *dev = crtc->dev;
5425 struct amdgpu_device *adev = drm_to_adev(dev);
5426 struct dm_crtc_state *dm_state =
5427 to_dm_crtc_state(state);
5428
5429 if (property == adev->dm.crc_win_x_start_property)
5430 *val = dm_state->crc_window.x_start;
5431 else if (property == adev->dm.crc_win_y_start_property)
5432 *val = dm_state->crc_window.y_start;
5433 else if (property == adev->dm.crc_win_x_end_property)
5434 *val = dm_state->crc_window.x_end;
5435 else if (property == adev->dm.crc_win_y_end_property)
5436 *val = dm_state->crc_window.y_end;
5437 else
5438 return -EINVAL;
5439
5440 return 0;
5441}
5442#endif
5443
5444static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5445{
5446 enum dc_irq_source irq_source;
5447 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
1348969a 5448 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5449 int rc;
5450
5451 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5452
5453 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5454
5455 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
5456 acrtc->crtc_id, enable ? "en" : "dis", rc);
5457 return rc;
5458}
5459
5460static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5461{
5462 enum dc_irq_source irq_source;
5463 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
1348969a 5464 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
d2574c33 5465 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
71338cb4 5466 struct amdgpu_display_manager *dm = &adev->dm;
5467 int rc = 0;
5468
5469 if (enable) {
5470 /* vblank irq on -> Only need vupdate irq in vrr mode */
5471 if (amdgpu_dm_vrr_active(acrtc_state))
5472 rc = dm_set_vupdate_irq(crtc, true);
5473 } else {
5474 /* vblank irq off -> vupdate irq off */
5475 rc = dm_set_vupdate_irq(crtc, false);
5476 }
5477
5478 if (rc)
5479 return rc;
5480
5481 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5482
5483 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5484 return -EBUSY;
5485
5486 mutex_lock(&dm->dc_lock);
5487
5488 if (enable)
5489 dm->active_vblank_irq_count++;
5490 else
5491 dm->active_vblank_irq_count--;
5492
4928b480 5493#if defined(CONFIG_DRM_AMD_DC_DCN)
5494 dc_allow_idle_optimizations(
5495 adev->dm.dc, dm->active_vblank_irq_count == 0 ? true : false);
5496
5497 DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
4928b480 5498#endif
5499
5500 mutex_unlock(&dm->dc_lock);
5501
5502 return 0;
5503}
5504
5505static int dm_enable_vblank(struct drm_crtc *crtc)
5506{
5507 return dm_set_vblank(crtc, true);
5508}
5509
5510static void dm_disable_vblank(struct drm_crtc *crtc)
5511{
5512 dm_set_vblank(crtc, false);
5513}
5514
5515 /* Only the options currently available to the driver are implemented */
5516static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5517 .reset = dm_crtc_reset_state,
5518 .destroy = amdgpu_dm_crtc_destroy,
5519 .gamma_set = drm_atomic_helper_legacy_gamma_set,
5520 .set_config = drm_atomic_helper_set_config,
5521 .page_flip = drm_atomic_helper_page_flip,
5522 .atomic_duplicate_state = dm_crtc_duplicate_state,
5523 .atomic_destroy_state = dm_crtc_destroy_state,
31aec354 5524 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
3b3b8448 5525 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
8fb843d1 5526 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
e3eff4b5 5527 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
5528 .enable_vblank = dm_enable_vblank,
5529 .disable_vblank = dm_disable_vblank,
e3eff4b5 5530 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5531#ifdef CONFIG_DEBUG_FS
5532 .atomic_set_property = amdgpu_dm_crtc_atomic_set_property,
5533 .atomic_get_property = amdgpu_dm_crtc_atomic_get_property,
5534#endif
5535};
5536
5537static enum drm_connector_status
5538amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5539{
5540 bool connected;
c84dec2f 5541 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 5542
5543 /*
5544 * Notes:
5545 * 1. This interface is NOT called in context of HPD irq.
5546 * 2. This interface *is called* in context of user-mode ioctl. Which
5547 * makes it a bad place for *any* MST-related activity.
5548 */
e7b07cee 5549
5550 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5551 !aconnector->fake_enable)
5552 connected = (aconnector->dc_sink != NULL);
5553 else
5554 connected = (aconnector->base.force == DRM_FORCE_ON);
5555
5556 update_subconnector_property(aconnector);
5557
5558 return (connected ? connector_status_connected :
5559 connector_status_disconnected);
5560}
5561
5562int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5563 struct drm_connector_state *connector_state,
5564 struct drm_property *property,
5565 uint64_t val)
5566{
5567 struct drm_device *dev = connector->dev;
1348969a 5568 struct amdgpu_device *adev = drm_to_adev(dev);
5569 struct dm_connector_state *dm_old_state =
5570 to_dm_connector_state(connector->state);
5571 struct dm_connector_state *dm_new_state =
5572 to_dm_connector_state(connector_state);
5573
5574 int ret = -EINVAL;
5575
5576 if (property == dev->mode_config.scaling_mode_property) {
5577 enum amdgpu_rmx_type rmx_type;
5578
5579 switch (val) {
5580 case DRM_MODE_SCALE_CENTER:
5581 rmx_type = RMX_CENTER;
5582 break;
5583 case DRM_MODE_SCALE_ASPECT:
5584 rmx_type = RMX_ASPECT;
5585 break;
5586 case DRM_MODE_SCALE_FULLSCREEN:
5587 rmx_type = RMX_FULL;
5588 break;
5589 case DRM_MODE_SCALE_NONE:
5590 default:
5591 rmx_type = RMX_OFF;
5592 break;
5593 }
5594
5595 if (dm_old_state->scaling == rmx_type)
5596 return 0;
5597
5598 dm_new_state->scaling = rmx_type;
5599 ret = 0;
5600 } else if (property == adev->mode_info.underscan_hborder_property) {
5601 dm_new_state->underscan_hborder = val;
5602 ret = 0;
5603 } else if (property == adev->mode_info.underscan_vborder_property) {
5604 dm_new_state->underscan_vborder = val;
5605 ret = 0;
5606 } else if (property == adev->mode_info.underscan_property) {
5607 dm_new_state->underscan_enable = val;
5608 ret = 0;
5609 } else if (property == adev->mode_info.abm_level_property) {
5610 dm_new_state->abm_level = val;
5611 ret = 0;
5612 }
5613
5614 return ret;
5615}
5616
5617int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5618 const struct drm_connector_state *state,
5619 struct drm_property *property,
5620 uint64_t *val)
5621{
5622 struct drm_device *dev = connector->dev;
1348969a 5623 struct amdgpu_device *adev = drm_to_adev(dev);
5624 struct dm_connector_state *dm_state =
5625 to_dm_connector_state(state);
5626 int ret = -EINVAL;
5627
5628 if (property == dev->mode_config.scaling_mode_property) {
5629 switch (dm_state->scaling) {
5630 case RMX_CENTER:
5631 *val = DRM_MODE_SCALE_CENTER;
5632 break;
5633 case RMX_ASPECT:
5634 *val = DRM_MODE_SCALE_ASPECT;
5635 break;
5636 case RMX_FULL:
5637 *val = DRM_MODE_SCALE_FULLSCREEN;
5638 break;
5639 case RMX_OFF:
5640 default:
5641 *val = DRM_MODE_SCALE_NONE;
5642 break;
5643 }
5644 ret = 0;
5645 } else if (property == adev->mode_info.underscan_hborder_property) {
5646 *val = dm_state->underscan_hborder;
5647 ret = 0;
5648 } else if (property == adev->mode_info.underscan_vborder_property) {
5649 *val = dm_state->underscan_vborder;
5650 ret = 0;
5651 } else if (property == adev->mode_info.underscan_property) {
5652 *val = dm_state->underscan_enable;
5653 ret = 0;
5654 } else if (property == adev->mode_info.abm_level_property) {
5655 *val = dm_state->abm_level;
5656 ret = 0;
e7b07cee 5657 }
c1ee92f9 5658
5659 return ret;
5660}
5661
5662static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5663{
5664 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5665
5666 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5667}
5668
7578ecda 5669static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
e7b07cee 5670{
c84dec2f 5671 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 5672 const struct dc_link *link = aconnector->dc_link;
1348969a 5673 struct amdgpu_device *adev = drm_to_adev(connector->dev);
e7b07cee 5674 struct amdgpu_display_manager *dm = &adev->dm;
ada8ce15 5675
5676 /*
5677 * Call only if mst_mgr was initialized before, since it's not done
5678 * for all connector types.
5679 */
5680 if (aconnector->mst_mgr.dev)
5681 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5682
e7b07cee
HW
5683#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5684 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5685
89fc8d4e 5686 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5687 link->type != dc_connection_none &&
5688 dm->backlight_dev) {
5689 backlight_device_unregister(dm->backlight_dev);
5690 dm->backlight_dev = NULL;
5691 }
5692#endif
5693
5694 if (aconnector->dc_em_sink)
5695 dc_sink_release(aconnector->dc_em_sink);
5696 aconnector->dc_em_sink = NULL;
5697 if (aconnector->dc_sink)
5698 dc_sink_release(aconnector->dc_sink);
5699 aconnector->dc_sink = NULL;
5700
e86e8947 5701 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5702 drm_connector_unregister(connector);
5703 drm_connector_cleanup(connector);
5704 if (aconnector->i2c) {
5705 i2c_del_adapter(&aconnector->i2c->base);
5706 kfree(aconnector->i2c);
5707 }
7daec99f 5708 kfree(aconnector->dm_dp_aux.aux.name);
526c654a 5709
5710 kfree(connector);
5711}
5712
5713void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5714{
5715 struct dm_connector_state *state =
5716 to_dm_connector_state(connector->state);
5717
5718 if (connector->state)
5719 __drm_atomic_helper_connector_destroy_state(connector->state);
5720
5721 kfree(state);
5722
5723 state = kzalloc(sizeof(*state), GFP_KERNEL);
5724
5725 if (state) {
5726 state->scaling = RMX_OFF;
5727 state->underscan_enable = false;
5728 state->underscan_hborder = 0;
5729 state->underscan_vborder = 0;
01933ba4 5730 state->base.max_requested_bpc = 8;
5731 state->vcpi_slots = 0;
5732 state->pbn = 0;
5733 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5734 state->abm_level = amdgpu_dm_abm_level;
5735
df099b9b 5736 __drm_atomic_helper_connector_reset(connector, &state->base);
5737 }
5738}
5739
5740struct drm_connector_state *
5741amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5742{
5743 struct dm_connector_state *state =
5744 to_dm_connector_state(connector->state);
5745
5746 struct dm_connector_state *new_state =
5747 kmemdup(state, sizeof(*state), GFP_KERNEL);
5748
5749 if (!new_state)
5750 return NULL;
e7b07cee 5751
5752 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5753
5754 new_state->freesync_capable = state->freesync_capable;
c1ee92f9 5755 new_state->abm_level = state->abm_level;
5756 new_state->scaling = state->scaling;
5757 new_state->underscan_enable = state->underscan_enable;
5758 new_state->underscan_hborder = state->underscan_hborder;
5759 new_state->underscan_vborder = state->underscan_vborder;
5760 new_state->vcpi_slots = state->vcpi_slots;
5761 new_state->pbn = state->pbn;
98e6436d 5762 return &new_state->base;
5763}
5764
5765static int
5766amdgpu_dm_connector_late_register(struct drm_connector *connector)
5767{
5768 struct amdgpu_dm_connector *amdgpu_dm_connector =
5769 to_amdgpu_dm_connector(connector);
00a8037e 5770 int r;
14f04fa4 5771
5772 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5773 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5774 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5775 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5776 if (r)
5777 return r;
5778 }
5779
5780#if defined(CONFIG_DEBUG_FS)
5781 connector_debugfs_init(amdgpu_dm_connector);
5782#endif
5783
5784 return 0;
5785}
5786
5787static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5788 .reset = amdgpu_dm_connector_funcs_reset,
5789 .detect = amdgpu_dm_connector_detect,
5790 .fill_modes = drm_helper_probe_single_connector_modes,
5791 .destroy = amdgpu_dm_connector_destroy,
5792 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5793 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5794 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
526c654a 5795 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
14f04fa4 5796 .late_register = amdgpu_dm_connector_late_register,
526c654a 5797 .early_unregister = amdgpu_dm_connector_unregister
5798};
5799
5800static int get_modes(struct drm_connector *connector)
5801{
5802 return amdgpu_dm_connector_get_modes(connector);
5803}
5804
c84dec2f 5805static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5806{
5807 struct dc_sink_init_data init_params = {
5808 .link = aconnector->dc_link,
5809 .sink_signal = SIGNAL_TYPE_VIRTUAL
5810 };
70e8ffc5 5811 struct edid *edid;
e7b07cee 5812
a89ff457 5813 if (!aconnector->base.edid_blob_ptr) {
5814 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
5815 aconnector->base.name);
5816
5817 aconnector->base.force = DRM_FORCE_OFF;
5818 aconnector->base.override_edid = false;
5819 return;
5820 }
5821
5822 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5823
5824 aconnector->edid = edid;
5825
5826 aconnector->dc_em_sink = dc_link_add_remote_sink(
5827 aconnector->dc_link,
5828 (uint8_t *)edid,
5829 (edid->extensions + 1) * EDID_LENGTH,
5830 &init_params);
5831
dcd5fb82 5832 if (aconnector->base.force == DRM_FORCE_ON) {
5833 aconnector->dc_sink = aconnector->dc_link->local_sink ?
5834 aconnector->dc_link->local_sink :
5835 aconnector->dc_em_sink;
5836 dc_sink_retain(aconnector->dc_sink);
5837 }
5838}
5839
c84dec2f 5840static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5841{
5842 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5843
5844 /*
5845 * In case of a headless boot with force on for a DP managed connector,
5846 * these settings have to be != 0 to get an initial modeset.
5847 */
5848 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5849 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5850 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5851 }
5852
5853
5854 aconnector->base.override_edid = true;
5855 create_eml_sink(aconnector);
5856}
5857
5858static struct dc_stream_state *
5859create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5860 const struct drm_display_mode *drm_mode,
5861 const struct dm_connector_state *dm_state,
5862 const struct dc_stream_state *old_stream)
5863{
5864 struct drm_connector *connector = &aconnector->base;
1348969a 5865 struct amdgpu_device *adev = drm_to_adev(connector->dev);
cbd14ae7 5866 struct dc_stream_state *stream;
5867 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5868 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5869 enum dc_status dc_result = DC_OK;
5870
5871 do {
5872 stream = create_stream_for_sink(aconnector, drm_mode,
5873 dm_state, old_stream,
5874 requested_bpc);
5875 if (stream == NULL) {
5876 DRM_ERROR("Failed to create stream for sink!\n");
5877 break;
5878 }
5879
5880 dc_result = dc_validate_stream(adev->dm.dc, stream);
5881
5882 if (dc_result != DC_OK) {
74a16675 5883 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5884 drm_mode->hdisplay,
5885 drm_mode->vdisplay,
5886 drm_mode->clock,
5887 dc_result,
5888 dc_status_to_str(dc_result));
5889
5890 dc_stream_release(stream);
5891 stream = NULL;
5892 requested_bpc -= 2; /* lower bpc to retry validation */
5893 }
5894
5895 } while (stream == NULL && requested_bpc >= 6);
5896
5897 return stream;
5898}
5899
ba9ca088 5900enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
3ee6b26b 5901 struct drm_display_mode *mode)
5902{
5903 int result = MODE_ERROR;
5904 struct dc_sink *dc_sink;
e7b07cee 5905 /* TODO: Unhardcode stream count */
0971c40e 5906 struct dc_stream_state *stream;
c84dec2f 5907 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5908
5909 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5910 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
5911 return result;
5912
5913 /*
5914 * Only run this the first time mode_valid is called to initialize
5915 * EDID mgmt
5916 */
5917 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5918 !aconnector->dc_em_sink)
5919 handle_edid_mgmt(aconnector);
5920
c84dec2f 5921 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
e7b07cee 5922
5923 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
5924 aconnector->base.force != DRM_FORCE_ON) {
5925 DRM_ERROR("dc_sink is NULL!\n");
5926 goto fail;
5927 }
5928
5929 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5930 if (stream) {
5931 dc_stream_release(stream);
e7b07cee 5932 result = MODE_OK;
cbd14ae7 5933 }
5934
5935fail:
5936 /* TODO: error handling */
5937 return result;
5938}
5939
5940static int fill_hdr_info_packet(const struct drm_connector_state *state,
5941 struct dc_info_packet *out)
5942{
5943 struct hdmi_drm_infoframe frame;
5944 unsigned char buf[30]; /* 26 + 4 */
5945 ssize_t len;
5946 int ret, i;
5947
5948 memset(out, 0, sizeof(*out));
5949
5950 if (!state->hdr_output_metadata)
5951 return 0;
5952
5953 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5954 if (ret)
5955 return ret;
5956
5957 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5958 if (len < 0)
5959 return (int)len;
5960
5961 /* Static metadata is a fixed 26 bytes + 4 byte header. */
5962 if (len != 30)
5963 return -EINVAL;
5964
5965 /* Prepare the infopacket for DC. */
5966 switch (state->connector->connector_type) {
5967 case DRM_MODE_CONNECTOR_HDMIA:
5968 out->hb0 = 0x87; /* type */
5969 out->hb1 = 0x01; /* version */
5970 out->hb2 = 0x1A; /* length */
5971 out->sb[0] = buf[3]; /* checksum */
5972 i = 1;
5973 break;
5974
5975 case DRM_MODE_CONNECTOR_DisplayPort:
5976 case DRM_MODE_CONNECTOR_eDP:
5977 out->hb0 = 0x00; /* sdp id, zero */
5978 out->hb1 = 0x87; /* type */
5979 out->hb2 = 0x1D; /* payload len - 1 */
5980 out->hb3 = (0x13 << 2); /* sdp version */
5981 out->sb[0] = 0x01; /* version */
5982 out->sb[1] = 0x1A; /* length */
5983 i = 2;
5984 break;
5985
5986 default:
5987 return -EINVAL;
5988 }
5989
5990 memcpy(&out->sb[i], &buf[4], 26);
5991 out->valid = true;
5992
5993 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5994 sizeof(out->sb), false);
5995
5996 return 0;
5997}
5998
5999static bool
6000is_hdr_metadata_different(const struct drm_connector_state *old_state,
6001 const struct drm_connector_state *new_state)
6002{
6003 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
6004 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
6005
6006 if (old_blob != new_blob) {
6007 if (old_blob && new_blob &&
6008 old_blob->length == new_blob->length)
6009 return memcmp(old_blob->data, new_blob->data,
6010 old_blob->length);
6011
6012 return true;
6013 }
6014
6015 return false;
6016}
6017
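/*
 * Editor's sketch of how the two helpers above combine (illustrative
 * only): the HDR SDP/infoframe only needs rebuilding when the static
 * metadata blob actually changed between the old and new state, which
 * is exactly what the connector atomic check below relies on.
 */
static int example_refresh_hdr_packet(const struct drm_connector_state *old_s,
				      const struct drm_connector_state *new_s,
				      struct dc_info_packet *pkt)
{
	if (!is_hdr_metadata_different(old_s, new_s))
		return 0; /* keep the packet already in use */
	return fill_hdr_info_packet(new_s, pkt);
}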
6018static int
6019amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
51e857af 6020 struct drm_atomic_state *state)
88694af9 6021{
6022 struct drm_connector_state *new_con_state =
6023 drm_atomic_get_new_connector_state(state, conn);
6024 struct drm_connector_state *old_con_state =
6025 drm_atomic_get_old_connector_state(state, conn);
6026 struct drm_crtc *crtc = new_con_state->crtc;
6027 struct drm_crtc_state *new_crtc_state;
6028 int ret;
6029
6030 trace_amdgpu_dm_connector_atomic_check(new_con_state);
6031
6032 if (!crtc)
6033 return 0;
6034
6035 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
6036 struct dc_info_packet hdr_infopacket;
6037
6038 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6039 if (ret)
6040 return ret;
6041
6042 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6043 if (IS_ERR(new_crtc_state))
6044 return PTR_ERR(new_crtc_state);
6045
6046 /*
6047 * DC considers the stream backends changed if the
6048 * static metadata changes. Forcing the modeset also
6049 * gives a simple way for userspace to switch from
6050 * 8bpc to 10bpc when setting the metadata to enter
6051 * or exit HDR.
6052 *
6053 * Changing the static metadata after it's been
6054 * set is permissible, however. So only force a
6055 * modeset if we're entering or exiting HDR.
88694af9 6056 */
6057 new_crtc_state->mode_changed =
6058 !old_con_state->hdr_output_metadata ||
6059 !new_con_state->hdr_output_metadata;
6060 }
6061
6062 return 0;
6063}
6064
6065static const struct drm_connector_helper_funcs
6066amdgpu_dm_connector_helper_funcs = {
6067 /*
6068 * If hotplugging a second, bigger display in FB console mode, the bigger
6069 * resolution modes will be filtered out by drm_mode_validate_size(), and
6070 * those modes are missing after the user starts lightdm. So we need to renew
6071 * the modes list in the get_modes callback, not just return the modes count.
6072 */
6073 .get_modes = get_modes,
6074 .mode_valid = amdgpu_dm_connector_mode_valid,
88694af9 6075 .atomic_check = amdgpu_dm_connector_atomic_check,
6076};
6077
6078static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6079{
6080}
6081
d6ef9b41 6082static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6083{
6084 struct drm_atomic_state *state = new_crtc_state->state;
6085 struct drm_plane *plane;
6086 int num_active = 0;
6087
6088 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6089 struct drm_plane_state *new_plane_state;
6090
6091 /* Cursor planes are "fake". */
6092 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6093 continue;
6094
6095 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6096
6097 if (!new_plane_state) {
6098 /*
6099 * The plane is enabled on the CRTC and hasn't changed
6100 * state. This means that it previously passed
6101 * validation and is therefore enabled.
6102 */
6103 num_active += 1;
6104 continue;
6105 }
6106
6107 /* We need a framebuffer to be considered enabled. */
6108 num_active += (new_plane_state->fb != NULL);
6109 }
6110
6111 return num_active;
6112}
6113
6114static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6115 struct drm_crtc_state *new_crtc_state)
6116{
6117 struct dm_crtc_state *dm_new_crtc_state =
6118 to_dm_crtc_state(new_crtc_state);
6119
6120 dm_new_crtc_state->active_planes = 0;
6121
6122 if (!dm_new_crtc_state->stream)
6123 return;
6124
6125 dm_new_crtc_state->active_planes =
6126 count_crtc_active_planes(new_crtc_state);
6127}
6128
3ee6b26b 6129static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
29b77ad7 6130 struct drm_atomic_state *state)
e7b07cee 6131{
6132 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6133 crtc);
1348969a 6134 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
e7b07cee 6135 struct dc *dc = adev->dm.dc;
29b77ad7 6136 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6137 int ret = -EINVAL;
6138
5b8c5969 6139 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
e8a98235 6140
29b77ad7 6141 dm_update_crtc_active_planes(crtc, crtc_state);
d6ef9b41 6142
9b690ef3 6143 if (unlikely(!dm_crtc_state->stream &&
29b77ad7 6144 modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6145 WARN_ON(1);
6146 return ret;
6147 }
6148
bc92c065 6149 /*
6150 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6151 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6152 * planes are disabled, which is not supported by the hardware. And there is legacy
6153 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
bc92c065 6154 */
29b77ad7 6155 if (crtc_state->enable &&
6156 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6157 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
c14a005c 6158 return -EINVAL;
ea9522f5 6159 }
c14a005c 6160
6161 /* In some use cases, like reset, no stream is attached */
6162 if (!dm_crtc_state->stream)
6163 return 0;
6164
62c933f9 6165 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6166 return 0;
6167
ea9522f5 6168 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6169 return ret;
6170}
6171
6172static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6173 const struct drm_display_mode *mode,
6174 struct drm_display_mode *adjusted_mode)
6175{
6176 return true;
6177}
6178
6179static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6180 .disable = dm_crtc_helper_disable,
6181 .atomic_check = dm_crtc_helper_atomic_check,
6182 .mode_fixup = dm_crtc_helper_mode_fixup,
6183 .get_scanout_position = amdgpu_crtc_get_scanout_position,
6184};
6185
6186static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6187{
6188
6189}
6190
6191 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6192{
6193 switch (display_color_depth) {
6194 case COLOR_DEPTH_666:
6195 return 6;
6196 case COLOR_DEPTH_888:
6197 return 8;
6198 case COLOR_DEPTH_101010:
6199 return 10;
6200 case COLOR_DEPTH_121212:
6201 return 12;
6202 case COLOR_DEPTH_141414:
6203 return 14;
6204 case COLOR_DEPTH_161616:
6205 return 16;
6206 default:
6207 break;
6208 }
6209 return 0;
6210}
6211
6212static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6213 struct drm_crtc_state *crtc_state,
6214 struct drm_connector_state *conn_state)
e7b07cee 6215{
6216 struct drm_atomic_state *state = crtc_state->state;
6217 struct drm_connector *connector = conn_state->connector;
6218 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6219 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6220 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6221 struct drm_dp_mst_topology_mgr *mst_mgr;
6222 struct drm_dp_mst_port *mst_port;
6223 enum dc_color_depth color_depth;
6224 int clock, bpp = 0;
1bc22f20 6225 bool is_y420 = false;
6226
6227 if (!aconnector->port || !aconnector->dc_sink)
6228 return 0;
6229
6230 mst_port = aconnector->port;
6231 mst_mgr = &aconnector->mst_port->mst_mgr;
6232
6233 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6234 return 0;
6235
6236 if (!state->duplicated) {
cbd14ae7 6237 int max_bpc = conn_state->max_requested_bpc;
6238 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6239 aconnector->force_yuv420_output;
6240 color_depth = convert_color_depth_from_display_info(connector,
6241 is_y420,
6242 max_bpc);
6243 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6244 clock = adjusted_mode->clock;
dc48529f 6245 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6246 }
6247 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6248 mst_mgr,
6249 mst_port,
1c6c1cb5 6250 dm_new_connector_state->pbn,
03ca9600 6251 dm_mst_get_pbn_divider(aconnector->dc_link));
6252 if (dm_new_connector_state->vcpi_slots < 0) {
6253 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6254 return dm_new_connector_state->vcpi_slots;
6255 }
6256 return 0;
6257}
6258
6259const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6260 .disable = dm_encoder_helper_disable,
6261 .atomic_check = dm_encoder_helper_atomic_check
6262};
6263
d9fe1a4c 6264#if defined(CONFIG_DRM_AMD_DC_DCN)
6265static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6266 struct dc_state *dc_state)
6267{
6268 struct dc_stream_state *stream = NULL;
6269 struct drm_connector *connector;
6270 struct drm_connector_state *new_con_state, *old_con_state;
6271 struct amdgpu_dm_connector *aconnector;
6272 struct dm_connector_state *dm_conn_state;
6273 int i, j, clock, bpp;
6274 int vcpi, pbn_div, pbn = 0;
6275
6276 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6277
6278 aconnector = to_amdgpu_dm_connector(connector);
6279
6280 if (!aconnector->port)
6281 continue;
6282
6283 if (!new_con_state || !new_con_state->crtc)
6284 continue;
6285
6286 dm_conn_state = to_dm_connector_state(new_con_state);
6287
6288 for (j = 0; j < dc_state->stream_count; j++) {
6289 stream = dc_state->streams[j];
6290 if (!stream)
6291 continue;
6292
6293 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
6294 break;
6295
6296 stream = NULL;
6297 }
6298
6299 if (!stream)
6300 continue;
6301
6302 if (stream->timing.flags.DSC != 1) {
6303 drm_dp_mst_atomic_enable_dsc(state,
6304 aconnector->port,
6305 dm_conn_state->pbn,
6306 0,
6307 false);
6308 continue;
6309 }
6310
6311 pbn_div = dm_mst_get_pbn_divider(stream->link);
6312 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6313 clock = stream->timing.pix_clk_100hz / 10;
6314 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6315 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6316 aconnector->port,
6317 pbn, pbn_div,
6318 true);
6319 if (vcpi < 0)
6320 return vcpi;
6321
6322 dm_conn_state->pbn = pbn;
6323 dm_conn_state->vcpi_slots = vcpi;
6324 }
6325 return 0;
6326}
d9fe1a4c 6327#endif
29b9ba74 6328
6329static void dm_drm_plane_reset(struct drm_plane *plane)
6330{
6331 struct dm_plane_state *amdgpu_state = NULL;
6332
6333 if (plane->state)
6334 plane->funcs->atomic_destroy_state(plane, plane->state);
6335
6336 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
f922237d 6337 WARN_ON(amdgpu_state == NULL);
1f6010a9 6338
6339 if (amdgpu_state)
6340 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6341}
6342
6343static struct drm_plane_state *
6344dm_drm_plane_duplicate_state(struct drm_plane *plane)
6345{
6346 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6347
6348 old_dm_plane_state = to_dm_plane_state(plane->state);
6349 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6350 if (!dm_plane_state)
6351 return NULL;
6352
6353 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6354
6355 if (old_dm_plane_state->dc_state) {
6356 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6357 dc_plane_state_retain(dm_plane_state->dc_state);
6358 }
6359
6360 return &dm_plane_state->base;
6361}
6362
dfd84d90 6363static void dm_drm_plane_destroy_state(struct drm_plane *plane,
3ee6b26b 6364 struct drm_plane_state *state)
6365{
6366 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6367
6368 if (dm_plane_state->dc_state)
6369 dc_plane_state_release(dm_plane_state->dc_state);
e7b07cee 6370
0627bbd3 6371 drm_atomic_helper_plane_destroy_state(plane, state);
6372}
6373
6374static const struct drm_plane_funcs dm_plane_funcs = {
6375 .update_plane = drm_atomic_helper_update_plane,
6376 .disable_plane = drm_atomic_helper_disable_plane,
02680efb 6377 .destroy = drm_primary_helper_destroy,
6378 .reset = dm_drm_plane_reset,
6379 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
6380 .atomic_destroy_state = dm_drm_plane_destroy_state,
faa37f54 6381 .format_mod_supported = dm_plane_format_mod_supported,
6382};
6383
6384static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6385 struct drm_plane_state *new_state)
6386{
6387 struct amdgpu_framebuffer *afb;
6388 struct drm_gem_object *obj;
5d43be0c 6389 struct amdgpu_device *adev;
e7b07cee 6390 struct amdgpu_bo *rbo;
e7b07cee 6391 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6392 struct list_head list;
6393 struct ttm_validate_buffer tv;
6394 struct ww_acquire_ctx ticket;
6395 uint32_t domain;
6396 int r;
6397
6398 if (!new_state->fb) {
f1ad2f5e 6399 DRM_DEBUG_DRIVER("No FB bound\n");
6400 return 0;
6401 }
6402
6403 afb = to_amdgpu_framebuffer(new_state->fb);
e68d14dd 6404 obj = new_state->fb->obj[0];
e7b07cee 6405 rbo = gem_to_amdgpu_bo(obj);
5d43be0c 6406 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6407 INIT_LIST_HEAD(&list);
6408
6409 tv.bo = &rbo->tbo;
6410 tv.num_shared = 1;
6411 list_add(&tv.head, &list);
6412
9165fb87 6413 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6414 if (r) {
6415 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
e7b07cee 6416 return r;
0f257b09 6417 }
e7b07cee 6418
5d43be0c 6419 if (plane->type != DRM_PLANE_TYPE_CURSOR)
f2bd8a0e 6420 domain = amdgpu_display_supported_domains(adev, rbo->flags);
6421 else
6422 domain = AMDGPU_GEM_DOMAIN_VRAM;
e7b07cee 6423
7b7c6c81 6424 r = amdgpu_bo_pin(rbo, domain);
e7b07cee 6425 if (unlikely(r != 0)) {
6426 if (r != -ERESTARTSYS)
6427 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
0f257b09 6428 ttm_eu_backoff_reservation(&ticket, &list);
6429 return r;
6430 }
6431
6432 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6433 if (unlikely(r != 0)) {
6434 amdgpu_bo_unpin(rbo);
0f257b09 6435 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 6436 DRM_ERROR("%p bind failed\n", rbo);
6437 return r;
6438 }
7df7e505 6439
0f257b09 6440 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 6441
7b7c6c81 6442 afb->address = amdgpu_bo_gpu_offset(rbo);
6443
6444 amdgpu_bo_ref(rbo);
6445
6446 /**
6447 * We don't do surface updates on planes that have been newly created,
6448 * but we also don't have the afb->address during atomic check.
6449 *
6450 * Fill in buffer attributes depending on the address here, but only on
6451 * newly created planes since they're not being used by DC yet and this
6452 * won't modify global state.
6453 */
6454 dm_plane_state_old = to_dm_plane_state(plane->state);
6455 dm_plane_state_new = to_dm_plane_state(new_state);
6456
3be5262e 6457 if (dm_plane_state_new->dc_state &&
6458 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6459 struct dc_plane_state *plane_state =
6460 dm_plane_state_new->dc_state;
6461 bool force_disable_dcc = !plane_state->dcc.enable;
e7b07cee 6462
320932bf 6463 fill_plane_buffer_attributes(
695af5f9 6464 adev, afb, plane_state->format, plane_state->rotation,
6eed95b0 6465 afb->tiling_flags,
cf322b49
NK
6466 &plane_state->tiling_info, &plane_state->plane_size,
6467 &plane_state->dcc, &plane_state->address,
6eed95b0 6468 afb->tmz_surface, force_disable_dcc);
6469 }
6470
6471 return 0;
6472}
6473
6474static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6475 struct drm_plane_state *old_state)
6476{
6477 struct amdgpu_bo *rbo;
6478 int r;
6479
6480 if (!old_state->fb)
6481 return;
6482
e68d14dd 6483 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6484 r = amdgpu_bo_reserve(rbo, false);
6485 if (unlikely(r)) {
6486 DRM_ERROR("failed to reserve rbo before unpin\n");
6487 return;
6488 }
6489
6490 amdgpu_bo_unpin(rbo);
6491 amdgpu_bo_unreserve(rbo);
6492 amdgpu_bo_unref(&rbo);
6493}
6494
6495static int dm_plane_helper_check_state(struct drm_plane_state *state,
6496 struct drm_crtc_state *new_crtc_state)
6497{
6498 struct drm_framebuffer *fb = state->fb;
6499 int min_downscale, max_upscale;
6500 int min_scale = 0;
6501 int max_scale = INT_MAX;
6502
6503 /* Plane enabled? Get min/max allowed scaling factors from plane caps. */
6504 if (fb && state->crtc) {
6505 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6506 &min_downscale, &max_upscale);
6507 /*
6508 * Convert to drm convention: 16.16 fixed point, instead of dc's
6509 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6510 * dst/src, so min_scale = 1.0 / max_upscale, etc.
6511 */
6512 min_scale = (1000 << 16) / max_upscale;
6513 max_scale = (1000 << 16) / min_downscale;
6514 }
8c44515b 6515
8c44515b 6516 return drm_atomic_helper_check_plane_state(
6300b3bd 6517 state, new_crtc_state, min_scale, max_scale, true, true);
6518}
6519
6520static int dm_plane_atomic_check(struct drm_plane *plane,
6521 struct drm_plane_state *state)
cbd19488 6522{
1348969a 6523 struct amdgpu_device *adev = drm_to_adev(plane->dev);
cbd19488 6524 struct dc *dc = adev->dm.dc;
78171832 6525 struct dm_plane_state *dm_plane_state;
695af5f9 6526 struct dc_scaling_info scaling_info;
8c44515b 6527 struct drm_crtc_state *new_crtc_state;
695af5f9 6528 int ret;
78171832 6529
6530 trace_amdgpu_dm_plane_atomic_check(state);
6531
78171832 6532 dm_plane_state = to_dm_plane_state(state);
cbd19488 6533
3be5262e 6534 if (!dm_plane_state->dc_state)
9a3329b1 6535 return 0;
cbd19488 6536
6537 new_crtc_state =
6538 drm_atomic_get_new_crtc_state(state->state, state->crtc);
6539 if (!new_crtc_state)
6540 return -EINVAL;
6541
6542 ret = dm_plane_helper_check_state(state, new_crtc_state);
6543 if (ret)
6544 return ret;
6545
6546 ret = fill_dc_scaling_info(state, &scaling_info);
6547 if (ret)
6548 return ret;
a05bcff1 6549
62c933f9 6550 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6551 return 0;
6552
6553 return -EINVAL;
6554}
6555
6556static int dm_plane_atomic_async_check(struct drm_plane *plane,
6557 struct drm_plane_state *new_plane_state)
6558{
6559 /* Only support async updates on cursor planes. */
6560 if (plane->type != DRM_PLANE_TYPE_CURSOR)
6561 return -EINVAL;
6562
6563 return 0;
6564}
6565
static void dm_plane_atomic_async_update(struct drm_plane *plane,
					 struct drm_plane_state *new_state)
{
	struct drm_plane_state *old_state =
		drm_atomic_get_old_plane_state(new_state->state, plane);

	trace_amdgpu_dm_atomic_update_cursor(new_state);

	swap(plane->state->fb, new_state->fb);

	plane->state->src_x = new_state->src_x;
	plane->state->src_y = new_state->src_y;
	plane->state->src_w = new_state->src_w;
	plane->state->src_h = new_state->src_h;
	plane->state->crtc_x = new_state->crtc_x;
	plane->state->crtc_y = new_state->crtc_y;
	plane->state->crtc_w = new_state->crtc_w;
	plane->state->crtc_h = new_state->crtc_h;

	handle_cursor_update(plane, old_state);
}

static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
	.prepare_fb = dm_plane_helper_prepare_fb,
	.cleanup_fb = dm_plane_helper_cleanup_fb,
	.atomic_check = dm_plane_atomic_check,
	.atomic_async_check = dm_plane_atomic_async_check,
	.atomic_async_update = dm_plane_atomic_async_update
};

/*
 * TODO: these are currently initialized to rgb formats only.
 * For future use cases we should either initialize them dynamically based on
 * plane capabilities, or initialize this array to all formats, so the internal
 * drm check will succeed, and let DC implement the proper check.
 */
static const uint32_t rgb_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_RGB565,
};

static const uint32_t overlay_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_RGB565
};

static const u32 cursor_formats[] = {
	DRM_FORMAT_ARGB8888
};

static int get_plane_formats(const struct drm_plane *plane,
			     const struct dc_plane_cap *plane_cap,
			     uint32_t *formats, int max_formats)
{
	int i, num_formats = 0;

	/*
	 * TODO: Query support for each group of formats directly from
	 * DC plane caps. This will require adding more formats to the
	 * caps list.
	 */

	switch (plane->type) {
	case DRM_PLANE_TYPE_PRIMARY:
		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = rgb_formats[i];
		}

		if (plane_cap && plane_cap->pixel_format_support.nv12)
			formats[num_formats++] = DRM_FORMAT_NV12;
		if (plane_cap && plane_cap->pixel_format_support.p010)
			formats[num_formats++] = DRM_FORMAT_P010;
		if (plane_cap && plane_cap->pixel_format_support.fp16) {
			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
		}
		break;

	case DRM_PLANE_TYPE_OVERLAY:
		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = overlay_formats[i];
		}
		break;

	case DRM_PLANE_TYPE_CURSOR:
		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = cursor_formats[i];
		}
		break;
	}

	return num_formats;
}

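/*
 * Example (illustrative): for a primary plane whose DC caps advertise NV12
 * but neither P010 nor FP16, the caller gets the ten rgb_formats[] entries
 * plus DRM_FORMAT_NV12, i.e. num_formats == 11.
 */
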
static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap)
{
	uint32_t formats[32];
	int num_formats;
	int res = -EPERM;
	unsigned int supported_rotations;
	uint64_t *modifiers = NULL;

	num_formats = get_plane_formats(plane, plane_cap, formats,
					ARRAY_SIZE(formats));

	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
	if (res)
		return res;

	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
				       &dm_plane_funcs, formats, num_formats,
				       modifiers, plane->type, NULL);
	kfree(modifiers);
	if (res)
		return res;

	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
	    plane_cap && plane_cap->per_pixel_alpha) {
		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
					  BIT(DRM_MODE_BLEND_PREMULTI);

		drm_plane_create_alpha_property(plane);
		drm_plane_create_blend_mode_property(plane, blend_caps);
	}

	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
	    plane_cap &&
	    (plane_cap->pixel_format_support.nv12 ||
	     plane_cap->pixel_format_support.p010)) {
		/* This only affects YUV formats. */
		drm_plane_create_color_properties(
			plane,
			BIT(DRM_COLOR_YCBCR_BT601) |
			BIT(DRM_COLOR_YCBCR_BT709) |
			BIT(DRM_COLOR_YCBCR_BT2020),
			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
	}

	supported_rotations =
		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;

	if (dm->adev->asic_type >= CHIP_BONAIRE &&
	    plane->type != DRM_PLANE_TYPE_CURSOR)
		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
						   supported_rotations);

	drm_plane_helper_add(plane, &dm_plane_helper_funcs);

	/* Create (reset) the plane state */
	if (plane->funcs->reset)
		plane->funcs->reset(plane);

	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void attach_crtc_crc_properties(struct amdgpu_display_manager *dm,
				       struct amdgpu_crtc *acrtc)
{
	drm_object_attach_property(&acrtc->base.base,
				   dm->crc_win_x_start_property,
				   0);
	drm_object_attach_property(&acrtc->base.base,
				   dm->crc_win_y_start_property,
				   0);
	drm_object_attach_property(&acrtc->base.base,
				   dm->crc_win_x_end_property,
				   0);
	drm_object_attach_property(&acrtc->base.base,
				   dm->crc_win_y_end_property,
				   0);
}
#endif

static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t crtc_index)
{
	struct amdgpu_crtc *acrtc = NULL;
	struct drm_plane *cursor_plane;

	int res = -ENOMEM;

	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
	if (!cursor_plane)
		goto fail;

	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);

	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
	if (!acrtc)
		goto fail;

	res = drm_crtc_init_with_planes(
			dm->ddev,
			&acrtc->base,
			plane,
			cursor_plane,
			&amdgpu_dm_crtc_funcs, NULL);

	if (res)
		goto fail;

	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);

	/* Create (reset) the crtc state */
	if (acrtc->base.funcs->reset)
		acrtc->base.funcs->reset(&acrtc->base);

	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;

	acrtc->crtc_id = crtc_index;
	acrtc->base.enabled = false;
	acrtc->otg_inst = -1;

	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
				   true, MAX_COLOR_LUT_ENTRIES);
	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
#ifdef CONFIG_DEBUG_FS
	attach_crtc_crc_properties(dm, acrtc);
#endif
	return 0;

fail:
	kfree(acrtc);
	kfree(cursor_plane);
	return res;
}


static int to_drm_connector_type(enum signal_type st)
{
	switch (st) {
	case SIGNAL_TYPE_HDMI_TYPE_A:
		return DRM_MODE_CONNECTOR_HDMIA;
	case SIGNAL_TYPE_EDP:
		return DRM_MODE_CONNECTOR_eDP;
	case SIGNAL_TYPE_LVDS:
		return DRM_MODE_CONNECTOR_LVDS;
	case SIGNAL_TYPE_RGB:
		return DRM_MODE_CONNECTOR_VGA;
	case SIGNAL_TYPE_DISPLAY_PORT:
	case SIGNAL_TYPE_DISPLAY_PORT_MST:
		return DRM_MODE_CONNECTOR_DisplayPort;
	case SIGNAL_TYPE_DVI_DUAL_LINK:
	case SIGNAL_TYPE_DVI_SINGLE_LINK:
		return DRM_MODE_CONNECTOR_DVID;
	case SIGNAL_TYPE_VIRTUAL:
		return DRM_MODE_CONNECTOR_VIRTUAL;

	default:
		return DRM_MODE_CONNECTOR_Unknown;
	}
}

static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
{
	struct drm_encoder *encoder;

	/* There is only one encoder per connector */
	drm_connector_for_each_possible_encoder(connector, encoder)
		return encoder;

	return NULL;
}

static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
{
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (encoder == NULL)
		return;

	amdgpu_encoder = to_amdgpu_encoder(encoder);

	amdgpu_encoder->native_mode.clock = 0;

	if (!list_empty(&connector->probed_modes)) {
		struct drm_display_mode *preferred_mode = NULL;

		list_for_each_entry(preferred_mode,
				    &connector->probed_modes,
				    head) {
			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
				amdgpu_encoder->native_mode = *preferred_mode;

			break;
		}

	}
}

static struct drm_display_mode *
amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
			     char *name,
			     int hdisplay, int vdisplay)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;

	mode = drm_mode_duplicate(dev, native_mode);

	if (mode == NULL)
		return NULL;

	mode->hdisplay = hdisplay;
	mode->vdisplay = vdisplay;
	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);

	return mode;

}

static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
						 struct drm_connector *connector)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	int i;
	int n;
	struct mode_size {
		char name[DRM_DISPLAY_MODE_LEN];
		int w;
		int h;
	} common_modes[] = {
		{  "640x480",  640,  480},
		{  "800x600",  800,  600},
		{ "1024x768", 1024,  768},
		{ "1280x720", 1280,  720},
		{ "1280x800", 1280,  800},
		{"1280x1024", 1280, 1024},
		{ "1440x900", 1440,  900},
		{"1680x1050", 1680, 1050},
		{"1600x1200", 1600, 1200},
		{"1920x1080", 1920, 1080},
		{"1920x1200", 1920, 1200}
	};

	n = ARRAY_SIZE(common_modes);

	for (i = 0; i < n; i++) {
		struct drm_display_mode *curmode = NULL;
		bool mode_existed = false;

		if (common_modes[i].w > native_mode->hdisplay ||
		    common_modes[i].h > native_mode->vdisplay ||
		    (common_modes[i].w == native_mode->hdisplay &&
		     common_modes[i].h == native_mode->vdisplay))
			continue;

		list_for_each_entry(curmode, &connector->probed_modes, head) {
			if (common_modes[i].w == curmode->hdisplay &&
			    common_modes[i].h == curmode->vdisplay) {
				mode_existed = true;
				break;
			}
		}

		if (mode_existed)
			continue;

		mode = amdgpu_dm_create_common_mode(encoder,
				common_modes[i].name, common_modes[i].w,
				common_modes[i].h);
		drm_mode_probed_add(connector, mode);
		amdgpu_dm_connector->num_modes++;
	}
}

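/*
 * Example (illustrative): for a native 1920x1200 panel, every entry of
 * common_modes[] except 1920x1200 itself fits within the native mode, so
 * up to ten extra modes are added, skipping any the EDID already probed.
 */
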
static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
					      struct edid *edid)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);

	if (edid) {
		/* empty probed_modes */
		INIT_LIST_HEAD(&connector->probed_modes);
		amdgpu_dm_connector->num_modes =
			drm_add_edid_modes(connector, edid);

		/* Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode() since an EDID can have
		 * more than one preferred mode. Modes that come later in
		 * the probed mode list could be of a higher, preferred
		 * resolution. For example, a 3840x2160 preferred timing in
		 * the base EDID and a preferred 4096x2160 resolution in a
		 * DID extension block later.
		 */
		drm_mode_sort(&connector->probed_modes);
		amdgpu_dm_get_native_mode(connector);
	} else {
		amdgpu_dm_connector->num_modes = 0;
	}
}

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	struct drm_encoder *encoder;
	struct edid *edid = amdgpu_dm_connector->edid;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (!drm_edid_is_valid(edid)) {
		amdgpu_dm_connector->num_modes =
			drm_add_modes_noedid(connector, 640, 480);
	} else {
		amdgpu_dm_connector_ddc_get_modes(connector, edid);
		amdgpu_dm_connector_add_common_modes(encoder, connector);
	}
	amdgpu_dm_fbc_init(connector);

	return amdgpu_dm_connector->num_modes;
}

void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
				     struct amdgpu_dm_connector *aconnector,
				     int connector_type,
				     struct dc_link *link,
				     int link_index)
{
	struct amdgpu_device *adev = drm_to_adev(dm->ddev);

	/*
	 * Some of the properties below require access to state, like bpc.
	 * Allocate some default initial connector state with our reset helper.
	 */
	if (aconnector->base.funcs->reset)
		aconnector->base.funcs->reset(&aconnector->base);

	aconnector->connector_id = link_index;
	aconnector->dc_link = link;
	aconnector->base.interlace_allowed = false;
	aconnector->base.doublescan_allowed = false;
	aconnector->base.stereo_allowed = false;
	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
	aconnector->audio_inst = -1;
	mutex_init(&aconnector->hpd_lock);

	/*
	 * Configure HPD hot plug support. The default value of
	 * connector->polled is 0, which means HPD hot plug is not supported.
	 */
	switch (connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.dp_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DVID:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	default:
		break;
	}

	drm_object_attach_property(&aconnector->base.base,
				dm->ddev->mode_config.scaling_mode_property,
				DRM_MODE_SCALE_NONE);

	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_property,
				UNDERSCAN_OFF);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_hborder_property,
				0);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_vborder_property,
				0);

	if (!aconnector->mst_port)
		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);

	/* This defaults to the max in the range, but we want 8bpc for non-eDP. */
	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;

	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
		drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.abm_level_property, 0);
	}

	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector_type == DRM_MODE_CONNECTOR_eDP) {
		drm_object_attach_property(
			&aconnector->base.base,
			dm->ddev->mode_config.hdr_output_metadata_property, 0);

		if (!aconnector->mst_port)
			drm_connector_attach_vrr_capable_property(&aconnector->base);

#ifdef CONFIG_DRM_AMD_DC_HDCP
		if (adev->dm.hdcp_workqueue)
			drm_connector_attach_content_protection_property(&aconnector->base, true);
#endif
	}
}

static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
			      struct i2c_msg *msgs, int num)
{
	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
	struct ddc_service *ddc_service = i2c->ddc_service;
	struct i2c_command cmd;
	int i;
	int result = -EIO;

	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);

	if (!cmd.payloads)
		return result;

	cmd.number_of_payloads = num;
	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
	cmd.speed = 100;

	for (i = 0; i < num; i++) {
		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
		cmd.payloads[i].address = msgs[i].addr;
		cmd.payloads[i].length = msgs[i].len;
		cmd.payloads[i].data = msgs[i].buf;
	}

	if (dc_submit_i2c(
			ddc_service->ctx->dc,
			ddc_service->ddc_pin->hw_info.ddc_channel,
			&cmd))
		result = num;

	kfree(cmd.payloads);
	return result;
}

static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
	.master_xfer = amdgpu_dm_i2c_xfer,
	.functionality = amdgpu_dm_i2c_func,
};

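/*
 * Usage sketch (hypothetical caller, not part of the driver): any
 * i2c_transfer() against the adapter created below is routed through
 * amdgpu_dm_i2c_xfer() via .master_xfer, e.g. reading two EDID bytes
 * from the standard DDC address 0x50:
 */
static int example_ddc_read(struct i2c_adapter *adap, u8 *buf)
{
	u8 offset = 0;
	struct i2c_msg msgs[] = {
		{ .addr = 0x50, .flags = 0,        .len = 1, .buf = &offset },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 2, .buf = buf },
	};

	/* i2c_transfer() returns the number of messages transferred. */
	return i2c_transfer(adap, msgs, ARRAY_SIZE(msgs)) == 2 ? 0 : -EIO;
}
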
static struct amdgpu_i2c_adapter *
create_i2c(struct ddc_service *ddc_service,
	   int link_index,
	   int *res)
{
	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
	struct amdgpu_i2c_adapter *i2c;

	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
	if (!i2c)
		return NULL;
	i2c->base.owner = THIS_MODULE;
	i2c->base.class = I2C_CLASS_DDC;
	i2c->base.dev.parent = &adev->pdev->dev;
	i2c->base.algo = &amdgpu_dm_i2c_algo;
	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
	i2c_set_adapdata(&i2c->base, i2c);
	i2c->ddc_service = ddc_service;
	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;

	return i2c;
}


/*
 * Note: this function assumes that dc_link_detect() was called for the
 * dc_link which will be represented by this aconnector.
 */
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *aconnector,
				    uint32_t link_index,
				    struct amdgpu_encoder *aencoder)
{
	int res = 0;
	int connector_type;
	struct dc *dc = dm->dc;
	struct dc_link *link = dc_get_link_at_index(dc, link_index);
	struct amdgpu_i2c_adapter *i2c;

	link->priv = aconnector;

	DRM_DEBUG_DRIVER("%s()\n", __func__);

	i2c = create_i2c(link->ddc, link->link_index, &res);
	if (!i2c) {
		DRM_ERROR("Failed to create i2c adapter data\n");
		return -ENOMEM;
	}

	aconnector->i2c = i2c;
	res = i2c_add_adapter(&i2c->base);

	if (res) {
		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
		goto out_free;
	}

	connector_type = to_drm_connector_type(link->connector_signal);

	res = drm_connector_init_with_ddc(
			dm->ddev,
			&aconnector->base,
			&amdgpu_dm_connector_funcs,
			connector_type,
			&i2c->base);

	if (res) {
		DRM_ERROR("connector_init failed\n");
		aconnector->connector_id = -1;
		goto out_free;
	}

	drm_connector_helper_add(
			&aconnector->base,
			&amdgpu_dm_connector_helper_funcs);

	amdgpu_dm_connector_init_helper(
		dm,
		aconnector,
		connector_type,
		link,
		link_index);

	drm_connector_attach_encoder(
		&aconnector->base, &aencoder->base);

	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
		|| connector_type == DRM_MODE_CONNECTOR_eDP)
		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);

out_free:
	if (res) {
		kfree(i2c);
		aconnector->i2c = NULL;
	}
	return res;
}

int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
{
	switch (adev->mode_info.num_crtc) {
	case 1:
		return 0x1;
	case 2:
		return 0x3;
	case 3:
		return 0x7;
	case 4:
		return 0xf;
	case 5:
		return 0x1f;
	case 6:
	default:
		return 0x3f;
	}
}

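/*
 * Equivalent closed form (illustrative sketch only; it matches the switch
 * above for the 1-6 CRTCs it enumerates): the mask is simply the low
 * num_crtc bits.
 */
static inline u32 example_encoder_crtc_mask(u32 num_crtc)
{
	return (1u << min_t(u32, num_crtc, 6)) - 1;	/* e.g. 4 CRTCs -> 0xf */
}
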
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	int res = drm_encoder_init(dev,
				   &aencoder->base,
				   &amdgpu_dm_encoder_funcs,
				   DRM_MODE_ENCODER_TMDS,
				   NULL);

	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);

	if (!res)
		aencoder->encoder_id = link_index;
	else
		aencoder->encoder_id = -1;

	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);

	return res;
}

static void manage_dm_interrupts(struct amdgpu_device *adev,
				 struct amdgpu_crtc *acrtc,
				 bool enable)
{
	/*
	 * We have no guarantee that the frontend index maps to the same
	 * backend index - some even map to more than one.
	 *
	 * TODO: Use a different interrupt or check DC itself for the mapping.
	 */
	int irq_type =
		amdgpu_display_crtc_idx_to_irq_type(
			adev,
			acrtc->crtc_id);

	if (enable) {
		drm_crtc_vblank_on(&acrtc->base);
		amdgpu_irq_get(
			adev,
			&adev->pageflip_irq,
			irq_type);
	} else {

		amdgpu_irq_put(
			adev,
			&adev->pageflip_irq,
			irq_type);
		drm_crtc_vblank_off(&acrtc->base);
	}
}

static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
				      struct amdgpu_crtc *acrtc)
{
	int irq_type =
		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);

	/**
	 * This reads the current state for the IRQ and force-reapplies
	 * the setting to hardware.
	 */
	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
}

static bool
is_scaling_state_different(const struct dm_connector_state *dm_state,
			   const struct dm_connector_state *old_dm_state)
{
	if (dm_state->scaling != old_dm_state->scaling)
		return true;
	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
			return true;
	} else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
			return true;
	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
		return true;
	return false;
}

#ifdef CONFIG_DRM_AMD_DC_HDCP
static bool is_content_protection_different(struct drm_connector_state *state,
					    const struct drm_connector_state *old_state,
					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);

	/* Handle: Type0/1 change */
	if (old_state->hdcp_content_type != state->hdcp_content_type &&
	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return true;
	}

	/* CP is being re-enabled, ignore this
	 *
	 * Handles: ENABLED -> DESIRED
	 */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
		return false;
	}

	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
	 *
	 * Handles: UNDESIRED -> ENABLED
	 */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;

	/* Check if something is connected and enabled; otherwise we would
	 * start hdcp with nothing connected/enabled (hot-plug, headless S3,
	 * dpms).
	 *
	 * Handles: DESIRED -> DESIRED (Special case)
	 */
	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
		dm_con_state->update_hdcp = false;
		return true;
	}

	/*
	 * Handles: UNDESIRED -> UNDESIRED
	 *          DESIRED -> DESIRED
	 *          ENABLED -> ENABLED
	 */
	if (old_state->content_protection == state->content_protection)
		return false;

	/*
	 * Handles: UNDESIRED -> DESIRED
	 *          DESIRED -> UNDESIRED
	 *          ENABLED -> UNDESIRED
	 */
	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
		return true;

	/*
	 * Handles: DESIRED -> ENABLED
	 */
	return false;
}

#endif
static void remove_stream(struct amdgpu_device *adev,
			  struct amdgpu_crtc *acrtc,
			  struct dc_stream_state *stream)
{
	/* this is the update mode case */

	acrtc->otg_inst = -1;
	acrtc->enabled = false;
}

static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
			       struct dc_cursor_position *position)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	int x, y;
	int xorigin = 0, yorigin = 0;

	position->enable = false;
	position->x = 0;
	position->y = 0;

	if (!crtc || !plane->state->fb)
		return 0;

	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
			  __func__,
			  plane->state->crtc_w,
			  plane->state->crtc_h);
		return -EINVAL;
	}

	x = plane->state->crtc_x;
	y = plane->state->crtc_y;

	if (x <= -amdgpu_crtc->max_cursor_width ||
	    y <= -amdgpu_crtc->max_cursor_height)
		return 0;

	if (x < 0) {
		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
		x = 0;
	}
	if (y < 0) {
		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
		y = 0;
	}
	position->enable = true;
	position->translate_by_source = true;
	position->x = x;
	position->y = y;
	position->x_hotspot = xorigin;
	position->y_hotspot = yorigin;

	return 0;
}

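/*
 * Example (illustrative): a 64x64 cursor dragged to crtc_x = -10 yields
 * x = 0 with x_hotspot = 10, so DC starts scanning the cursor surface ten
 * pixels in and the image appears to slide off the left edge of the screen.
 */
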
static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state)
{
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	uint64_t address = afb ? afb->address : 0;
	struct dc_cursor_position position;
	struct dc_cursor_attributes attributes;
	int ret;

	if (!plane->state->fb && !old_plane_state->fb)
		return;

	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
			 __func__,
			 amdgpu_crtc->crtc_id,
			 plane->state->crtc_w,
			 plane->state->crtc_h);

	ret = get_cursor_position(plane, crtc, &position);
	if (ret)
		return;

	if (!position.enable) {
		/* turn off cursor */
		if (crtc_state && crtc_state->stream) {
			mutex_lock(&adev->dm.dc_lock);
			dc_stream_set_cursor_position(crtc_state->stream,
						      &position);
			mutex_unlock(&adev->dm.dc_lock);
		}
		return;
	}

	amdgpu_crtc->cursor_width = plane->state->crtc_w;
	amdgpu_crtc->cursor_height = plane->state->crtc_h;

	memset(&attributes, 0, sizeof(attributes));
	attributes.address.high_part = upper_32_bits(address);
	attributes.address.low_part  = lower_32_bits(address);
	attributes.width             = plane->state->crtc_w;
	attributes.height            = plane->state->crtc_h;
	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
	attributes.rotation_angle    = 0;
	attributes.attribute_flags.value = 0;

	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];

	if (crtc_state->stream) {
		mutex_lock(&adev->dm.dc_lock);
		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
						     &attributes))
			DRM_ERROR("DC failed to set cursor attributes\n");

		if (!dc_stream_set_cursor_position(crtc_state->stream,
						   &position))
			DRM_ERROR("DC failed to set cursor position\n");
		mutex_unlock(&adev->dm.dc_lock);
	}
}
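
/*
 * Example of the pitch conversion above (illustrative): for a 64-wide
 * ARGB8888 cursor, pitches[0] is 256 bytes and cpp[0] is 4, so DC is
 * handed a pitch of 256 / 4 = 64 pixels.
 */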

static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
{

	assert_spin_locked(&acrtc->base.dev->event_lock);
	WARN_ON(acrtc->event);

	acrtc->event = acrtc->base.state->event;

	/* Set the flip status */
	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

	/* Mark this event as consumed */
	acrtc->base.state->event = NULL;

	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
			 acrtc->crtc_id);
}

static void update_freesync_state_on_stream(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state,
	struct dc_stream_state *new_stream,
	struct dc_plane_state *surface,
	u32 flip_timestamp_in_us)
{
	struct mod_vrr_params vrr_params;
	struct dc_info_packet vrr_infopacket = {0};
	struct amdgpu_device *adev = dm->adev;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
	unsigned long flags;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */

	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;

	if (surface) {
		mod_freesync_handle_preflip(
			dm->freesync_module,
			surface,
			new_stream,
			flip_timestamp_in_us,
			&vrr_params);

		if (adev->family < AMDGPU_FAMILY_AI &&
		    amdgpu_dm_vrr_active(new_crtc_state)) {
			mod_freesync_handle_v_update(dm->freesync_module,
						     new_stream, &vrr_params);

			/* Need to call this before the frame ends. */
			dc_stream_adjust_vmin_vmax(dm->dc,
						   new_crtc_state->stream,
						   &vrr_params.adjust);
		}
	}

	mod_freesync_build_vrr_infopacket(
		dm->freesync_module,
		new_stream,
		&vrr_params,
		PACKET_TYPE_VRR,
		TRANSFER_FUNC_UNKNOWN,
		&vrr_infopacket);

	new_crtc_state->freesync_timing_changed |=
		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
			&vrr_params.adjust,
			sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->freesync_vrr_info_changed |=
		(memcmp(&new_crtc_state->vrr_infopacket,
			&vrr_infopacket,
			sizeof(vrr_infopacket)) != 0);

	acrtc->dm_irq_params.vrr_params = vrr_params;
	new_crtc_state->vrr_infopacket = vrr_infopacket;

	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
	new_stream->vrr_infopacket = vrr_infopacket;

	if (new_crtc_state->freesync_vrr_info_changed)
		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
			      new_crtc_state->base.crtc->base.id,
			      (int)new_crtc_state->base.vrr_enabled,
			      (int)vrr_params.state);

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

static void update_stream_irq_parameters(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state)
{
	struct dc_stream_state *new_stream = new_crtc_state->stream;
	struct mod_vrr_params vrr_params;
	struct mod_freesync_config config = new_crtc_state->freesync_config;
	struct amdgpu_device *adev = dm->adev;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
	unsigned long flags;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */
	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;

	if (new_crtc_state->vrr_supported &&
	    config.min_refresh_in_uhz &&
	    config.max_refresh_in_uhz) {
		config.state = new_crtc_state->base.vrr_enabled ?
			VRR_STATE_ACTIVE_VARIABLE :
			VRR_STATE_INACTIVE;
	} else {
		config.state = VRR_STATE_UNSUPPORTED;
	}

	mod_freesync_build_vrr_params(dm->freesync_module,
				      new_stream,
				      &config, &vrr_params);

	new_crtc_state->freesync_timing_changed |=
		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->freesync_config = config;
	/* Copy state for access from DM IRQ handler */
	acrtc->dm_irq_params.freesync_config = config;
	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
	acrtc->dm_irq_params.vrr_params = vrr_params;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

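/*
 * Example (illustrative numbers): a 48-144 Hz FreeSync panel with VRR
 * enabled by userspace ends up with min/max_refresh_in_uhz of
 * 48000000/144000000 and config.state = VRR_STATE_ACTIVE_VARIABLE;
 * clearing the crtc's vrr_enabled property flips the state to
 * VRR_STATE_INACTIVE instead.
 */
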
static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
					    struct dm_crtc_state *new_state)
{
	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);

	if (!old_vrr_active && new_vrr_active) {
		/* Transition VRR inactive -> active:
		 * While VRR is active, we must not disable vblank irq, as a
		 * reenable after disable would compute bogus vblank/pflip
		 * timestamps if the disable likely happened inside the
		 * display front-porch.
		 *
		 * We also need vupdate irq for the actual core vblank handling
		 * at end of vblank.
		 */
		dm_set_vupdate_irq(new_state->base.crtc, true);
		drm_crtc_vblank_get(new_state->base.crtc);
		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
				 __func__, new_state->base.crtc->base.id);
	} else if (old_vrr_active && !new_vrr_active) {
		/* Transition VRR active -> inactive:
		 * Allow vblank irq disable again for fixed refresh rate.
		 */
		dm_set_vupdate_irq(new_state->base.crtc, false);
		drm_crtc_vblank_put(new_state->base.crtc);
		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
				 __func__, new_state->base.crtc->base.id);
	}
}

static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
{
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	int i;

	/*
	 * TODO: Make this per-stream so we don't issue redundant updates for
	 * commits with multiple streams.
	 */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
				       new_plane_state, i)
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			handle_cursor_update(plane, old_plane_state);
}

static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
				    struct dc_state *dc_state,
				    struct drm_device *dev,
				    struct amdgpu_display_manager *dm,
				    struct drm_crtc *pcrtc,
				    bool wait_for_vblank)
{
	uint32_t i;
	uint64_t timestamp_ns;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
	struct drm_crtc_state *new_pcrtc_state =
			drm_atomic_get_new_crtc_state(state, pcrtc);
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
	struct dm_crtc_state *dm_old_crtc_state =
			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
	int planes_count = 0, vpos, hpos;
	long r;
	unsigned long flags;
	struct amdgpu_bo *abo;
	uint32_t target_vblank, last_flip_vblank;
	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
	bool pflip_present = false;
	struct {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} *bundle;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		dm_error("Failed to allocate update bundle\n");
		goto cleanup;
	}

	/*
	 * Disable the cursor first if we're disabling all the planes.
	 * It'll remain on the screen after the planes are re-enabled
	 * if we don't.
	 */
	if (acrtc_state->active_planes == 0)
		amdgpu_dm_commit_cursors(state);

	/* update planes when needed */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
		struct drm_crtc *crtc = new_plane_state->crtc;
		struct drm_crtc_state *new_crtc_state;
		struct drm_framebuffer *fb = new_plane_state->fb;
		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
		bool plane_needs_flip;
		struct dc_plane_state *dc_plane;
		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);

		/* Cursor plane is handled after stream updates */
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (!fb || !crtc || pcrtc != crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
		if (!new_crtc_state->active)
			continue;

		dc_plane = dm_new_plane_state->dc_state;

		bundle->surface_updates[planes_count].surface = dc_plane;
		if (new_pcrtc_state->color_mgmt_changed) {
			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
		}

		fill_dc_scaling_info(new_plane_state,
				     &bundle->scaling_infos[planes_count]);

		bundle->surface_updates[planes_count].scaling_info =
			&bundle->scaling_infos[planes_count];

		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;

		pflip_present = pflip_present || plane_needs_flip;

		if (!plane_needs_flip) {
			planes_count += 1;
			continue;
		}

		abo = gem_to_amdgpu_bo(fb->obj[0]);

		/*
		 * Wait for all fences on this FB. Do limited wait to avoid
		 * deadlock during GPU reset when this fence will not signal
		 * but we hold reservation lock for the BO.
		 */
		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
					      false,
					      msecs_to_jiffies(5000));
		if (unlikely(r <= 0))
			DRM_ERROR("Waiting for fences timed out!");

		fill_dc_plane_info_and_addr(
			dm->adev, new_plane_state,
			afb->tiling_flags,
			&bundle->plane_infos[planes_count],
			&bundle->flip_addrs[planes_count].address,
			afb->tmz_surface, false);

		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
				 new_plane_state->plane->index,
				 bundle->plane_infos[planes_count].dcc.enable);

		bundle->surface_updates[planes_count].plane_info =
			&bundle->plane_infos[planes_count];

		/*
		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroring.
		 */
		bundle->flip_addrs[planes_count].flip_immediate =
			crtc->state->async_flip &&
			acrtc_state->update_type == UPDATE_TYPE_FAST;

		timestamp_ns = ktime_get_ns();
		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
		bundle->surface_updates[planes_count].surface = dc_plane;

		if (!bundle->surface_updates[planes_count].surface) {
			DRM_ERROR("No surface for CRTC: id=%d\n",
				  acrtc_attach->crtc_id);
			continue;
		}

		if (plane == pcrtc->primary)
			update_freesync_state_on_stream(
				dm,
				acrtc_state,
				acrtc_state->stream,
				dc_plane,
				bundle->flip_addrs[planes_count].flip_timestamp_in_us);

		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
				 __func__,
				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);

		planes_count += 1;

	}

	if (pflip_present) {
		if (!vrr_active) {
			/* Use old throttling in non-vrr fixed refresh rate mode
			 * to keep flip scheduling based on target vblank counts
			 * working in a backwards compatible way, e.g., for
			 * clients using the GLX_OML_sync_control extension or
			 * DRI3/Present extension with defined target_msc.
			 */
			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
		} else {
			/* For variable refresh rate mode only:
			 * Get vblank of last completed flip to avoid > 1 vrr
			 * flips per video frame by use of throttling, but allow
			 * flip programming anywhere in the possibly large
			 * variable vrr vblank interval for fine-grained flip
			 * timing control and more opportunity to avoid stutter
			 * on late submission of flips.
			 */
			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		target_vblank = last_flip_vblank + wait_for_vblank;

		/*
		 * Wait until we're out of the vertical blank period before the one
		 * targeted by the flip
		 */
		while ((acrtc_attach->enabled &&
			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
							    0, &vpos, &hpos, NULL,
							    NULL, &pcrtc->hwmode)
			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
			(int)(target_vblank -
			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
			usleep_range(1000, 1100);
		}

		/**
		 * Prepare the flip event for the pageflip interrupt to handle.
		 *
		 * This only works in the case where we've already turned on the
		 * appropriate hardware blocks (eg. HUBP) so in the transition case
		 * from 0 -> n planes we have to skip a hardware generated event
		 * and rely on sending it from software.
		 */
		if (acrtc_attach->base.state->event &&
		    acrtc_state->active_planes > 0) {
			drm_crtc_vblank_get(pcrtc);

			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);

			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
			prepare_flip_isr(acrtc_attach);

			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		if (acrtc_state->stream) {
			if (acrtc_state->freesync_vrr_info_changed)
				bundle->stream_update.vrr_infopacket =
					&acrtc_state->stream->vrr_infopacket;
		}
	}

	/* Update the planes if changed or disable if we don't have any. */
	if ((planes_count || acrtc_state->active_planes == 0) &&
	    acrtc_state->stream) {
		bundle->stream_update.stream = acrtc_state->stream;
		if (new_pcrtc_state->mode_changed) {
			bundle->stream_update.src = acrtc_state->stream->src;
			bundle->stream_update.dst = acrtc_state->stream->dst;
		}

		if (new_pcrtc_state->color_mgmt_changed) {
			/*
			 * TODO: This isn't fully correct since we've actually
			 * already modified the stream in place.
			 */
			bundle->stream_update.gamut_remap =
				&acrtc_state->stream->gamut_remap_matrix;
			bundle->stream_update.output_csc_transform =
				&acrtc_state->stream->csc_color_matrix;
			bundle->stream_update.out_transfer_func =
				acrtc_state->stream->out_transfer_func;
		}

		acrtc_state->stream->abm_level = acrtc_state->abm_level;
		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
			bundle->stream_update.abm_level = &acrtc_state->abm_level;

		/*
		 * If FreeSync state on the stream has changed then we need to
		 * re-adjust the min/max bounds now that DC doesn't handle this
		 * as part of commit.
		 */
		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
		    amdgpu_dm_vrr_active(acrtc_state)) {
			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			dc_stream_adjust_vmin_vmax(
				dm->dc, acrtc_state->stream,
				&acrtc_attach->dm_irq_params.vrr_params.adjust);
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}
		mutex_lock(&dm->dc_lock);
		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
		    acrtc_state->stream->link->psr_settings.psr_allow_active)
			amdgpu_dm_psr_disable(acrtc_state->stream);

		dc_commit_updates_for_stream(dm->dc,
					     bundle->surface_updates,
					     planes_count,
					     acrtc_state->stream,
					     &bundle->stream_update,
					     dc_state);

		/**
		 * Enable or disable the interrupts on the backend.
		 *
		 * Most pipes are put into power gating when unused.
		 *
		 * When power gating is enabled on a pipe we lose the
		 * interrupt enablement state when power gating is disabled.
		 *
		 * So we need to update the IRQ control state in hardware
		 * whenever the pipe turns on (since it could be previously
		 * power gated) or off (since some pipes can't be power gated
		 * on some ASICs).
		 */
		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
			dm_update_pflip_irq_state(drm_to_adev(dev),
						  acrtc_attach);

		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
		    acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
		    !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
			amdgpu_dm_link_setup_psr(acrtc_state->stream);
		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
			 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
			 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
			amdgpu_dm_psr_enable(acrtc_state->stream);
		}

		mutex_unlock(&dm->dc_lock);
	}

	/*
	 * Update cursor state *after* programming all the planes.
	 * This avoids redundant programming in the case where we're going
	 * to be disabling a single plane - those pipes are being disabled.
	 */
	if (acrtc_state->active_planes)
		amdgpu_dm_commit_cursors(state);

cleanup:
	kfree(bundle);
}

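/*
 * Example of the flip throttling above (illustrative): with wait_for_vblank
 * == true and the last completed flip in vblank N, target_vblank is N + 1,
 * so the busy-wait loop holds the new flip while scanout is still inside
 * vblank N, limiting the pipe to at most one flip per refresh cycle.
 */
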
static void amdgpu_dm_commit_audio(struct drm_device *dev,
				   struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *new_dm_crtc_state;
	const struct dc_stream_status *status;
	int i, inst;

	/* Notify device removals. */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		if (old_con_state->crtc != new_con_state->crtc) {
			/* CRTC changes require notification. */
			goto notify;
		}

		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(
			state, new_con_state->crtc);

		if (!new_crtc_state)
			continue;

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

	notify:
		aconnector = to_amdgpu_dm_connector(connector);

		mutex_lock(&adev->dm.audio_lock);
		inst = aconnector->audio_inst;
		aconnector->audio_inst = -1;
		mutex_unlock(&adev->dm.audio_lock);

		amdgpu_dm_audio_eld_notify(adev, inst);
	}

	/* Notify audio device additions. */
	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(
			state, new_con_state->crtc);

		if (!new_crtc_state)
			continue;

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (!new_dm_crtc_state->stream)
			continue;

		status = dc_stream_get_status(new_dm_crtc_state->stream);
		if (!status)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);

		mutex_lock(&adev->dm.audio_lock);
		inst = status->audio_inst;
		aconnector->audio_inst = inst;
		mutex_unlock(&adev->dm.audio_lock);

		amdgpu_dm_audio_eld_notify(adev, inst);
	}
}

/*
 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
 * @crtc_state: the DRM CRTC state
 * @stream_state: the DC stream state.
 *
 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
 */
static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
						struct dc_stream_state *stream_state)
{
	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
}

b8592b48
LL
8151/**
8152 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8153 * @state: The atomic state to commit
8154 *
8155 * This will tell DC to commit the constructed DC state from atomic_check,
8156 * programming the hardware. Any failures here implies a hardware failure, since
8157 * atomic check should have filtered anything non-kosher.
8158 */
static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dm_atomic_state *dm_state;
	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
	uint32_t i, j;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	unsigned long flags;
	bool wait_for_vblank = true;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	int crtc_disable_count = 0;
	bool mode_set_reset_required = false;

	trace_amdgpu_dm_atomic_commit_tail_begin(state);

	drm_atomic_helper_update_legacy_modeset_state(dev, state);

	dm_state = dm_atomic_get_new_state(state);
	if (dm_state && dm_state->context) {
		dc_state = dm_state->context;
	} else {
		/* No state changes, retain current state. */
		dc_state_temp = dc_create_state(dm->dc);
		ASSERT(dc_state_temp);
		dc_state = dc_state_temp;
		dc_resource_state_copy_construct_current(dm->dc, dc_state);
	}

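	/*
	 * Disable interrupts and drop the stream reference ahead of
	 * dc_commit_state() for every CRTC that is being disabled or fully
	 * modeset, so no IRQ handler runs against a stream DC is about to
	 * tear down.
	 */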
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (old_crtc_state->active &&
		    (!new_crtc_state->active ||
		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
			manage_dm_interrupts(adev, acrtc, false);
			dc_stream_release(dm_old_crtc_state->stream);
		}
	}

	drm_atomic_helper_calc_timestamping_constants(state);

	/* update changed items */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		DRM_DEBUG_DRIVER(
			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
			"planes_changed:%d, mode_changed:%d, active_changed:%d, "
			"connectors_changed:%d\n",
			acrtc->crtc_id,
			new_crtc_state->enable,
			new_crtc_state->active,
			new_crtc_state->planes_changed,
			new_crtc_state->mode_changed,
			new_crtc_state->active_changed,
			new_crtc_state->connectors_changed);

		/* Disable cursor if disabling crtc */
		if (old_crtc_state->active && !new_crtc_state->active) {
			struct dc_cursor_position position;

			memset(&position, 0, sizeof(position));
			mutex_lock(&dm->dc_lock);
			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
			mutex_unlock(&dm->dc_lock);
		}

		/* Copy all transient state flags into dc state */
		if (dm_new_crtc_state->stream) {
			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
							    dm_new_crtc_state->stream);
		}

		/* handles headless hotplug case, updating new_state and
		 * aconnector as needed
		 */

		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {

			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);

			if (!dm_new_crtc_state->stream) {
				/*
				 * This could happen because of issues with
				 * userspace notification delivery.
				 * In this case userspace tries to set a mode on
				 * a display which is in fact disconnected;
				 * dc_sink is NULL on the aconnector here.
				 * We expect a reset mode to come soon.
				 *
				 * This can also happen when an unplug is done
				 * during the resume sequence.
				 *
				 * In this case, we want to pretend we still
				 * have a sink to keep the pipe running so that
				 * hw state is consistent with the sw state.
				 */
				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
						__func__, acrtc->base.base.id);
				continue;
			}

			if (dm_old_crtc_state->stream)
				remove_stream(adev, acrtc, dm_old_crtc_state->stream);

			pm_runtime_get_noresume(dev->dev);

			acrtc->enabled = true;
			acrtc->hw_mode = new_crtc_state->mode;
			crtc->hwmode = new_crtc_state->mode;
			mode_set_reset_required = true;
		} else if (modereset_required(new_crtc_state)) {
			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
			/* i.e. reset mode */
			if (dm_old_crtc_state->stream)
				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
			mode_set_reset_required = true;
		}
	} /* for_each_crtc_in_state() */

	if (dc_state) {
		/* if there was a mode set or reset, disable eDP PSR */
		if (mode_set_reset_required)
			amdgpu_dm_psr_disable_all(dm);

		dm_enable_per_frame_crtc_master_sync(dc_state);
		mutex_lock(&dm->dc_lock);
		WARN_ON(!dc_commit_state(dm->dc, dc_state));
		mutex_unlock(&dm->dc_lock);
	}

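	/*
	 * After the DC commit, cache the OTG (output timing generator)
	 * instance backing each enabled stream on its amdgpu_crtc, falling
	 * back to the status stored in the just-committed dc_state when
	 * dc_stream_get_status() has nothing yet.
	 */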
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream != NULL) {
			const struct dc_stream_status *status =
					dc_stream_get_status(dm_new_crtc_state->stream);

			if (!status)
				status = dc_stream_get_status_from_state(dc_state,
									 dm_new_crtc_state->stream);
			if (!status)
				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
			else
				acrtc->otg_inst = status->primary_otg_inst;
		}
	}
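	/*
	 * HDCP handling: if a link lost its stream while content protection
	 * was ENABLED, reset HDCP for that display and re-arm the property to
	 * DESIRED so protection is renegotiated once a stream comes back;
	 * otherwise propagate any content protection change to the HDCP
	 * workqueue.
	 */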
#ifdef CONFIG_DRM_AMD_DC_HDCP
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

		new_crtc_state = NULL;

		if (acrtc)
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
			dm_new_con_state->update_hdcp = true;
			continue;
		}

		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
			hdcp_update_display(
				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
				new_con_state->hdcp_content_type,
				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
													  : false);
	}
#endif

	/* Handle connector state changes */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct dc_surface_update dummy_updates[MAX_SURFACES];
		struct dc_stream_update stream_update;
		struct dc_info_packet hdr_packet;
		struct dc_stream_status *status = NULL;
		bool abm_changed, hdr_changed, scaling_changed;

		memset(&dummy_updates, 0, sizeof(dummy_updates));
		memset(&stream_update, 0, sizeof(stream_update));

		if (acrtc) {
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
		}

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		scaling_changed = is_scaling_state_different(dm_new_con_state,
							     dm_old_con_state);

		abm_changed = dm_new_crtc_state->abm_level !=
			      dm_old_crtc_state->abm_level;

		hdr_changed =
			is_hdr_metadata_different(old_con_state, new_con_state);

		if (!scaling_changed && !abm_changed && !hdr_changed)
			continue;

		stream_update.stream = dm_new_crtc_state->stream;
		if (scaling_changed) {
			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
					dm_new_con_state, dm_new_crtc_state->stream);

			stream_update.src = dm_new_crtc_state->stream->src;
			stream_update.dst = dm_new_crtc_state->stream->dst;
		}

		if (abm_changed) {
			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;

			stream_update.abm_level = &dm_new_crtc_state->abm_level;
		}

		if (hdr_changed) {
			fill_hdr_info_packet(new_con_state, &hdr_packet);
			stream_update.hdr_static_metadata = &hdr_packet;
		}

		status = dc_stream_get_status(dm_new_crtc_state->stream);
		WARN_ON(!status);
		WARN_ON(!status->plane_count);

		/*
		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
		 * Here we create an empty update on each plane.
		 * To fix this, DC should permit updating only stream properties.
		 */
		for (j = 0; j < status->plane_count; j++)
			dummy_updates[j].surface = status->plane_states[0];

		mutex_lock(&dm->dc_lock);
		dc_commit_updates_for_stream(dm->dc,
					     dummy_updates,
					     status->plane_count,
					     dm_new_crtc_state->stream,
					     &stream_update,
					     dc_state);
		mutex_unlock(&dm->dc_lock);
	}

	/* Count number of newly disabled CRTCs for dropping PM refs later. */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		if (old_crtc_state->active && !new_crtc_state->active)
			crtc_disable_count++;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		/* For freesync config update on crtc state and params for irq */
		update_stream_irq_parameters(dm, dm_new_crtc_state);

		/* Handle vrr on->off / off->on transitions */
		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
						dm_new_crtc_state);
	}

	/*
	 * Enable interrupts for CRTCs that are newly enabled or went through
	 * a modeset. This is intentionally deferred until after the front end
	 * state has been modified, so the OTG is on by the time the IRQ
	 * handlers run and they don't access stale or invalid state.
	 */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
		bool configure_crc = false;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (new_crtc_state->active &&
		    (!old_crtc_state->active ||
		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
			dc_stream_retain(dm_new_crtc_state->stream);
			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
			manage_dm_interrupts(adev, acrtc, true);
		}
		if (IS_ENABLED(CONFIG_DEBUG_FS) && new_crtc_state->active &&
		    amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
			/*
			 * Frontend may have changed, so reapply the CRC capture
			 * settings for the stream.
			 */
			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
			dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

			if (amdgpu_dm_crc_window_is_default(dm_new_crtc_state)) {
				if (!old_crtc_state->active || drm_atomic_crtc_needs_modeset(new_crtc_state))
					configure_crc = true;
			} else {
				if (amdgpu_dm_crc_window_changed(dm_new_crtc_state, dm_old_crtc_state))
					configure_crc = true;
			}

			if (configure_crc)
				amdgpu_dm_crtc_configure_crc_source(
					crtc, dm_new_crtc_state, dm_new_crtc_state->crc_src);
		}
	}

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
		if (new_crtc_state->async_flip)
			wait_for_vblank = false;

	/* update planes when needed per crtc */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream)
			amdgpu_dm_commit_planes(state, dc_state, dev,
						dm, crtc, wait_for_vblank);
	}

	/* Update audio instances for each connector. */
	amdgpu_dm_commit_audio(dev, state);

	/*
	 * Send a vblank event for every event not handled in flip and
	 * mark the event consumed for drm_atomic_helper_commit_hw_done.
	 */
	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {

		if (new_crtc_state->event)
			drm_send_event_locked(dev, &new_crtc_state->event->base);

		new_crtc_state->event = NULL;
	}
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	/* Signal HW programming completion */
	drm_atomic_helper_commit_hw_done(state);

	if (wait_for_vblank)
		drm_atomic_helper_wait_for_flip_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	/* return the stolen vga memory back to VRAM */
	if (!adev->mman.keep_stolen_vga_memory)
		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);

	/*
	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
	 * so we can put the GPU into runtime suspend if we're not driving any
	 * displays anymore.
	 */
	for (i = 0; i < crtc_disable_count; i++)
		pm_runtime_put_autosuspend(dev->dev);
	pm_runtime_mark_last_busy(dev->dev);

	if (dc_state_temp)
		dc_release_state(dc_state_temp);
}

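/*
 * Build and commit a minimal atomic state to restore the display previously
 * driven through @connector: pull the connector, its encoder's CRTC and that
 * CRTC's primary plane into a fresh state, force mode_changed so the full
 * modeset path runs, then commit it.
 */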
static int dm_force_atomic_commit(struct drm_connector *connector)
{
	int ret = 0;
	struct drm_device *ddev = connector->dev;
	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
	struct drm_plane *plane = disconnected_acrtc->base.primary;
	struct drm_connector_state *conn_state;
	struct drm_crtc_state *crtc_state;
	struct drm_plane_state *plane_state;

	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ddev->mode_config.acquire_ctx;

	/* Construct an atomic state to restore the previous display setting */

	/*
	 * Attach connectors to drm_atomic_state
	 */
	conn_state = drm_atomic_get_connector_state(state, connector);

	ret = PTR_ERR_OR_ZERO(conn_state);
	if (ret)
		goto err;

	/* Attach crtc to drm_atomic_state */
	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);

	ret = PTR_ERR_OR_ZERO(crtc_state);
	if (ret)
		goto err;

	/* force a restore */
	crtc_state->mode_changed = true;

	/* Attach plane to drm_atomic_state */
	plane_state = drm_atomic_get_plane_state(state, plane);

	ret = PTR_ERR_OR_ZERO(plane_state);
	if (ret)
		goto err;

	/* Call commit internally with the state we just constructed */
	ret = drm_atomic_commit(state);
	if (!ret)
		return 0;

err:
	DRM_ERROR("Restoring old state failed with %i\n", ret);
	drm_atomic_state_put(state);

	return ret;
}

/*
 * This function handles all cases when a set mode does not come upon hotplug.
 * This includes when a display is unplugged then plugged back into the
 * same port, and when running without usermode desktop manager support.
 */
void dm_restore_drm_connector_state(struct drm_device *dev,
				    struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct amdgpu_crtc *disconnected_acrtc;
	struct dm_crtc_state *acrtc_state;

	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
		return;

	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
	if (!disconnected_acrtc)
		return;

	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
	if (!acrtc_state->stream)
		return;

	/*
	 * If the previous sink is not released and is different from the
	 * current one, we deduce we are in a state where we cannot rely on a
	 * usermode call to turn on the display, so we do it here.
	 */
	if (acrtc_state->stream->sink != aconnector->dc_sink)
		dm_force_atomic_commit(&aconnector->base);
}

/*
 * Grabs all modesetting locks to serialize against any blocking commits,
 * and waits for completion of all non-blocking commits.
 */
8639static int do_aquire_global_lock(struct drm_device *dev,
8640 struct drm_atomic_state *state)
e7b07cee
HW
8641{
8642 struct drm_crtc *crtc;
8643 struct drm_crtc_commit *commit;
8644 long ret;
8645
1f6010a9
DF
8646 /*
8647 * Adding all modeset locks to aquire_ctx will
e7b07cee
HW
8648 * ensure that when the framework release it the
8649 * extra locks we are locking here will get released to
8650 */
8651 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8652 if (ret)
8653 return ret;
8654
8655 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8656 spin_lock(&crtc->commit_lock);
8657 commit = list_first_entry_or_null(&crtc->commit_list,
8658 struct drm_crtc_commit, commit_entry);
8659 if (commit)
8660 drm_crtc_commit_get(commit);
8661 spin_unlock(&crtc->commit_lock);
8662
8663 if (!commit)
8664 continue;
8665
1f6010a9
DF
8666 /*
8667 * Make sure all pending HW programming completed and
e7b07cee
HW
8668 * page flips done
8669 */
8670 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8671
8672 if (ret > 0)
8673 ret = wait_for_completion_interruptible_timeout(
8674 &commit->flip_done, 10*HZ);
8675
8676 if (ret == 0)
8677 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
b830ebc9 8678 "timed out\n", crtc->base.id, crtc->name);
e7b07cee
HW
8679
8680 drm_crtc_commit_put(commit);
8681 }
8682
8683 return ret < 0 ? ret : 0;
8684}
8685
bb47de73
NK
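/*
 * Derive the FreeSync config for a CRTC from the connector's reported
 * min/max vertical refresh. mod_freesync expects the range in uHz, hence
 * the * 1000000 conversion from the Hz values cached on the connector.
 */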
static void get_freesync_config_for_crtc(
		struct dm_crtc_state *new_crtc_state,
		struct dm_connector_state *new_con_state)
{
	struct mod_freesync_config config = {0};
	struct amdgpu_dm_connector *aconnector =
			to_amdgpu_dm_connector(new_con_state->base.connector);
	struct drm_display_mode *mode = &new_crtc_state->base.mode;
	int vrefresh = drm_mode_vrefresh(mode);

	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
					vrefresh >= aconnector->min_vfreq &&
					vrefresh <= aconnector->max_vfreq;

	if (new_crtc_state->vrr_supported) {
		new_crtc_state->stream->ignore_msa_timing_param = true;
		config.state = new_crtc_state->base.vrr_enabled ?
				VRR_STATE_ACTIVE_VARIABLE :
				VRR_STATE_INACTIVE;
		config.min_refresh_in_uhz =
				aconnector->min_vfreq * 1000000;
		config.max_refresh_in_uhz =
				aconnector->max_vfreq * 1000000;
		config.vsif_supported = true;
		config.btr = true;
	}

	new_crtc_state->freesync_config = config;
}

static void reset_freesync_config_for_crtc(
		struct dm_crtc_state *new_crtc_state)
{
	new_crtc_state->vrr_supported = false;

	memset(&new_crtc_state->vrr_infopacket, 0,
	       sizeof(new_crtc_state->vrr_infopacket));
}

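/*
 * Create or remove the DC stream for one CRTC in the atomic state. With
 * enable=false the old stream is removed from the DC context; with
 * enable=true a new stream is validated against the sink and added.
 * Either direction flags lock_and_validation_needed so atomic check takes
 * the slow, globally validated path.
 */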
static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
				struct drm_atomic_state *state,
				struct drm_crtc *crtc,
				struct drm_crtc_state *old_crtc_state,
				struct drm_crtc_state *new_crtc_state,
				bool enable,
				bool *lock_and_validation_needed)
{
	struct dm_atomic_state *dm_state = NULL;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	struct dc_stream_state *new_stream;
	int ret = 0;

	/*
	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
	 * update changed items
	 */
	struct amdgpu_crtc *acrtc = NULL;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;

	new_stream = NULL;

	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
	acrtc = to_amdgpu_crtc(crtc);
	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);

	/* TODO This hack should go away */
	if (aconnector && enable) {
		/* Make sure fake sink is created in plug-in scenario */
		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
									&aconnector->base);
		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
									&aconnector->base);

		if (IS_ERR(drm_new_conn_state)) {
			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
			goto fail;
		}

		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			goto skip_modeset;

		new_stream = create_validate_stream_for_sink(aconnector,
							     &new_crtc_state->mode,
							     dm_new_conn_state,
							     dm_old_crtc_state->stream);

		/*
		 * We can have no stream on ACTION_SET if a display
		 * was disconnected during S3; in this case it is not an
		 * error, the OS will be updated after detection and
		 * will do the right thing on the next atomic commit.
		 */

		if (!new_stream) {
			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
					__func__, acrtc->base.base.id);
			ret = -ENOMEM;
			goto fail;
		}

		/*
		 * TODO: Check VSDB bits to decide whether this should
		 * be enabled or not.
		 */
		new_stream->triggered_crtc_reset.enabled =
			dm->force_timing_sync;

		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

		ret = fill_hdr_info_packet(drm_new_conn_state,
					   &new_stream->hdr_static_metadata);
		if (ret)
			goto fail;

		/*
		 * If we already removed the old stream from the context
		 * (and set the new stream to NULL) then we can't reuse
		 * the old stream even if the stream and scaling are unchanged.
		 * We'll hit the BUG_ON and black screen.
		 *
		 * TODO: Refactor this function to allow this check to work
		 * in all conditions.
		 */
		if (dm_new_crtc_state->stream &&
		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
			new_crtc_state->mode_changed = false;
			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
					 new_crtc_state->mode_changed);
		}
	}

	/* mode_changed flag may get updated above, need to check again */
	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
		goto skip_modeset;

	DRM_DEBUG_DRIVER(
		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
		"planes_changed:%d, mode_changed:%d, active_changed:%d, "
		"connectors_changed:%d\n",
		acrtc->crtc_id,
		new_crtc_state->enable,
		new_crtc_state->active,
		new_crtc_state->planes_changed,
		new_crtc_state->mode_changed,
		new_crtc_state->active_changed,
		new_crtc_state->connectors_changed);

	/* Remove stream for any changed/disabled CRTC */
	if (!enable) {

		if (!dm_old_crtc_state->stream)
			goto skip_modeset;

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
				 crtc->base.id);

		/* i.e. reset mode */
		if (dc_remove_stream_from_ctx(
				dm->dc,
				dm_state->context,
				dm_old_crtc_state->stream) != DC_OK) {
			ret = -EINVAL;
			goto fail;
		}

		dc_stream_release(dm_old_crtc_state->stream);
		dm_new_crtc_state->stream = NULL;

		reset_freesync_config_for_crtc(dm_new_crtc_state);

		*lock_and_validation_needed = true;

	} else { /* Add stream for any updated/enabled CRTC */
		/*
		 * Quick fix to prevent a NULL pointer on new_stream when newly
		 * added MST connectors are not found in the existing crtc_state
		 * in chained mode.
		 * TODO: need to dig out the root cause of that
		 */
		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
			goto skip_modeset;

		if (modereset_required(new_crtc_state))
			goto skip_modeset;

		if (modeset_required(new_crtc_state, new_stream,
				     dm_old_crtc_state->stream)) {

			WARN_ON(dm_new_crtc_state->stream);

			ret = dm_atomic_get_state(state, &dm_state);
			if (ret)
				goto fail;

			dm_new_crtc_state->stream = new_stream;

			dc_stream_retain(new_stream);

			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
					 crtc->base.id);

			if (dc_add_stream_to_ctx(
					dm->dc,
					dm_state->context,
					dm_new_crtc_state->stream) != DC_OK) {
				ret = -EINVAL;
				goto fail;
			}

			*lock_and_validation_needed = true;
		}
	}

skip_modeset:
	/* Release extra reference */
	if (new_stream)
		dc_stream_release(new_stream);

	/*
	 * We want to do dc stream updates that do not require a
	 * full modeset below.
	 */
	if (!(enable && aconnector && new_crtc_state->active))
		return 0;
	/*
	 * Given above conditions, the dc state cannot be NULL because:
	 * 1. We're in the process of enabling CRTCs (the stream has just
	 *    been added to the dc context, or is already on the context)
	 * 2. Has a valid connector attached, and
	 * 3. Is currently active and enabled.
	 * => The dc stream state currently exists.
	 */
	BUG_ON(dm_new_crtc_state->stream == NULL);

	/* Scaling or underscan settings */
	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
		update_stream_scaling_settings(
			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);

	/* ABM settings */
	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

	/*
	 * Color management settings. We also update color properties
	 * when a modeset is needed, to ensure it gets reprogrammed.
	 */
	if (dm_new_crtc_state->base.color_mgmt_changed ||
	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
		if (ret)
			goto fail;
	}

	/* Update Freesync settings. */
	get_freesync_config_for_crtc(dm_new_crtc_state,
				     dm_new_conn_state);

	return ret;

fail:
	if (new_stream)
		dc_stream_release(new_stream);
	return ret;
}

static bool should_reset_plane(struct drm_atomic_state *state,
			       struct drm_plane *plane,
			       struct drm_plane_state *old_plane_state,
			       struct drm_plane_state *new_plane_state)
{
	struct drm_plane *other;
	struct drm_plane_state *old_other_state, *new_other_state;
	struct drm_crtc_state *new_crtc_state;
	int i;

	/*
	 * TODO: Remove this hack once the checks below are sufficient
	 * to determine when we need to reset all the planes on
	 * the stream.
	 */
	if (state->allow_modeset)
		return true;

	/* Exit early if we know that we're adding or removing the plane. */
	if (old_plane_state->crtc != new_plane_state->crtc)
		return true;

	/* old crtc == new_crtc == NULL, plane not in context. */
	if (!new_plane_state->crtc)
		return false;

	new_crtc_state =
		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);

	if (!new_crtc_state)
		return true;

	/* CRTC Degamma changes currently require us to recreate planes. */
	if (new_crtc_state->color_mgmt_changed)
		return true;

	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
		return true;

	/*
	 * If there are any new primary or overlay planes being added or
	 * removed then the z-order can potentially change. To ensure
	 * correct z-order and pipe acquisition the current DC architecture
	 * requires us to remove and recreate all existing planes.
	 *
	 * TODO: Come up with a more elegant solution for this.
	 */
	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
		struct amdgpu_framebuffer *old_afb, *new_afb;

		if (other->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (old_other_state->crtc != new_plane_state->crtc &&
		    new_other_state->crtc != new_plane_state->crtc)
			continue;

		if (old_other_state->crtc != new_other_state->crtc)
			return true;

		/* Src/dst size and scaling updates. */
		if (old_other_state->src_w != new_other_state->src_w ||
		    old_other_state->src_h != new_other_state->src_h ||
		    old_other_state->crtc_w != new_other_state->crtc_w ||
		    old_other_state->crtc_h != new_other_state->crtc_h)
			return true;

		/* Rotation / mirroring updates. */
		if (old_other_state->rotation != new_other_state->rotation)
			return true;

		/* Blending updates. */
		if (old_other_state->pixel_blend_mode !=
		    new_other_state->pixel_blend_mode)
			return true;

		/* Alpha updates. */
		if (old_other_state->alpha != new_other_state->alpha)
			return true;

		/* Colorspace changes. */
		if (old_other_state->color_range != new_other_state->color_range ||
		    old_other_state->color_encoding != new_other_state->color_encoding)
			return true;

		/* Framebuffer checks fall at the end. */
		if (!old_other_state->fb || !new_other_state->fb)
			continue;

		/* Pixel format changes can require bandwidth updates. */
		if (old_other_state->fb->format != new_other_state->fb->format)
			return true;

		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;

		/* Tiling and DCC changes also require bandwidth updates. */
		if (old_afb->tiling_flags != new_afb->tiling_flags ||
		    old_afb->base.modifier != new_afb->base.modifier)
			return true;
	}

	return false;
}

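/*
 * Sanity-check a framebuffer bound to the cursor plane: it must fit the
 * hardware cursor limits, be uncropped, have a pitch of exactly 64, 128 or
 * 256 pixels, and be linear whenever no format modifier spells out the
 * tiling.
 */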
static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
			      struct drm_plane_state *new_plane_state,
			      struct drm_framebuffer *fb)
{
	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
	unsigned int pitch;
	bool linear;

	if (fb->width > new_acrtc->max_cursor_width ||
	    fb->height > new_acrtc->max_cursor_height) {
		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
				 new_plane_state->fb->width,
				 new_plane_state->fb->height);
		return -EINVAL;
	}
	if (new_plane_state->src_w != fb->width << 16 ||
	    new_plane_state->src_h != fb->height << 16) {
		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
		return -EINVAL;
	}

	/* Pitch in pixels */
	pitch = fb->pitches[0] / fb->format->cpp[0];

	if (fb->width != pitch) {
		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
				 fb->width, pitch);
		return -EINVAL;
	}

	switch (pitch) {
	case 64:
	case 128:
	case 256:
		/* FB pitch is supported by cursor plane */
		break;
	default:
		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
		return -EINVAL;
	}

	/*
	 * Core DRM takes care of checking FB modifiers, so we only need to
	 * check tiling flags when the FB doesn't have a modifier.
	 */
	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
		if (adev->family < AMDGPU_FAMILY_AI) {
			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
				 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
		} else {
			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
		}
		if (!linear) {
			DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
			return -EINVAL;
		}
	}

	return 0;
}

static int dm_update_plane_state(struct dc *dc,
				 struct drm_atomic_state *state,
				 struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state,
				 struct drm_plane_state *new_plane_state,
				 bool enable,
				 bool *lock_and_validation_needed)
{
	struct dm_atomic_state *dm_state = NULL;
	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
	struct amdgpu_crtc *new_acrtc;
	bool needs_reset;
	int ret = 0;

	new_plane_crtc = new_plane_state->crtc;
	old_plane_crtc = old_plane_state->crtc;
	dm_new_plane_state = to_dm_plane_state(new_plane_state);
	dm_old_plane_state = to_dm_plane_state(old_plane_state);

	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
		if (!enable || !new_plane_crtc ||
		    drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		new_acrtc = to_amdgpu_crtc(new_plane_crtc);

		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
			return -EINVAL;
		}

		if (new_plane_state->fb) {
			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
						 new_plane_state->fb);
			if (ret)
				return ret;
		}

		return 0;
	}

	needs_reset = should_reset_plane(state, plane, old_plane_state,
					 new_plane_state);

	/* Remove any changed/removed planes */
	if (!enable) {
		if (!needs_reset)
			return 0;

		if (!old_plane_crtc)
			return 0;

		old_crtc_state = drm_atomic_get_old_crtc_state(
				state, old_plane_crtc);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (!dm_old_crtc_state->stream)
			return 0;

		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, old_plane_crtc->base.id);

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			return ret;

		if (!dc_remove_plane_from_context(
				dc,
				dm_old_crtc_state->stream,
				dm_old_plane_state->dc_state,
				dm_state->context)) {
			return -EINVAL;
		}

		dc_plane_state_release(dm_old_plane_state->dc_state);
		dm_new_plane_state->dc_state = NULL;

		*lock_and_validation_needed = true;

	} else { /* Add new planes */
		struct dc_plane_state *dc_new_plane_state;

		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		if (!new_plane_crtc)
			return 0;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (!dm_new_crtc_state->stream)
			return 0;

		if (!needs_reset)
			return 0;

		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
		if (ret)
			return ret;

		WARN_ON(dm_new_plane_state->dc_state);

		dc_new_plane_state = dc_create_plane_state(dc);
		if (!dc_new_plane_state)
			return -ENOMEM;

		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, new_plane_crtc->base.id);

		ret = fill_dc_plane_attributes(
			drm_to_adev(new_plane_crtc->dev),
			dc_new_plane_state,
			new_plane_state,
			new_crtc_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		/*
		 * Any atomic check errors that occur after this will
		 * not need a release. The plane state will be attached
		 * to the stream, and therefore part of the atomic
		 * state. It'll be released when the atomic state is
		 * cleaned.
		 */
		if (!dc_add_plane_to_context(
				dc,
				dm_new_crtc_state->stream,
				dc_new_plane_state,
				dm_state->context)) {
			dc_plane_state_release(dc_new_plane_state);
			return -EINVAL;
		}

		dm_new_plane_state->dc_state = dc_new_plane_state;

		/* Tell DC to do a full surface update every time there
		 * is a plane change. Inefficient, but works for now.
		 */
		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;

		*lock_and_validation_needed = true;
	}

	return ret;
}

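/*
 * Scale factors below are compared in integer thousandths: the crtc size
 * is multiplied by 1000 before dividing by the 16.16 fixed-point source
 * size, which avoids floating point in the kernel while still catching
 * cursor/primary scaling mismatches.
 */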
static int dm_check_crtc_cursor(struct drm_atomic_state *state,
				struct drm_crtc *crtc,
				struct drm_crtc_state *new_crtc_state)
{
	struct drm_plane_state *new_cursor_state, *new_primary_state;
	int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;

	/*
	 * On DCE and DCN there is no dedicated hardware cursor plane. We get a
	 * cursor per pipe but it's going to inherit the scaling and
	 * positioning from the underlying pipe. Check that the cursor plane's
	 * scaling matches the primary plane's.
	 */
	new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
	new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
	if (!new_cursor_state || !new_primary_state || !new_cursor_state->fb)
		return 0;

	cursor_scale_w = new_cursor_state->crtc_w * 1000 /
			 (new_cursor_state->src_w >> 16);
	cursor_scale_h = new_cursor_state->crtc_h * 1000 /
			 (new_cursor_state->src_h >> 16);

	primary_scale_w = new_primary_state->crtc_w * 1000 /
			  (new_primary_state->src_w >> 16);
	primary_scale_h = new_primary_state->crtc_h * 1000 /
			  (new_primary_state->src_h >> 16);

	if (cursor_scale_w != primary_scale_w ||
	    cursor_scale_h != primary_scale_h) {
		DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
		return -EINVAL;
	}

	return 0;
}

9326
e10517b3 9327#if defined(CONFIG_DRM_AMD_DC_DCN)
44be939f
ML
9328static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9329{
9330 struct drm_connector *connector;
9331 struct drm_connector_state *conn_state;
9332 struct amdgpu_dm_connector *aconnector = NULL;
9333 int i;
9334 for_each_new_connector_in_state(state, connector, conn_state, i) {
9335 if (conn_state->crtc != crtc)
9336 continue;
9337
9338 aconnector = to_amdgpu_dm_connector(connector);
9339 if (!aconnector->port || !aconnector->mst_port)
9340 aconnector = NULL;
9341 else
9342 break;
9343 }
9344
9345 if (!aconnector)
9346 return 0;
9347
9348 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9349}
e10517b3 9350#endif
44be939f 9351
/**
 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
 * @dev: The DRM device
 * @state: The atomic state to commit
 *
 * Validate that the given atomic state is programmable by DC into hardware.
 * This involves constructing a &struct dc_state reflecting the new hardware
 * state we wish to commit, then querying DC to see if it is programmable. It's
 * important not to modify the existing DC state. Otherwise, atomic_check
 * may unexpectedly commit hardware changes.
 *
 * When validating the DC state, it's important that the right locks are
 * acquired. For full updates, which remove/add/update streams on one CRTC
 * while flipping on another CRTC, acquiring the global lock will guarantee
 * that any such full update commit will wait for completion of any outstanding
 * flip using DRM's synchronization events.
 *
 * Note that DM adds the affected connectors for all CRTCs in state, when that
 * might not seem necessary. This is because DC stream creation requires the
 * DC sink, which is tied to the DRM connector state. Cleaning this up should
 * be possible but non-trivial - a possible TODO item.
 *
 * Return: 0 on success, a negative error code if validation failed.
 */
static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_atomic_state *dm_state = NULL;
	struct dc *dc = adev->dm.dc;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	enum dc_status status;
	int ret, i;
	bool lock_and_validation_needed = false;
	struct dm_crtc_state *dm_old_crtc_state;

	trace_amdgpu_dm_atomic_check_begin(state);

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		goto fail;

	/* Check connector changes */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);

		/* Skip connectors that are disabled or part of modeset already. */
		if (!old_con_state->crtc && !new_con_state->crtc)
			continue;

		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
		if (IS_ERR(new_crtc_state)) {
			ret = PTR_ERR(new_crtc_state);
			goto fail;
		}

		if (dm_old_con_state->abm_level !=
		    dm_new_con_state->abm_level)
			new_crtc_state->connectors_changed = true;
	}

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->asic_type >= CHIP_NAVI10) {
		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
				ret = add_affected_mst_dsc_crtcs(state, crtc);
				if (ret)
					goto fail;
			}
		}
	}
#endif
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->color_mgmt_changed &&
		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
		    dm_old_crtc_state->dsc_force_changed == false)
			continue;

		if (!new_crtc_state->enable)
			continue;

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			goto fail;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret)
			goto fail;

		if (dm_old_crtc_state->dsc_force_changed)
			new_crtc_state->mode_changed = true;
	}

	/*
	 * Add all primary and overlay planes on the CRTC to the state
	 * whenever a plane is enabled to maintain correct z-ordering
	 * and to enable fast surface updates.
	 */
	drm_for_each_crtc(crtc, dev) {
		bool modified = false;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			if (new_plane_state->crtc == crtc ||
			    old_plane_state->crtc == crtc) {
				modified = true;
				break;
			}
		}

		if (!modified)
			continue;

		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			new_plane_state =
				drm_atomic_get_plane_state(state, plane);

			if (IS_ERR(new_plane_state)) {
				ret = PTR_ERR(new_plane_state);
				goto fail;
			}
		}
	}

	/* Remove existing planes if they are modified */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    false,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Disable all crtcs which require disable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   false,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Enable all crtcs which require enable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   true,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Add new/modified planes */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    true,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Run this here since we want to validate the streams we created */
	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		goto fail;

	/* Check cursor planes scaling */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
		if (ret)
			goto fail;
	}

	if (state->legacy_cursor_update) {
		/*
		 * This is a fast cursor update coming from the plane update
		 * helper, check if it can be done asynchronously for better
		 * performance.
		 */
		state->async_update =
			!drm_atomic_helper_async_check(dev, state);

		/*
		 * Skip the remaining global validation if this is an async
		 * update. Cursor updates can be done without affecting
		 * state or bandwidth calcs and this avoids the performance
		 * penalty of locking the private state object and
		 * allocating a new dc_state.
		 */
		if (state->async_update)
			return 0;
	}

	/* Check scaling and underscan changes */
	/*
	 * TODO: Removed scaling changes validation due to inability to commit
	 * a new stream into the context w/o causing a full reset. Need to
	 * decide how to handle.
	 */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(
				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
			continue;

		/* Skip anything that is not a scaling or underscan change */
		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
			continue;

		lock_and_validation_needed = true;
	}

	/*
	 * Streams and planes are reset when there are changes that affect
	 * bandwidth. Anything that affects bandwidth needs to go through
	 * DC global validation to ensure that the configuration can be applied
	 * to hardware.
	 *
	 * We currently have to stall out here in atomic_check for outstanding
	 * commits to finish in this case because our IRQ handlers reference
	 * DRM state directly - we can end up disabling interrupts too early
	 * if we don't.
	 *
	 * TODO: Remove this stall and drop DM state private objects.
	 */
	if (lock_and_validation_needed) {
		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		ret = do_aquire_global_lock(dev, state);
		if (ret)
			goto fail;

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
			ret = -EINVAL;
			goto fail;
		}

		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
		if (ret)
			goto fail;
#endif

		/*
		 * Perform validation of MST topology in the state:
		 * We need to perform MST atomic check before calling
		 * dc_validate_global_state(), or there is a chance
		 * to get stuck in an infinite loop and hang eventually.
		 */
		ret = drm_dp_mst_atomic_check(state);
		if (ret)
			goto fail;
		status = dc_validate_global_state(dc, dm_state->context, false);
		if (status != DC_OK) {
			DC_LOG_WARNING("DC global validation failure: %s (%d)",
				       dc_status_to_str(status), status);
			ret = -EINVAL;
			goto fail;
		}
	} else {
		/*
		 * The commit is a fast update. Fast updates shouldn't change
		 * the DC context, affect global validation, and can have their
		 * commit work done in parallel with other commits not touching
		 * the same resource. If we have a new DC context as part of
		 * the DM atomic state from validation we need to free it and
		 * retain the existing one instead.
		 *
		 * Furthermore, since the DM atomic state only contains the DC
		 * context and can safely be annulled, we can free the state
		 * and clear the associated private object now to free
		 * some memory and avoid a possible use-after-free later.
		 */

		for (i = 0; i < state->num_private_objs; i++) {
			struct drm_private_obj *obj = state->private_objs[i].ptr;

			if (obj->funcs == adev->dm.atomic_obj.funcs) {
				int j = state->num_private_objs - 1;

				dm_atomic_destroy_state(obj,
						state->private_objs[i].state);

				/*
				 * If i is not at the end of the array then the
				 * last element needs to be moved to where i was
				 * before the array can safely be truncated.
				 */
				if (i != j)
					state->private_objs[i] =
						state->private_objs[j];

				state->private_objs[j].ptr = NULL;
				state->private_objs[j].state = NULL;
				state->private_objs[j].old_state = NULL;
				state->private_objs[j].new_state = NULL;

				state->num_private_objs = j;
				break;
			}
		}
	}

	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct dm_crtc_state *dm_new_crtc_state =
			to_dm_crtc_state(new_crtc_state);

		dm_new_crtc_state->update_type = lock_and_validation_needed ?
						 UPDATE_TYPE_FULL :
						 UPDATE_TYPE_FAST;
	}

	/* Must be success */
	WARN_ON(ret);

	trace_amdgpu_dm_atomic_check_finish(state, ret);

	return ret;

fail:
	if (ret == -EDEADLK)
		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);

	trace_amdgpu_dm_atomic_check_finish(state, ret);

	return ret;
}

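/*
 * Read DP_DOWN_STREAM_PORT_COUNT from the DPCD and test the
 * DP_MSA_TIMING_PAR_IGNORED bit: a sink that can ignore the MSA timing
 * parameters is what amdgpu_dm_update_freesync_caps() below relies on to
 * decide whether the EDID needs a FreeSync range check.
 */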
static bool is_dp_capable_without_timing_msa(struct dc *dc,
					     struct amdgpu_dm_connector *amdgpu_dm_connector)
{
	uint8_t dpcd_data;
	bool capable = false;

	if (amdgpu_dm_connector->dc_link &&
	    dm_helpers_dp_read_dpcd(
			NULL,
			amdgpu_dm_connector->dc_link,
			DP_DOWN_STREAM_PORT_COUNT,
			&dpcd_data,
			sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
	}

	return capable;
}

void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				    struct edid *edid)
{
	int i;
	bool edid_check_required;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	bool freesync_capable = false;

	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state", __func__);
		goto update;
	}

	if (!edid) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	edid_check_required = false;
	if (!amdgpu_dm_connector->dc_sink) {
		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
		goto update;
	}
	if (!adev->dm.freesync_module)
		goto update;
	/*
	 * If the EDID is non-NULL, restrict FreeSync support to DP and eDP
	 * sinks only.
	 */
	if (edid) {
		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
		    || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
			edid_check_required = is_dp_capable_without_timing_msa(
							adev->dm.dc,
							amdgpu_dm_connector);
		}
	}
	if (edid_check_required && (edid->version > 1 ||
	    (edid->version == 1 && edid->revision > 1))) {
		for (i = 0; i < 4; i++) {

			timing = &edid->detailed_timings[i];
			data = &timing->data.other_data;
			range = &data->data.range;
			/*
			 * Check if the monitor has continuous frequency mode.
			 */
			if (data->type != EDID_DETAIL_MONITOR_RANGE)
				continue;
			/*
			 * Check for flag range limits only. If flag == 1 then
			 * no additional timing information is provided.
			 * Default GTF, GTF Secondary curve and CVT are not
			 * supported.
			 */
			if (range->flags != 1)
				continue;

			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
			amdgpu_dm_connector->pixel_clock_mhz =
				range->pixel_clock_mhz * 10;
			break;
		}

		if (amdgpu_dm_connector->max_vfreq -
		    amdgpu_dm_connector->min_vfreq > 10) {

			freesync_capable = true;
		}
	}
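	/*
	 * Illustrative example (not from the original source): a monitor
	 * range descriptor advertising 48-144 Hz gives a 96 Hz window, so
	 * freesync_capable becomes true. The EDID stores the maximum pixel
	 * clock in 10 MHz units, hence the "* 10" conversion to MHz above.
	 */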

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}
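
/*
 * Per the eDP specification, DP_PSR_SUPPORT (DPCD 0x070) reports the PSR
 * version the sink supports: 0 means PSR is unsupported, 1 maps to PSR1
 * (this path does not enable PSR2).
 */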
static void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];

	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
		return;
	if (link->type == dc_connection_none)
		return;
	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
				    dpcd_data, sizeof(dpcd_data))) {
		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];

		if (dpcd_data[0] == 0) {
			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
			link->psr_settings.psr_feature_enabled = false;
		} else {
			link->psr_settings.psr_version = DC_PSR_VERSION_1;
			link->psr_settings.psr_feature_enabled = true;
		}

		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
	}
}

/*
 * amdgpu_dm_link_setup_psr() - configure psr link
 * @stream: stream state
 *
 * Return: true if success
 */
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
{
	struct dc_link *link = NULL;
	struct psr_config psr_config = {0};
	struct psr_context psr_context = {0};
	bool ret = false;

	if (stream == NULL)
		return false;

	link = stream->link;

	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;

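	/*
	 * A non-zero psr_version means the sink advertised PSR support when
	 * amdgpu_dm_set_psr_caps() parsed its DPCD; otherwise link setup is
	 * skipped and false is returned.
	 */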
	if (psr_config.psr_version > 0) {
		psr_config.psr_exit_link_training_required = 0x1;
		psr_config.psr_frame_capture_indication_req = 0;
		psr_config.psr_rfb_setup_time = 0x37;
		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
		psr_config.allow_smu_optimizations = 0x0;

		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
	}
	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);

	return ret;
}

/*
 * amdgpu_dm_psr_enable() - enable psr f/w
 * @stream: stream state
 *
 * Return: true if success
 */
bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
	struct dc_link *link = stream->link;
	unsigned int vsync_rate_hz = 0;
	struct dc_static_screen_params params = {0};
	/*
	 * Calculate the number of static frames before generating the
	 * interrupt to enter PSR; start from a fail-safe of 2 static frames.
	 */
	unsigned int num_frames_static = 2;

	DRM_DEBUG_DRIVER("Enabling psr...\n");

	vsync_rate_hz = div64_u64(div64_u64((
					stream->timing.pix_clk_100hz * 100),
					stream->timing.v_total),
					stream->timing.h_total);

	/*
	 * Round up.
	 * Calculate the number of frames such that at least 30 ms of time
	 * has passed.
	 */
	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

		num_frames_static = (30000 / frame_time_microsec) + 1;
	}
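	/*
	 * Illustrative example (not from the original source): a 148.5 MHz
	 * pixel clock with a 2200 x 1125 total gives vsync_rate_hz = 60,
	 * frame_time_microsec = 16666, and num_frames_static =
	 * 30000 / 16666 + 1 = 2 static frames before PSR entry.
	 */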

	params.triggers.cursor_update = true;
	params.triggers.overlay_update = true;
	params.triggers.surface_update = true;
	params.num_frames = num_frames_static;

	dc_stream_set_static_screen_params(link->ctx->dc,
					   &stream, 1,
					   &params);

	return dc_link_set_psr_allow_active(link, true, false, false);
}

/*
 * amdgpu_dm_psr_disable() - disable psr f/w
 * @stream: stream state
 *
 * Return: true if success
 */
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{
	DRM_DEBUG_DRIVER("Disabling psr...\n");

	return dc_link_set_psr_allow_active(stream->link, false, true, false);
}

/*
 * amdgpu_dm_psr_disable_all() - disable psr f/w if psr is enabled on any
 * stream
 *
 * Return: true if success
 */
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
{
	DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
	return dc_set_psr_allow_active(dm->dc, false);
}
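
/*
 * amdgpu_dm_trigger_timing_sync() propagates the force_timing_sync flag
 * (toggled through the driver's debugfs interface, an assumption based on
 * the field name) to every active stream, then re-arms per-frame CRTC
 * master sync so the displays retime together.
 */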
void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	int i;

	mutex_lock(&adev->dm.dc_lock);
	if (dc->current_state) {
		for (i = 0; i < dc->current_state->stream_count; ++i)
			dc->current_state->streams[i]
				->triggered_crtc_reset.enabled =
				adev->dm.force_timing_sync;

		dm_enable_per_frame_crtc_master_sync(dc->current_state);
		dc_trigger_sync(dc, dc->current_state);
	}
	mutex_unlock(&adev->dm.dc_lock);
}

void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
		       uint32_t value, const char *func_name)
{
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register write. address = 0");
		return;
	}
#endif
	cgs_write_register(ctx->cgs_device, address, value);
	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
}

uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
			  const char *func_name)
{
	uint32_t value;
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register read; address = 0\n");
		return 0;
	}
#endif

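	/*
	 * While the DMUB register helper is gathering writes for offload to
	 * firmware, a direct read cannot be serviced coherently (an
	 * interpretation based on the reg_helper_offload flags checked here),
	 * so treat it as a programming error.
	 */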
	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
		ASSERT(false);
		return 0;
	}

	value = cgs_read_register(ctx->cgs_device, address);

	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);

	return value;
}