drm/amd/display: Remove semicolon from to_dm_plane_state definition
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/firmware.h>

#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_edid.h>

#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
#include "ivsrcid/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */
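
/*
 * Concretely, as wired up later in this file: userspace atomic commits enter
 * through amdgpu_dm_atomic_check() and amdgpu_dm_atomic_commit() (hooked into
 * the DRM mode_config funcs), and are ultimately programmed to hardware via
 * DC in amdgpu_dm_atomic_commit_tail().
 */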

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static void
amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc_state->stream);
	}
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc_state->stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

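		/*
		 * Repack into the legacy register layout the base driver
		 * expects: v_position/v_blank_start in the low 16 bits,
		 * h_position/v_blank_end in the high 16 bits.
		 */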
		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

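/*
 * True when the CRTC is in a FreeSync/VRR mode, whether the refresh rate is
 * currently varying (ACTIVE_VARIABLE) or pinned (ACTIVE_FIXED); the irq
 * handlers below use this to pick the right point for vblank handling.
 */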
static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	struct dm_crtc_state *acrtc_state;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	if (!e)
		WARN_ON(1);

	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
	vrr_active = amdgpu_dm_vrr_active(acrtc_state);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one is incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->last_flip_vblank = amdgpu_get_vblank_counter_kms(adev->ddev,
							amdgpu_crtc->crtc_id);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		acrtc_state = to_dm_crtc_state(acrtc->base.state);

		DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
				 amdgpu_dm_vrr_active(acrtc_state));

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (amdgpu_dm_vrr_active(acrtc_state))
			drm_crtc_handle_vblank(&acrtc->base);
	}
}

static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);

	if (acrtc) {
		acrtc_state = to_dm_crtc_state(acrtc->base.state);

		DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
				 amdgpu_dm_vrr_active(acrtc_state));

		/* Core vblank handling at start of front-porch is only possible
		 * in non-vrr mode, as only there vblank timestamping will give
		 * valid results while done in front-porch. Otherwise defer it
		 * to dm_vupdate_high_irq after end of front-porch.
		 */
		if (!amdgpu_dm_vrr_active(acrtc_state))
			drm_crtc_handle_vblank(&acrtc->base);

		/* Following stuff must happen at start of vblank, for crc
		 * computation and below-the-range btr support in vrr mode.
		 */
		amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

		if (acrtc_state->stream &&
		    acrtc_state->vrr_params.supported &&
		    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
			mod_freesync_handle_v_update(
				adev->dm.freesync_module,
				acrtc_state->stream,
				&acrtc_state->vrr_params);

			dc_stream_adjust_vmin_vmax(
				adev->dm.dc,
				acrtc_state->stream,
				&acrtc_state->vrr_params.adjust);
		}
	}
}

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_comressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

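	/*
	 * Size for the largest listed mode at, presumably, 4 bytes per pixel
	 * (max_size counts htotal * vtotal raster positions), so compressed
	 * output can never outgrow the buffer.
	 */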
	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
		}
	}
}

static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;

	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));

	mutex_init(&adev->dm.dc_lock);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->rev_id;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	/*
	 * TODO debug why this doesn't work on Raven
	 */
	if (adev->flags & AMD_IS_APU &&
	    adev->asic_type >= CHIP_CARRIZO &&
	    adev->asic_type < CHIP_RAVEN)
		init_data.flags.gpu_vm_support = true;

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				 adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

#if defined(CONFIG_DEBUG_FS)
	if (dtn_debugfs_init(adev))
		DRM_ERROR("amdgpu: failed initialize dtn debugfs support.\n");
#endif

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	amdgpu_dm_destroy_drm_device(&adev->dm);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}
	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);

	mutex_destroy(&adev->dm.dc_lock);

	return;
}

static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		return 0;
	case CHIP_RAVEN:
		fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

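	/*
	 * The DMCU image carries two pieces for PSP to load: the ERAM program
	 * (header ucode size minus the interrupt-vector bytes) and the
	 * interrupt vectors themselves; both are accounted page-aligned in
	 * fw_size below.
	 */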
	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->dm.fw_dmcu) {
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
	}

	return 0;
}

static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	int ret = 0;

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector, aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
				return ret;
			}
		}
	}

	drm_modeset_unlock(&dev->mode_config.connection_mutex);
	return ret;
}

static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	bool ret;

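	/*
	 * Build an identity (linear) backlight transfer curve: 16 evenly
	 * spaced points from 0 to 0xFFFF.
	 */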
	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	ret = dmcu_load_iram(dmcu, params);

	if (!ret)
		return -EINVAL;

	return detect_mst_link_for_all_connectors(adev->ddev);
}

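/*
 * Suspend/resume helper for MST: quiesce every topology manager on suspend;
 * on resume, try to revive each one and, if that fails, drop the link back
 * to non-MST and fire a hotplug event so userspace re-probes.
 */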
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);

	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    head) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}

	drm_modeset_unlock(&dev->mode_config.connection_mutex);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}

/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}

/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}

static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

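	/*
	 * Cache the full atomic state across suspend; dm_resume() hands it
	 * back to drm_atomic_helper_resume() once links are re-detected.
	 */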
	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);

	s3_handle_mst(adev->ddev, true);

	amdgpu_dm_irq_suspend(adev);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return ret;
}

static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc)
{
	uint32_t i;
	struct drm_connector_state *new_con_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		crtc_from_state = new_con_state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_dm_connector(connector);
	}

	return NULL;
}

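/*
 * Used for forced connectors when no physical sink answers: synthesize a
 * dc_sink matching the connector's signal type (DP is faked as a virtual
 * signal) and attempt an EDID read so modes can still be exposed.
 */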
static void emulated_link_detect(struct dc_link *link)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct display_sink_capability sink_caps = { 0 };
	enum dc_edid_status edid_status;
	struct dc_context *dc_ctx = link->ctx;
	struct dc_sink *sink = NULL;
	struct dc_sink *prev_sink = NULL;

	link->type = dc_connection_none;
	prev_sink = link->local_sink;

	if (prev_sink != NULL)
		dc_sink_retain(prev_sink);

	switch (link->connector_signal) {
	case SIGNAL_TYPE_HDMI_TYPE_A: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
		break;
	}

	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
		break;
	}

	case SIGNAL_TYPE_DVI_DUAL_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
		break;
	}

	case SIGNAL_TYPE_LVDS: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_LVDS;
		break;
	}

	case SIGNAL_TYPE_EDP: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_EDP;
		break;
	}

	case SIGNAL_TYPE_DISPLAY_PORT: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
		break;
	}

	default:
		DC_ERROR("Invalid connector type! signal:%d\n",
			 link->connector_signal);
		return;
	}

	sink_init_data.link = link;
	sink_init_data.sink_signal = sink_caps.signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DC_ERROR("Failed to create sink!\n");
		return;
	}

	/* dc_sink_create returns a new reference */
	link->local_sink = sink;

	edid_status = dm_helpers_read_local_edid(
			link->ctx,
			link,
			sink);

	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID");

}

static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct drm_device *ddev = adev->ddev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct dm_plane_state *dm_new_plane_state;
	enum dc_connection_type new_connection_type = dc_connection_none;
	int i;

	/* power on hardware */
	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

	/* program HPD filter */
	dc_resume(dm->dc);

	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* Do detection */
	list_for_each_entry(connector, &ddev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_dm_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none)
			emulated_link_detect(aconnector->dc_link);
		else
			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
			aconnector->fake_enable = false;

		if (aconnector->dc_sink)
			dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}

	/* Force mode set in atomic commit */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
		new_crtc_state->active_changed = true;

	/*
	 * atomic_check is expected to create the dc states. We need to release
	 * them here, since they were duplicated as part of the suspend
	 * procedure.
	 */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->stream) {
			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;
		}
	}

	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		if (dm_new_plane_state->dc_state) {
			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
			dc_plane_state_release(dm_new_plane_state->dc_state);
			dm_new_plane_state->dc_state = NULL;
		}
	}

	drm_atomic_helper_resume(ddev, dm->cached_state);

	dm->cached_state = NULL;

	amdgpu_dm_irq_resume_late(adev);

	return 0;
}

/**
 * DOC: DM Lifecycle
 *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
 * the base driver's device list to be initialized and torn down accordingly.
 *
 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
 */

static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};

const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};


/**
 * DOC: atomic
 *
 * *WIP*
 */

static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = amdgpu_dm_atomic_commit,
};

static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};

static void
amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;


	sink = aconnector->dc_link->local_sink;
	if (sink)
		dc_sink_retain(sink);

	/*
	 * Edid mgmt connector gets first update only in mode_valid hook and then
	 * the connector sink is set to either fake or physical sink depending on
	 * link status. Skip if already done during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/*
		 * For S3 resume with headless use the emulated sink (dc_em_sink)
		 * to fake a stream, because on resume connector->sink is set to NULL
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * retain and release below are used to
				 * bump up the refcount for the sink because the link
				 * doesn't point to it anymore after disconnect, so on the
				 * next crtc-to-connector reshuffle by UMD we would
				 * otherwise get an unwanted dc_sink release
				 */
				dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			dc_sink_retain(aconnector->dc_sink);
			amdgpu_dm_update_freesync_caps(connector,
					aconnector->edid);
		} else {
			amdgpu_dm_update_freesync_caps(connector, NULL);
			if (!aconnector->dc_sink) {
				aconnector->dc_sink = aconnector->dc_em_sink;
				dc_sink_retain(aconnector->dc_sink);
			}
		}

		mutex_unlock(&dev->mode_config.mutex);

		if (sink)
			dc_sink_release(sink);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		dc_sink_release(sink);
		return;
	}

	if (aconnector->dc_sink == sink) {
		/*
		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!!
		 */
		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				 aconnector->connector_id);
		if (sink)
			dc_sink_release(sink);
		return;
	}

	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
			 aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/*
	 * 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do
	 */
	if (sink) {
		/*
		 * TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here.
		 */
		if (aconnector->dc_sink)
			amdgpu_dm_update_freesync_caps(connector, NULL);

		aconnector->dc_sink = sink;
		dc_sink_retain(aconnector->dc_sink);
		if (sink->dc_edid.length == 0) {
			aconnector->edid = NULL;
			drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		} else {
			aconnector->edid =
				(struct edid *) sink->dc_edid.raw_edid;


			drm_connector_update_edid_property(connector,
							   aconnector->edid);
			drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
					    aconnector->edid);
		}
		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);

	} else {
		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		amdgpu_dm_update_freesync_caps(connector, NULL);
		drm_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		aconnector->edid = NULL;
	}

	mutex_unlock(&dev->mode_config.mutex);

	if (sink)
		dc_sink_release(sink);
}

static void handle_hpd_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	enum dc_connection_type new_connection_type = dc_connection_none;

	/*
	 * In case of failure or MST there is no need to update the connector
	 * status or notify the OS, since (for the MST case) MST does this in
	 * its own context.
	 */
	mutex_lock(&aconnector->hpd_lock);

	if (aconnector->fake_enable)
		aconnector->fake_enable = false;

	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");

	if (aconnector->base.force && new_connection_type == dc_connection_none) {
		emulated_link_detect(aconnector->dc_link);


		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);

	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
		amdgpu_dm_update_connector_after_detect(aconnector);


		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);

}

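/*
 * Drain the sink's downstream-port IRQ vector over DPCD (legacy 0x200 range
 * for pre-1.2 sinks, the ESI range at 0x2002 otherwise), hand each batch to
 * the MST manager, and ACK it back to the sink until no new IRQ is reported
 * or max_process_count iterations have run.
 */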
static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;

	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

	while (dret == dpcd_bytes_to_read &&
	       process_count < max_process_count) {
		uint8_t retry;
		dret = 0;

		process_count++;

		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify down stream */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else {
			break;
		}
	}

	if (process_count == max_process_count)
		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}

static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
	enum dc_connection_type new_connection_type = dc_connection_none;

	/*
	 * TODO: Temporarily hold a mutex so the hpd interrupt cannot hit a gpio
	 * conflict; once the i2c helper is implemented, this mutex should be
	 * retired.
	 */
	if (dc_link->type != dc_connection_mst_branch)
		mutex_lock(&aconnector->hpd_lock);

	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
	    !is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (!dc_link_detect_sink(dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(dc_link);

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);


			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);


			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		}
	}
	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
	    (dc_link->type == dc_connection_mst_branch))
		dm_handle_hpd_rx_irq(aconnector);

	if (dc_link->type != dc_connection_mst_branch) {
		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
		mutex_unlock(&aconnector->hpd_lock);
	}
}

static void register_hpd_handlers(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	struct amdgpu_dm_connector *aconnector;
	const struct dc_link *dc_link;
	struct dc_interrupt_params int_params = {0};

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {

		aconnector = to_amdgpu_dm_connector(connector);
		dc_link = aconnector->dc_link;

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_irq,
					(void *) aconnector);
		}

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {

			/* Also register for DP short pulse (hpd_rx). */
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_rx_irq,
					(void *) aconnector);
		}
	}
}

/* Register IRQ sources and initialize IRQ callbacks */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

	if (adev->asic_type == CHIP_VEGA10 ||
	    adev->asic_type == CHIP_VEGA12 ||
	    adev->asic_type == CHIP_VEGA20 ||
	    adev->asic_type == CHIP_RAVEN)
		client_id = SOC15_IH_CLIENTID_DCE;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

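	/*
	 * Note on the registrations below: each IRQ line gets its own
	 * common_irq_params slot, indexed by the dc irq_source relative to
	 * the first source of its group (VBLANK1, VUPDATE1, PFLIP_FIRST), so
	 * the high-irq handlers can recover adev and the triggering source.
	 */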
	/* Use VBLANK interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use VUPDATE interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_vupdate_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
/* Register IRQ sources and initialize IRQ callbacks */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VSTARTUP interrupt */
	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);

		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
	 * to trigger at end of each vblank, regardless of state of the lock,
	 * matching DCE behaviour.
	 */
	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
			i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);

		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_vupdate_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
			&adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
#endif

/*
 * Acquires the lock for the atomic state object and returns
 * the new atomic state.
 *
 * This should only be called during atomic check.
 */
static int dm_atomic_get_state(struct drm_atomic_state *state,
			       struct dm_atomic_state **dm_state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_state *priv_state;

	if (*dm_state)
		return 0;

	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	*dm_state = to_dm_atomic_state(priv_state);

	return 0;
}

struct dm_atomic_state *
dm_atomic_get_new_state(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_obj *obj;
	struct drm_private_state *new_obj_state;
	int i;

	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
		if (obj->funcs == dm->atomic_obj.funcs)
			return to_dm_atomic_state(new_obj_state);
	}

	return NULL;
}

struct dm_atomic_state *
dm_atomic_get_old_state(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_obj *obj;
	struct drm_private_state *old_obj_state;
	int i;

	for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
		if (obj->funcs == dm->atomic_obj.funcs)
			return to_dm_atomic_state(old_obj_state);
	}

	return NULL;
}

1765static struct drm_private_state *
1766dm_atomic_duplicate_state(struct drm_private_obj *obj)
1767{
1768 struct dm_atomic_state *old_state, *new_state;
1769
1770 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
1771 if (!new_state)
1772 return NULL;
1773
1774 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
1775
813d20dc
AW
1776 old_state = to_dm_atomic_state(obj->state);
1777
1778 if (old_state && old_state->context)
1779 new_state->context = dc_copy_state(old_state->context);
1780
eb3dc897
NK
1781 if (!new_state->context) {
1782 kfree(new_state);
1783 return NULL;
1784 }
1785
eb3dc897
NK
1786 return &new_state->base;
1787}
1788
1789static void dm_atomic_destroy_state(struct drm_private_obj *obj,
1790 struct drm_private_state *state)
1791{
1792 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
1793
1794 if (dm_state && dm_state->context)
1795 dc_release_state(dm_state->context);
1796
1797 kfree(dm_state);
1798}
1799
1800static struct drm_private_state_funcs dm_atomic_state_funcs = {
1801 .atomic_duplicate_state = dm_atomic_duplicate_state,
1802 .atomic_destroy_state = dm_atomic_destroy_state,
1803};
1804
4562236b
HW
1805static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
1806{
eb3dc897 1807 struct dm_atomic_state *state;
4562236b
HW
1808 int r;
1809
1810 adev->mode_info.mode_config_initialized = true;
1811
4562236b 1812 adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
54f5499a 1813 adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
4562236b
HW
1814
1815 adev->ddev->mode_config.max_width = 16384;
1816 adev->ddev->mode_config.max_height = 16384;
1817
1818 adev->ddev->mode_config.preferred_depth = 24;
1819 adev->ddev->mode_config.prefer_shadow = 1;
1f6010a9 1820 /* indicates support for immediate flip */
4562236b
HW
1821 adev->ddev->mode_config.async_page_flip = true;
1822
770d13b1 1823 adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
4562236b 1824
eb3dc897
NK
1825 state = kzalloc(sizeof(*state), GFP_KERNEL);
1826 if (!state)
1827 return -ENOMEM;
1828
813d20dc 1829 state->context = dc_create_state(adev->dm.dc);
eb3dc897
NK
1830 if (!state->context) {
1831 kfree(state);
1832 return -ENOMEM;
1833 }
1834
1835 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
1836
8c1a765b
DA
1837 drm_atomic_private_obj_init(adev->ddev,
1838 &adev->dm.atomic_obj,
eb3dc897
NK
1839 &state->base,
1840 &dm_atomic_state_funcs);
1841
3dc9b1ce 1842 r = amdgpu_display_modeset_create_props(adev);
4562236b
HW
1843 if (r)
1844 return r;
1845
1846 return 0;
1847}
1848
206bbafe
DF
1849#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
1850#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
1851
4562236b
HW
1852#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
1853 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
1854
206bbafe
DF
1855static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
1856{
1857#if defined(CONFIG_ACPI)
1858 struct amdgpu_dm_backlight_caps caps;
1859
1860 if (dm->backlight_caps.caps_valid)
1861 return;
1862
1863 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
1864 if (caps.caps_valid) {
1865 dm->backlight_caps.min_input_signal = caps.min_input_signal;
1866 dm->backlight_caps.max_input_signal = caps.max_input_signal;
1867 dm->backlight_caps.caps_valid = true;
1868 } else {
1869 dm->backlight_caps.min_input_signal =
1870 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
1871 dm->backlight_caps.max_input_signal =
1872 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
1873 }
1874#else
8bcbc9ef
DF
1875 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
1876 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
206bbafe
DF
1877#endif
1878}
1879
4562236b
HW
1880static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
1881{
1882 struct amdgpu_display_manager *dm = bl_get_data(bd);
206bbafe
DF
1883 struct amdgpu_dm_backlight_caps caps;
1884 uint32_t brightness = bd->props.brightness;
4562236b 1885
206bbafe
DF
1886 amdgpu_dm_update_backlight_caps(dm);
1887 caps = dm->backlight_caps;
0cafc82f 1888 /*
206bbafe
DF
1889 * The brightness input is in the range 0-255.
1890 * It needs to be rescaled to be between the
1891 * requested min and max input signal.
1892 *
1893 * It also needs to be scaled up by 0x101 to
1894 * match the DC interface, which has a range of
1895 * 0 to 0xffff.
0cafc82f 1896 */
206bbafe
DF
1897 brightness =
1898 brightness
1899 * 0x101
1900 * (caps.max_input_signal - caps.min_input_signal)
1901 / AMDGPU_MAX_BL_LEVEL
1902 + caps.min_input_signal * 0x101;
4562236b
HW
1903
1904 if (dc_link_set_backlight_level(dm->backlight_link,
923fe495 1905 brightness, 0))
4562236b
HW
1906 return 0;
1907 else
1908 return 1;
1909}
1910
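For intuition, here is a minimal user-space sketch of the rescale above, assuming the default caps of 12 and 255 and AMDGPU_MAX_BL_LEVEL == 255 (the helper name and harness are illustrative, not driver code):

#include <stdint.h>
#include <stdio.h>

/* Map a 0-255 user level into min..max, widened to the DC 0-0xffff range. */
static uint32_t rescale_brightness(uint32_t user, uint32_t min, uint32_t max)
{
	return user * 0x101 * (max - min) / 255 + min * 0x101;
}

int main(void)
{
	/* 0 -> 3084 (12 * 0x101), 255 -> 65535 (0xffff). */
	printf("%u %u\n", rescale_brightness(0, 12, 255),
	       rescale_brightness(255, 12, 255));
	return 0;
}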
1911static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
1912{
620a0d27
DF
1913 struct amdgpu_display_manager *dm = bl_get_data(bd);
1914 int ret = dc_link_get_backlight_level(dm->backlight_link);
1915
1916 if (ret == DC_ERROR_UNEXPECTED)
1917 return bd->props.brightness;
1918 return ret;
4562236b
HW
1919}
1920
1921static const struct backlight_ops amdgpu_dm_backlight_ops = {
1922 .get_brightness = amdgpu_dm_backlight_get_brightness,
1923 .update_status = amdgpu_dm_backlight_update_status,
1924};
1925
7578ecda
AD
1926static void
1927amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4562236b
HW
1928{
1929 char bl_name[16];
1930 struct backlight_properties props = { 0 };
1931
206bbafe
DF
1932 amdgpu_dm_update_backlight_caps(dm);
1933
4562236b 1934 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
53a53f86 1935 props.brightness = AMDGPU_MAX_BL_LEVEL;
4562236b
HW
1936 props.type = BACKLIGHT_RAW;
1937
1938 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
1939 dm->adev->ddev->primary->index);
1940
1941 dm->backlight_dev = backlight_device_register(bl_name,
1942 dm->adev->ddev->dev,
1943 dm,
1944 &amdgpu_dm_backlight_ops,
1945 &props);
1946
74baea42 1947 if (IS_ERR(dm->backlight_dev))
4562236b
HW
1948 DRM_ERROR("DM: Backlight registration failed!\n");
1949 else
f1ad2f5e 1950 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4562236b
HW
1951}
1952
1953#endif
1954
df534fff 1955static int initialize_plane(struct amdgpu_display_manager *dm,
b2fddb13
NK
1956 struct amdgpu_mode_info *mode_info, int plane_id,
1957 enum drm_plane_type plane_type)
df534fff 1958{
f180b4bc 1959 struct drm_plane *plane;
df534fff
S
1960 unsigned long possible_crtcs;
1961 int ret = 0;
1962
f180b4bc 1963 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
df534fff
S
1964 if (!plane) {
1965 DRM_ERROR("KMS: Failed to allocate plane\n");
1966 return -ENOMEM;
1967 }
b2fddb13 1968 plane->type = plane_type;
df534fff
S
1969
1970 /*
b2fddb13
NK
1971 * HACK: IGT tests expect that the primary plane for a CRTC
1972 * can only have one possible CRTC. Only expose support for
1973 * all CRTCs on planes that won't be used as a primary plane
1974 * for a CRTC, i.e. overlay or underlay planes.
df534fff
S
1975 */
1976 possible_crtcs = 1 << plane_id;
1977 if (plane_id >= dm->dc->caps.max_streams)
1978 possible_crtcs = 0xff;
1979
54087768 1980 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs);
df534fff
S
1981
1982 if (ret) {
1983 DRM_ERROR("KMS: Failed to initialize plane\n");
54087768 1984 kfree(plane);
df534fff
S
1985 return ret;
1986 }
1987
54087768
NK
1988 if (mode_info)
1989 mode_info->planes[plane_id] = plane;
1990
df534fff
S
1991 return ret;
1992}
1993
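A quick sketch of the possible_crtcs logic above, with an invented CRTC count: each primary plane is tied to exactly one CRTC bit, while plane IDs past max_streams (overlays) advertise all CRTCs:

#include <stdio.h>

int main(void)
{
	int max_streams = 4; /* hypothetical CRTC count */
	int plane_id;

	for (plane_id = 0; plane_id < 6; plane_id++) {
		unsigned long possible_crtcs = 1UL << plane_id;

		if (plane_id >= max_streams)
			possible_crtcs = 0xff; /* overlay: any CRTC */
		printf("plane %d -> 0x%02lx\n", plane_id, possible_crtcs);
	}
	return 0;
}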
89fc8d4e
HW
1994
1995static void register_backlight_device(struct amdgpu_display_manager *dm,
1996 struct dc_link *link)
1997{
1998#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
1999 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2000
2001 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
2002 link->type != dc_connection_none) {
1f6010a9
DF
2003 /*
2004 * Even if registration fails, we should continue with
89fc8d4e
HW
2005 * DM initialization because not having backlight control
2006 * is better than a black screen.
2007 */
2008 amdgpu_dm_register_backlight_device(dm);
2009
2010 if (dm->backlight_dev)
2011 dm->backlight_link = link;
2012 }
2013#endif
2014}
2015
2016
1f6010a9
DF
2017/*
2018 * In this architecture, the association
4562236b
HW
2019 * connector -> encoder -> crtc
2020 * is not really required. The crtc and connector will hold the
2021 * display_index as an abstraction to use with the DAL component.
2022 *
2023 * Returns 0 on success
2024 */
7578ecda 2025static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4562236b
HW
2026{
2027 struct amdgpu_display_manager *dm = &adev->dm;
df534fff 2028 int32_t i;
c84dec2f 2029 struct amdgpu_dm_connector *aconnector = NULL;
f2a0f5e6 2030 struct amdgpu_encoder *aencoder = NULL;
d4e13b0d 2031 struct amdgpu_mode_info *mode_info = &adev->mode_info;
4562236b 2032 uint32_t link_cnt;
54087768 2033 int32_t overlay_planes, primary_planes;
fbbdadf2 2034 enum dc_connection_type new_connection_type = dc_connection_none;
4562236b
HW
2035
2036 link_cnt = dm->dc->caps.max_links;
4562236b
HW
2037 if (amdgpu_dm_mode_config_init(dm->adev)) {
2038 DRM_ERROR("DM: Failed to initialize mode config\n");
59d0f396 2039 return -EINVAL;
4562236b
HW
2040 }
2041
0d579c7e
NK
2042 /*
2043 * Determine the number of overlay planes supported.
2044 * Only support DCN for now, and cap so we don't encourage
2045 * userspace to use up all the planes.
2046 */
2047 overlay_planes = 0;
2048
2049 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
2050 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
efa6a8b7 2051
0d579c7e
NK
2052 if (plane->type == DC_PLANE_TYPE_DCN_UNIVERSAL &&
2053 plane->blends_with_above && plane->blends_with_below &&
2054 plane->supports_argb8888)
2055 overlay_planes += 1;
2056 }
2057
2058 overlay_planes = min(overlay_planes, 1);
2059
b2fddb13
NK
2060 /* There is one primary plane per CRTC */
2061 primary_planes = dm->dc->caps.max_streams;
54087768 2062 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
efa6a8b7 2063
b2fddb13
NK
2064 /*
2065 * Initialize primary planes, implicit planes for legacy IOCTLS.
2066 * Order is reversed to match iteration order in atomic check.
2067 */
2068 for (i = (primary_planes - 1); i >= 0; i--) {
2069 if (initialize_plane(dm, mode_info, i,
2070 DRM_PLANE_TYPE_PRIMARY)) {
df534fff 2071 DRM_ERROR("KMS: Failed to initialize primary plane\n");
cd8a2ae8 2072 goto fail;
d4e13b0d 2073 }
df534fff 2074 }
92f3ac40 2075
0d579c7e
NK
2076 /*
2077 * Initialize overlay planes, index starting after primary planes.
2078 * These planes have a higher DRM index than the primary planes since
2079 * they should be considered as having a higher z-order.
2080 * Order is reversed to match iteration order in atomic check.
2081 */
2082 for (i = (overlay_planes - 1); i >= 0; i--) {
54087768 2083 if (initialize_plane(dm, NULL, primary_planes + i,
0d579c7e
NK
2084 DRM_PLANE_TYPE_OVERLAY)) {
2085 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
cd8a2ae8 2086 goto fail;
d4e13b0d
AD
2087 }
2088 }
4562236b 2089
d4e13b0d 2090 for (i = 0; i < dm->dc->caps.max_streams; i++)
f180b4bc 2091 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4562236b 2092 DRM_ERROR("KMS: Failed to initialize crtc\n");
cd8a2ae8 2093 goto fail;
4562236b 2094 }
4562236b 2095
ab2541b6 2096 dm->display_indexes_num = dm->dc->caps.max_streams;
4562236b
HW
2097
2098 /* loops over all connectors on the board */
2099 for (i = 0; i < link_cnt; i++) {
89fc8d4e 2100 struct dc_link *link = NULL;
4562236b
HW
2101
2102 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
2103 DRM_ERROR(
2104 "KMS: Cannot support more than %d display indexes\n",
2105 AMDGPU_DM_MAX_DISPLAY_INDEX);
2106 continue;
2107 }
2108
2109 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
2110 if (!aconnector)
cd8a2ae8 2111 goto fail;
4562236b
HW
2112
2113 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
8440c304 2114 if (!aencoder)
cd8a2ae8 2115 goto fail;
4562236b
HW
2116
2117 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
2118 DRM_ERROR("KMS: Failed to initialize encoder\n");
cd8a2ae8 2119 goto fail;
4562236b
HW
2120 }
2121
2122 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
2123 DRM_ERROR("KMS: Failed to initialize connector\n");
cd8a2ae8 2124 goto fail;
4562236b
HW
2125 }
2126
89fc8d4e
HW
2127 link = dc_get_link_at_index(dm->dc, i);
2128
fbbdadf2
BL
2129 if (!dc_link_detect_sink(link, &new_connection_type))
2130 DRM_ERROR("KMS: Failed to detect connector\n");
2131
2132 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2133 emulated_link_detect(link);
2134 amdgpu_dm_update_connector_after_detect(aconnector);
2135
2136 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4562236b 2137 amdgpu_dm_update_connector_after_detect(aconnector);
89fc8d4e
HW
2138 register_backlight_device(dm, link);
2139 }
2140
2141
4562236b
HW
2142 }
2143
2144 /* Software is initialized. Now we can register interrupt handlers. */
2145 switch (adev->asic_type) {
2146 case CHIP_BONAIRE:
2147 case CHIP_HAWAII:
cd4b356f
AD
2148 case CHIP_KAVERI:
2149 case CHIP_KABINI:
2150 case CHIP_MULLINS:
4562236b
HW
2151 case CHIP_TONGA:
2152 case CHIP_FIJI:
2153 case CHIP_CARRIZO:
2154 case CHIP_STONEY:
2155 case CHIP_POLARIS11:
2156 case CHIP_POLARIS10:
b264d345 2157 case CHIP_POLARIS12:
7737de91 2158 case CHIP_VEGAM:
2c8ad2d5 2159 case CHIP_VEGA10:
2325ff30 2160 case CHIP_VEGA12:
1fe6bf2f 2161 case CHIP_VEGA20:
4562236b
HW
2162 if (dce110_register_irq_handlers(dm->adev)) {
2163 DRM_ERROR("DM: Failed to initialize IRQ\n");
cd8a2ae8 2164 goto fail;
4562236b
HW
2165 }
2166 break;
ff5ef992
AD
2167#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
2168 case CHIP_RAVEN:
2169 if (dcn10_register_irq_handlers(dm->adev)) {
2170 DRM_ERROR("DM: Failed to initialize IRQ\n");
cd8a2ae8 2171 goto fail;
ff5ef992
AD
2172 }
2173 break;
2174#endif
4562236b 2175 default:
e63f8673 2176 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
cd8a2ae8 2177 goto fail;
4562236b
HW
2178 }
2179
1bc460a4
HW
2180 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
2181 dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
2182
4562236b 2183 return 0;
cd8a2ae8 2184fail:
4562236b 2185 kfree(aencoder);
4562236b 2186 kfree(aconnector);
54087768 2187
59d0f396 2188 return -EINVAL;
4562236b
HW
2189}
2190
7578ecda 2191static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4562236b
HW
2192{
2193 drm_mode_config_cleanup(dm->ddev);
eb3dc897 2194 drm_atomic_private_obj_fini(&dm->atomic_obj);
4562236b
HW
2195 return;
2196}
2197
2198/******************************************************************************
2199 * amdgpu_display_funcs functions
2200 *****************************************************************************/
2201
1f6010a9 2202/*
4562236b
HW
2203 * dm_bandwidth_update - program display watermarks
2204 *
2205 * @adev: amdgpu_device pointer
2206 *
2207 * Calculate and program the display watermarks and line buffer allocation.
2208 */
2209static void dm_bandwidth_update(struct amdgpu_device *adev)
2210{
49c07a99 2211 /* TODO: implement later */
4562236b
HW
2212}
2213
39cc5be2 2214static const struct amdgpu_display_funcs dm_display_funcs = {
4562236b
HW
2215 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
2216 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
7b42573b
HW
2217 .backlight_set_level = NULL, /* never called for DC */
2218 .backlight_get_level = NULL, /* never called for DC */
4562236b
HW
2219 .hpd_sense = NULL,/* called unconditionally */
2220 .hpd_set_polarity = NULL, /* called unconditionally */
2221 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4562236b
HW
2222 .page_flip_get_scanoutpos =
2223 dm_crtc_get_scanoutpos,/* called unconditionally */
2224 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
2225 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4562236b
HW
2226};
2227
2228#if defined(CONFIG_DEBUG_KERNEL_DC)
2229
3ee6b26b
AD
2230static ssize_t s3_debug_store(struct device *device,
2231 struct device_attribute *attr,
2232 const char *buf,
2233 size_t count)
4562236b
HW
2234{
2235 int ret;
2236 int s3_state;
2237 struct pci_dev *pdev = to_pci_dev(device);
2238 struct drm_device *drm_dev = pci_get_drvdata(pdev);
2239 struct amdgpu_device *adev = drm_dev->dev_private;
2240
2241 ret = kstrtoint(buf, 0, &s3_state);
2242
2243 if (ret == 0) {
2244 if (s3_state) {
2245 dm_resume(adev);
4562236b
HW
2246 drm_kms_helper_hotplug_event(adev->ddev);
2247 } else
2248 dm_suspend(adev);
2249 }
2250
2251 return ret == 0 ? count : 0;
2252}
2253
2254DEVICE_ATTR_WO(s3_debug);
2255
2256#endif
2257
2258static int dm_early_init(void *handle)
2259{
2260 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2261
4562236b
HW
2262 switch (adev->asic_type) {
2263 case CHIP_BONAIRE:
2264 case CHIP_HAWAII:
2265 adev->mode_info.num_crtc = 6;
2266 adev->mode_info.num_hpd = 6;
2267 adev->mode_info.num_dig = 6;
4562236b 2268 break;
cd4b356f
AD
2269 case CHIP_KAVERI:
2270 adev->mode_info.num_crtc = 4;
2271 adev->mode_info.num_hpd = 6;
2272 adev->mode_info.num_dig = 7;
cd4b356f
AD
2273 break;
2274 case CHIP_KABINI:
2275 case CHIP_MULLINS:
2276 adev->mode_info.num_crtc = 2;
2277 adev->mode_info.num_hpd = 6;
2278 adev->mode_info.num_dig = 6;
cd4b356f 2279 break;
4562236b
HW
2280 case CHIP_FIJI:
2281 case CHIP_TONGA:
2282 adev->mode_info.num_crtc = 6;
2283 adev->mode_info.num_hpd = 6;
2284 adev->mode_info.num_dig = 7;
4562236b
HW
2285 break;
2286 case CHIP_CARRIZO:
2287 adev->mode_info.num_crtc = 3;
2288 adev->mode_info.num_hpd = 6;
2289 adev->mode_info.num_dig = 9;
4562236b
HW
2290 break;
2291 case CHIP_STONEY:
2292 adev->mode_info.num_crtc = 2;
2293 adev->mode_info.num_hpd = 6;
2294 adev->mode_info.num_dig = 9;
4562236b
HW
2295 break;
2296 case CHIP_POLARIS11:
b264d345 2297 case CHIP_POLARIS12:
4562236b
HW
2298 adev->mode_info.num_crtc = 5;
2299 adev->mode_info.num_hpd = 5;
2300 adev->mode_info.num_dig = 5;
4562236b
HW
2301 break;
2302 case CHIP_POLARIS10:
7737de91 2303 case CHIP_VEGAM:
4562236b
HW
2304 adev->mode_info.num_crtc = 6;
2305 adev->mode_info.num_hpd = 6;
2306 adev->mode_info.num_dig = 6;
4562236b 2307 break;
2c8ad2d5 2308 case CHIP_VEGA10:
2325ff30 2309 case CHIP_VEGA12:
1fe6bf2f 2310 case CHIP_VEGA20:
2c8ad2d5
AD
2311 adev->mode_info.num_crtc = 6;
2312 adev->mode_info.num_hpd = 6;
2313 adev->mode_info.num_dig = 6;
2314 break;
ff5ef992
AD
2315#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
2316 case CHIP_RAVEN:
2317 adev->mode_info.num_crtc = 4;
2318 adev->mode_info.num_hpd = 4;
2319 adev->mode_info.num_dig = 4;
ff5ef992
AD
2320 break;
2321#endif
4562236b 2322 default:
e63f8673 2323 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4562236b
HW
2324 return -EINVAL;
2325 }
2326
c8dd5715
MD
2327 amdgpu_dm_set_irq_funcs(adev);
2328
39cc5be2
AD
2329 if (adev->mode_info.funcs == NULL)
2330 adev->mode_info.funcs = &dm_display_funcs;
2331
1f6010a9
DF
2332 /*
2333 * Note: Do NOT change adev->audio_endpt_rreg and
4562236b 2334 * adev->audio_endpt_wreg because they are initialised in
1f6010a9
DF
2335 * amdgpu_device_init()
2336 */
4562236b
HW
2337#if defined(CONFIG_DEBUG_KERNEL_DC)
2338 device_create_file(
2339 adev->ddev->dev,
2340 &dev_attr_s3_debug);
2341#endif
2342
2343 return 0;
2344}
2345
9b690ef3 2346static bool modeset_required(struct drm_crtc_state *crtc_state,
0971c40e
HW
2347 struct dc_stream_state *new_stream,
2348 struct dc_stream_state *old_stream)
9b690ef3 2349{
e7b07cee
HW
2350 if (!drm_atomic_crtc_needs_modeset(crtc_state))
2351 return false;
2352
2353 if (!crtc_state->enable)
2354 return false;
2355
2356 return crtc_state->active;
2357}
2358
2359static bool modereset_required(struct drm_crtc_state *crtc_state)
2360{
2361 if (!drm_atomic_crtc_needs_modeset(crtc_state))
2362 return false;
2363
2364 return !crtc_state->enable || !crtc_state->active;
2365}
2366
7578ecda 2367static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
e7b07cee
HW
2368{
2369 drm_encoder_cleanup(encoder);
2370 kfree(encoder);
2371}
2372
2373static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
2374 .destroy = amdgpu_dm_encoder_destroy,
2375};
2376
3ee6b26b
AD
2377static bool fill_rects_from_plane_state(const struct drm_plane_state *state,
2378 struct dc_plane_state *plane_state)
e7b07cee 2379{
3be5262e
HW
2380 plane_state->src_rect.x = state->src_x >> 16;
2381 plane_state->src_rect.y = state->src_y >> 16;
1f6010a9 2382 /* we ignore the mantissa for now and do not deal with floating pixels :( */
3be5262e 2383 plane_state->src_rect.width = state->src_w >> 16;
e7b07cee 2384
3be5262e 2385 if (plane_state->src_rect.width == 0)
e7b07cee
HW
2386 return false;
2387
3be5262e
HW
2388 plane_state->src_rect.height = state->src_h >> 16;
2389 if (plane_state->src_rect.height == 0)
e7b07cee
HW
2390 return false;
2391
3be5262e
HW
2392 plane_state->dst_rect.x = state->crtc_x;
2393 plane_state->dst_rect.y = state->crtc_y;
e7b07cee
HW
2394
2395 if (state->crtc_w == 0)
2396 return false;
2397
3be5262e 2398 plane_state->dst_rect.width = state->crtc_w;
e7b07cee
HW
2399
2400 if (state->crtc_h == 0)
2401 return false;
2402
3be5262e 2403 plane_state->dst_rect.height = state->crtc_h;
e7b07cee 2404
3be5262e 2405 plane_state->clip_rect = plane_state->dst_rect;
e7b07cee
HW
2406
2407 switch (state->rotation & DRM_MODE_ROTATE_MASK) {
2408 case DRM_MODE_ROTATE_0:
3be5262e 2409 plane_state->rotation = ROTATION_ANGLE_0;
e7b07cee
HW
2410 break;
2411 case DRM_MODE_ROTATE_90:
3be5262e 2412 plane_state->rotation = ROTATION_ANGLE_90;
e7b07cee
HW
2413 break;
2414 case DRM_MODE_ROTATE_180:
3be5262e 2415 plane_state->rotation = ROTATION_ANGLE_180;
e7b07cee
HW
2416 break;
2417 case DRM_MODE_ROTATE_270:
3be5262e 2418 plane_state->rotation = ROTATION_ANGLE_270;
e7b07cee
HW
2419 break;
2420 default:
3be5262e 2421 plane_state->rotation = ROTATION_ANGLE_0;
e7b07cee
HW
2422 break;
2423 }
2424
4562236b
HW
2425 return true;
2426}
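The >> 16 shifts above drop the fractional part of DRM's 16.16 fixed-point source coordinates; a small sketch with invented values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t src_w = 1920u << 16;           /* 1920.0 in 16.16 */
	uint32_t src_x = (100u << 16) | 0x8000; /* 100.5 in 16.16 */

	assert((src_w >> 16) == 1920);
	assert((src_x >> 16) == 100); /* mantissa ignored, as noted above */
	return 0;
}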
3ee6b26b 2427static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
9817d5f5 2428 uint64_t *tiling_flags)
e7b07cee 2429{
e68d14dd 2430 struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
e7b07cee 2431 int r = amdgpu_bo_reserve(rbo, false);
b830ebc9 2432
e7b07cee 2433 if (unlikely(r)) {
1f6010a9 2434 /* Don't show error message when returning -ERESTARTSYS */
9bbc3031
JZ
2435 if (r != -ERESTARTSYS)
2436 DRM_ERROR("Unable to reserve buffer: %d\n", r);
e7b07cee
HW
2437 return r;
2438 }
2439
e7b07cee
HW
2440 if (tiling_flags)
2441 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
2442
2443 amdgpu_bo_unreserve(rbo);
2444
2445 return r;
2446}
2447
7df7e505
NK
2448static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
2449{
2450 uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
2451
2452 return offset ? (address + offset * 256) : 0;
2453}
2454
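Since DCC_OFFSET_256B counts 256-byte units, the metadata address is base plus offset * 256; a sketch with invented addresses:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t fb_address = 0x100000; /* hypothetical surface base */
	uint64_t offset_256b = 0x40;    /* as unpacked from the tiling flags */

	/* 0x40 units * 256 bytes = 0x4000 bytes past the base. */
	assert(fb_address + offset_256b * 256 == 0x104000);
	return 0;
}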
09e5665a 2455static int fill_plane_dcc_attributes(struct amdgpu_device *adev,
7df7e505 2456 const struct amdgpu_framebuffer *afb,
09e5665a
NK
2457 const struct dc_plane_state *plane_state,
2458 struct dc_plane_dcc_param *dcc,
2459 struct dc_plane_address *address,
7df7e505
NK
2460 uint64_t info)
2461{
2462 struct dc *dc = adev->dm.dc;
8daa1218
NC
2463 struct dc_dcc_surface_param input;
2464 struct dc_surface_dcc_cap output;
7df7e505
NK
2465 uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
2466 uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
2467 uint64_t dcc_address;
2468
8daa1218
NC
2469 memset(&input, 0, sizeof(input));
2470 memset(&output, 0, sizeof(output));
2471
7df7e505 2472 if (!offset)
09e5665a
NK
2473 return 0;
2474
2475 if (plane_state->address.type != PLN_ADDR_TYPE_GRAPHICS)
2476 return 0;
7df7e505
NK
2477
2478 if (!dc->cap_funcs.get_dcc_compression_cap)
09e5665a 2479 return -EINVAL;
7df7e505
NK
2480
2481 input.format = plane_state->format;
2482 input.surface_size.width =
2483 plane_state->plane_size.grph.surface_size.width;
2484 input.surface_size.height =
2485 plane_state->plane_size.grph.surface_size.height;
2486 input.swizzle_mode = plane_state->tiling_info.gfx9.swizzle;
2487
2488 if (plane_state->rotation == ROTATION_ANGLE_0 ||
2489 plane_state->rotation == ROTATION_ANGLE_180)
2490 input.scan = SCAN_DIRECTION_HORIZONTAL;
2491 else if (plane_state->rotation == ROTATION_ANGLE_90 ||
2492 plane_state->rotation == ROTATION_ANGLE_270)
2493 input.scan = SCAN_DIRECTION_VERTICAL;
2494
2495 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
09e5665a 2496 return -EINVAL;
7df7e505
NK
2497
2498 if (!output.capable)
09e5665a 2499 return -EINVAL;
7df7e505
NK
2500
2501 if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
09e5665a 2502 return -EINVAL;
7df7e505 2503
09e5665a
NK
2504 dcc->enable = 1;
2505 dcc->grph.meta_pitch =
7df7e505 2506 AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
09e5665a 2507 dcc->grph.independent_64b_blks = i64b;
7df7e505
NK
2508
2509 dcc_address = get_dcc_address(afb->address, info);
09e5665a
NK
2510 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
2511 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
7df7e505 2512
09e5665a
NK
2513 return 0;
2514}
2515
2516static int
2517fill_plane_tiling_attributes(struct amdgpu_device *adev,
2518 const struct amdgpu_framebuffer *afb,
2519 const struct dc_plane_state *plane_state,
2520 union dc_tiling_info *tiling_info,
2521 struct dc_plane_dcc_param *dcc,
2522 struct dc_plane_address *address,
2523 uint64_t tiling_flags)
2524{
2525 int ret;
2526
2527 memset(tiling_info, 0, sizeof(*tiling_info));
2528 memset(dcc, 0, sizeof(*dcc));
e0634e8d
NK
2529 memset(address, 0, sizeof(*address));
2530
2531 if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
2532 address->type = PLN_ADDR_TYPE_GRAPHICS;
2533 address->grph.addr.low_part = lower_32_bits(afb->address);
2534 address->grph.addr.high_part = upper_32_bits(afb->address);
2535 } else {
2536 const struct drm_framebuffer *fb = &afb->base;
1791e54f 2537 uint64_t chroma_addr = afb->address + fb->offsets[1];
e0634e8d
NK
2538
2539 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
2540 address->video_progressive.luma_addr.low_part =
2541 lower_32_bits(afb->address);
2542 address->video_progressive.luma_addr.high_part =
2543 upper_32_bits(afb->address);
2544 address->video_progressive.chroma_addr.low_part =
2545 lower_32_bits(chroma_addr);
2546 address->video_progressive.chroma_addr.high_part =
2547 upper_32_bits(chroma_addr);
2548 }
09e5665a
NK
2549
2550 /* Fill GFX8 params */
2551 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
2552 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
2553
2554 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
2555 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
2556 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
2557 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
2558 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
2559
2560 /* XXX fix me for VI */
2561 tiling_info->gfx8.num_banks = num_banks;
2562 tiling_info->gfx8.array_mode =
2563 DC_ARRAY_2D_TILED_THIN1;
2564 tiling_info->gfx8.tile_split = tile_split;
2565 tiling_info->gfx8.bank_width = bankw;
2566 tiling_info->gfx8.bank_height = bankh;
2567 tiling_info->gfx8.tile_aspect = mtaspect;
2568 tiling_info->gfx8.tile_mode =
2569 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
2570 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
2571 == DC_ARRAY_1D_TILED_THIN1) {
2572 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
2573 }
2574
2575 tiling_info->gfx8.pipe_config =
2576 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
2577
2578 if (adev->asic_type == CHIP_VEGA10 ||
2579 adev->asic_type == CHIP_VEGA12 ||
2580 adev->asic_type == CHIP_VEGA20 ||
2581 adev->asic_type == CHIP_RAVEN) {
2582 /* Fill GFX9 params */
2583 tiling_info->gfx9.num_pipes =
2584 adev->gfx.config.gb_addr_config_fields.num_pipes;
2585 tiling_info->gfx9.num_banks =
2586 adev->gfx.config.gb_addr_config_fields.num_banks;
2587 tiling_info->gfx9.pipe_interleave =
2588 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
2589 tiling_info->gfx9.num_shader_engines =
2590 adev->gfx.config.gb_addr_config_fields.num_se;
2591 tiling_info->gfx9.max_compressed_frags =
2592 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
2593 tiling_info->gfx9.num_rb_per_se =
2594 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
2595 tiling_info->gfx9.swizzle =
2596 AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
2597 tiling_info->gfx9.shaderEnable = 1;
2598
2599 ret = fill_plane_dcc_attributes(adev, afb, plane_state, dcc,
2600 address, tiling_flags);
2601 if (ret)
2602 return ret;
2603 }
2604
2605 return 0;
7df7e505
NK
2606}
2607
3ee6b26b
AD
2608static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
2609 struct dc_plane_state *plane_state,
9817d5f5 2610 const struct amdgpu_framebuffer *amdgpu_fb)
e7b07cee
HW
2611{
2612 uint64_t tiling_flags;
e7b07cee
HW
2613 const struct drm_framebuffer *fb = &amdgpu_fb->base;
2614 int ret = 0;
2615 struct drm_format_name_buf format_name;
2616
2617 ret = get_fb_info(
2618 amdgpu_fb,
9817d5f5 2619 &tiling_flags);
e7b07cee
HW
2620
2621 if (ret)
2622 return ret;
2623
2624 switch (fb->format->format) {
2625 case DRM_FORMAT_C8:
3be5262e 2626 plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
e7b07cee
HW
2627 break;
2628 case DRM_FORMAT_RGB565:
3be5262e 2629 plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
e7b07cee
HW
2630 break;
2631 case DRM_FORMAT_XRGB8888:
2632 case DRM_FORMAT_ARGB8888:
3be5262e 2633 plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
e7b07cee
HW
2634 break;
2635 case DRM_FORMAT_XRGB2101010:
2636 case DRM_FORMAT_ARGB2101010:
3be5262e 2637 plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
e7b07cee
HW
2638 break;
2639 case DRM_FORMAT_XBGR2101010:
2640 case DRM_FORMAT_ABGR2101010:
3be5262e 2641 plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
e7b07cee 2642 break;
bcd47f60
MR
2643 case DRM_FORMAT_XBGR8888:
2644 case DRM_FORMAT_ABGR8888:
2645 plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
2646 break;
e7b07cee 2647 case DRM_FORMAT_NV21:
3be5262e 2648 plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
e7b07cee
HW
2649 break;
2650 case DRM_FORMAT_NV12:
3be5262e 2651 plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
e7b07cee
HW
2652 break;
2653 default:
2654 DRM_ERROR("Unsupported screen format %s\n",
1ecfc3da 2655 drm_get_format_name(fb->format->format, &format_name));
e7b07cee
HW
2656 return -EINVAL;
2657 }
2658
7df7e505 2659 memset(&plane_state->address, 0, sizeof(plane_state->address));
7df7e505 2660
3be5262e 2661 if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3be5262e
HW
2662 plane_state->plane_size.grph.surface_size.x = 0;
2663 plane_state->plane_size.grph.surface_size.y = 0;
2664 plane_state->plane_size.grph.surface_size.width = fb->width;
2665 plane_state->plane_size.grph.surface_size.height = fb->height;
2666 plane_state->plane_size.grph.surface_pitch =
e7b07cee
HW
2667 fb->pitches[0] / fb->format->cpp[0];
2668 /* TODO: unhardcode */
3be5262e 2669 plane_state->color_space = COLOR_SPACE_SRGB;
e7b07cee
HW
2670
2671 } else {
3be5262e
HW
2672 plane_state->plane_size.video.luma_size.x = 0;
2673 plane_state->plane_size.video.luma_size.y = 0;
1791e54f 2674 plane_state->plane_size.video.luma_size.width = fb->width;
3be5262e 2675 plane_state->plane_size.video.luma_size.height = fb->height;
1791e54f
NK
2676 plane_state->plane_size.video.luma_pitch =
2677 fb->pitches[0] / fb->format->cpp[0];
e7b07cee 2678
3be5262e
HW
2679 plane_state->plane_size.video.chroma_size.x = 0;
2680 plane_state->plane_size.video.chroma_size.y = 0;
1791e54f
NK
2681 /* TODO: set these based on surface format */
2682 plane_state->plane_size.video.chroma_size.width = fb->width / 2;
2683 plane_state->plane_size.video.chroma_size.height = fb->height / 2;
2684
2685 plane_state->plane_size.video.chroma_pitch =
2686 fb->pitches[1] / fb->format->cpp[1];
e7b07cee
HW
2687
2688 /* TODO: unhardcode */
3be5262e 2689 plane_state->color_space = COLOR_SPACE_YCBCR709;
e7b07cee
HW
2690 }
2691
09e5665a
NK
2692 fill_plane_tiling_attributes(adev, amdgpu_fb, plane_state,
2693 &plane_state->tiling_info,
2694 &plane_state->dcc,
2695 &plane_state->address,
2696 tiling_flags);
e7b07cee 2697
3be5262e
HW
2698 plane_state->visible = true;
2699 plane_state->scaling_quality.h_taps_c = 0;
2700 plane_state->scaling_quality.v_taps_c = 0;
e7b07cee 2701
3be5262e
HW
2702 /* is this needed? is plane_state zeroed at allocation? */
2703 plane_state->scaling_quality.h_taps = 0;
2704 plane_state->scaling_quality.v_taps = 0;
2705 plane_state->stereo_format = PLANE_STEREO_FORMAT_NONE;
e7b07cee
HW
2706
2707 return ret;
2708
2709}
2710
d74004b6
NK
2711static void
2712fill_blending_from_plane_state(struct drm_plane_state *plane_state,
2713 const struct dc_plane_state *dc_plane_state,
2714 bool *per_pixel_alpha, bool *global_alpha,
2715 int *global_alpha_value)
2716{
2717 *per_pixel_alpha = false;
2718 *global_alpha = false;
2719 *global_alpha_value = 0xff;
2720
2721 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
2722 return;
2723
2724 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
2725 static const uint32_t alpha_formats[] = {
2726 DRM_FORMAT_ARGB8888,
2727 DRM_FORMAT_RGBA8888,
2728 DRM_FORMAT_ABGR8888,
2729 };
2730 uint32_t format = plane_state->fb->format->format;
2731 unsigned int i;
2732
2733 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
2734 if (format == alpha_formats[i]) {
2735 *per_pixel_alpha = true;
2736 break;
2737 }
2738 }
2739 }
2740
2741 if (plane_state->alpha < 0xffff) {
2742 *global_alpha = true;
2743 *global_alpha_value = plane_state->alpha >> 8;
2744 }
2745}
2746
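DRM stores plane alpha as a 16-bit value while DC takes an 8-bit one, hence the >> 8 above; a quick check with an illustrative value:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint16_t plane_alpha = 0x8080; /* roughly 50%, from the DRM property */

	assert(plane_alpha < 0xffff);       /* global alpha kicks in */
	assert((plane_alpha >> 8) == 0x80); /* 8-bit value handed to DC */
	return 0;
}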
3ee6b26b
AD
2747static int fill_plane_attributes(struct amdgpu_device *adev,
2748 struct dc_plane_state *dc_plane_state,
2749 struct drm_plane_state *plane_state,
9817d5f5 2750 struct drm_crtc_state *crtc_state)
e7b07cee
HW
2751{
2752 const struct amdgpu_framebuffer *amdgpu_fb =
2753 to_amdgpu_framebuffer(plane_state->fb);
2754 const struct drm_crtc *crtc = plane_state->crtc;
e7b07cee
HW
2755 int ret = 0;
2756
3be5262e 2757 if (!fill_rects_from_plane_state(plane_state, dc_plane_state))
e7b07cee
HW
2758 return -EINVAL;
2759
2760 ret = fill_plane_attributes_from_fb(
2761 crtc->dev->dev_private,
3be5262e 2762 dc_plane_state,
9817d5f5 2763 amdgpu_fb);
e7b07cee
HW
2764
2765 if (ret)
2766 return ret;
2767
e277adc5
LSL
2768 /*
2769 * Always set input transfer function, since plane state is refreshed
2770 * every time.
2771 */
2772 ret = amdgpu_dm_set_degamma_lut(crtc_state, dc_plane_state);
8c45c5db
LSL
2773 if (ret) {
2774 dc_transfer_func_release(dc_plane_state->in_transfer_func);
2775 dc_plane_state->in_transfer_func = NULL;
2776 }
e7b07cee 2777
d74004b6
NK
2778 fill_blending_from_plane_state(plane_state, dc_plane_state,
2779 &dc_plane_state->per_pixel_alpha,
2780 &dc_plane_state->global_alpha,
2781 &dc_plane_state->global_alpha_value);
2782
e7b07cee
HW
2783 return ret;
2784}
2785
3ee6b26b
AD
2786static void update_stream_scaling_settings(const struct drm_display_mode *mode,
2787 const struct dm_connector_state *dm_state,
2788 struct dc_stream_state *stream)
e7b07cee
HW
2789{
2790 enum amdgpu_rmx_type rmx_type;
2791
2792 struct rect src = { 0 }; /* viewport in composition space */
2793 struct rect dst = { 0 }; /* stream addressable area */
2794
2795 /* no mode. nothing to be done */
2796 if (!mode)
2797 return;
2798
2799 /* Full screen scaling by default */
2800 src.width = mode->hdisplay;
2801 src.height = mode->vdisplay;
2802 dst.width = stream->timing.h_addressable;
2803 dst.height = stream->timing.v_addressable;
2804
f4791779
HW
2805 if (dm_state) {
2806 rmx_type = dm_state->scaling;
2807 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
2808 if (src.width * dst.height <
2809 src.height * dst.width) {
2810 /* height needs less upscaling/more downscaling */
2811 dst.width = src.width *
2812 dst.height / src.height;
2813 } else {
2814 /* width needs less upscaling/more downscaling */
2815 dst.height = src.height *
2816 dst.width / src.width;
2817 }
2818 } else if (rmx_type == RMX_CENTER) {
2819 dst = src;
e7b07cee 2820 }
e7b07cee 2821
f4791779
HW
2822 dst.x = (stream->timing.h_addressable - dst.width) / 2;
2823 dst.y = (stream->timing.v_addressable - dst.height) / 2;
e7b07cee 2824
f4791779
HW
2825 if (dm_state->underscan_enable) {
2826 dst.x += dm_state->underscan_hborder / 2;
2827 dst.y += dm_state->underscan_vborder / 2;
2828 dst.width -= dm_state->underscan_hborder;
2829 dst.height -= dm_state->underscan_vborder;
2830 }
e7b07cee
HW
2831 }
2832
2833 stream->src = src;
2834 stream->dst = dst;
2835
f1ad2f5e 2836 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
e7b07cee
HW
2837 dst.x, dst.y, dst.width, dst.height);
2838
2839}
2840
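Worked numbers for the RMX_ASPECT path above, assuming a 1920x1080 mode on a 2560x1600 addressable area: 1920*1600 is not less than 1080*2560, so the height is recomputed and the image is letterboxed:

#include <assert.h>

int main(void)
{
	int src_w = 1920, src_h = 1080; /* viewport (mode) */
	int dst_w = 2560, dst_h = 1600; /* stream addressable area */

	if (src_w * dst_h < src_h * dst_w)
		dst_w = src_w * dst_h / src_h; /* pillarbox */
	else
		dst_h = src_h * dst_w / src_w; /* letterbox */

	assert(dst_w == 2560 && dst_h == 1440);
	assert((1600 - dst_h) / 2 == 80); /* centered dst.y */
	return 0;
}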
3ee6b26b
AD
2841static enum dc_color_depth
2842convert_color_depth_from_display_info(const struct drm_connector *connector)
e7b07cee 2843{
07e3a1cf
NK
2844 struct dm_connector_state *dm_conn_state =
2845 to_dm_connector_state(connector->state);
e7b07cee
HW
2846 uint32_t bpc = connector->display_info.bpc;
2847
07e3a1cf
NK
2848 /* TODO: Remove this when there's support for max_bpc in drm */
2849 if (dm_conn_state && bpc > dm_conn_state->max_bpc)
2850 /* Round down to nearest even number. */
2851 bpc = dm_conn_state->max_bpc - (dm_conn_state->max_bpc & 1);
2852
e7b07cee
HW
2853 switch (bpc) {
2854 case 0:
1f6010a9
DF
2855 /*
2856 * Temporary workaround: DRM doesn't parse color depth for
e7b07cee
HW
2857 * EDID revisions before 1.4.
2858 * TODO: Fix edid parsing
2859 */
2860 return COLOR_DEPTH_888;
2861 case 6:
2862 return COLOR_DEPTH_666;
2863 case 8:
2864 return COLOR_DEPTH_888;
2865 case 10:
2866 return COLOR_DEPTH_101010;
2867 case 12:
2868 return COLOR_DEPTH_121212;
2869 case 14:
2870 return COLOR_DEPTH_141414;
2871 case 16:
2872 return COLOR_DEPTH_161616;
2873 default:
2874 return COLOR_DEPTH_UNDEFINED;
2875 }
2876}
2877
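The max_bpc clamp above rounds down to an even depth by clearing the low bit; a one-line check (7 is an illustrative odd cap):

#include <assert.h>

int main(void)
{
	unsigned int max_bpc = 7;

	assert(max_bpc - (max_bpc & 1) == 6); /* 7 -> 6, 8 stays 8 */
	return 0;
}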
3ee6b26b
AD
2878static enum dc_aspect_ratio
2879get_aspect_ratio(const struct drm_display_mode *mode_in)
e7b07cee 2880{
e11d4147
LSL
2881 /* 1-1 mapping, since both enums follow the HDMI spec. */
2882 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
e7b07cee
HW
2883}
2884
3ee6b26b
AD
2885static enum dc_color_space
2886get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
e7b07cee
HW
2887{
2888 enum dc_color_space color_space = COLOR_SPACE_SRGB;
2889
2890 switch (dc_crtc_timing->pixel_encoding) {
2891 case PIXEL_ENCODING_YCBCR422:
2892 case PIXEL_ENCODING_YCBCR444:
2893 case PIXEL_ENCODING_YCBCR420:
2894 {
2895 /*
2896 * 27030khz is the separation point between HDTV and SDTV
2897 * according to the HDMI spec; we use YCbCr709 and YCbCr601
2898 * respectively.
2899 */
380604e2 2900 if (dc_crtc_timing->pix_clk_100hz > 270300) {
e7b07cee
HW
2901 if (dc_crtc_timing->flags.Y_ONLY)
2902 color_space =
2903 COLOR_SPACE_YCBCR709_LIMITED;
2904 else
2905 color_space = COLOR_SPACE_YCBCR709;
2906 } else {
2907 if (dc_crtc_timing->flags.Y_ONLY)
2908 color_space =
2909 COLOR_SPACE_YCBCR601_LIMITED;
2910 else
2911 color_space = COLOR_SPACE_YCBCR601;
2912 }
2913
2914 }
2915 break;
2916 case PIXEL_ENCODING_RGB:
2917 color_space = COLOR_SPACE_SRGB;
2918 break;
2919
2920 default:
2921 WARN_ON(1);
2922 break;
2923 }
2924
2925 return color_space;
2926}
2927
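To make the 27.03 MHz split above concrete: 480p's 27.000 MHz pixel clock falls on the BT.601 side, 720p's 74.25 MHz on the BT.709 side (the helper below is illustrative, mirroring the pix_clk_100hz comparison):

#include <assert.h>
#include <stdbool.h>

static bool is_hdtv_clock(unsigned int pix_clk_100hz)
{
	return pix_clk_100hz > 270300; /* 27.03 MHz in 100 Hz units */
}

int main(void)
{
	assert(!is_hdtv_clock(270000)); /* 480p, 27.000 MHz -> YCbCr601 */
	assert(is_hdtv_clock(742500));  /* 720p, 74.250 MHz -> YCbCr709 */
	return 0;
}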
400443e8
ML
2928static void reduce_mode_colour_depth(struct dc_crtc_timing *timing_out)
2929{
2930 if (timing_out->display_color_depth <= COLOR_DEPTH_888)
2931 return;
2932
2933 timing_out->display_color_depth--;
2934}
2935
2936static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_out,
2937 const struct drm_display_info *info)
2938{
2939 int normalized_clk;
2940 if (timing_out->display_color_depth <= COLOR_DEPTH_888)
2941 return;
2942 do {
380604e2 2943 normalized_clk = timing_out->pix_clk_100hz / 10;
400443e8
ML
2944 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
2945 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
2946 normalized_clk /= 2;
2947 /* Adjusting pix clock following on HDMI spec based on colour depth */
2948 switch (timing_out->display_color_depth) {
2949 case COLOR_DEPTH_101010:
2950 normalized_clk = (normalized_clk * 30) / 24;
2951 break;
2952 case COLOR_DEPTH_121212:
2953 normalized_clk = (normalized_clk * 36) / 24;
2954 break;
2955 case COLOR_DEPTH_161616:
2956 normalized_clk = (normalized_clk * 48) / 24;
2957 break;
2958 default:
2959 return;
2960 }
2961 if (normalized_clk <= info->max_tmds_clock)
2962 return;
2963 reduce_mode_colour_depth(timing_out);
2964
2965 } while (timing_out->display_color_depth > COLOR_DEPTH_888);
2966
2967}
e7b07cee 2968
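Worked numbers for the loop above, assuming a 594,000 kHz (4K@60) timing and a hypothetical 600,000 kHz max_tmds_clock: 10 bpc normalizes to 742,500 kHz and fails, so the depth drops to 8 bpc, which fits:

#include <assert.h>

int main(void)
{
	int pix_clk_khz = 594000;    /* pix_clk_100hz / 10 */
	int max_tmds_clock = 600000; /* hypothetical sink limit, kHz */

	assert(pix_clk_khz * 30 / 24 == 742500); /* 10 bpc: too fast */
	assert(pix_clk_khz * 30 / 24 > max_tmds_clock);
	assert(pix_clk_khz <= max_tmds_clock);   /* 8 bpc passes */
	return 0;
}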
3ee6b26b
AD
2969static void
2970fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
2971 const struct drm_display_mode *mode_in,
b333730d
BL
2972 const struct drm_connector *connector,
2973 const struct dc_stream_state *old_stream)
e7b07cee
HW
2974{
2975 struct dc_crtc_timing *timing_out = &stream->timing;
fe61a2f1 2976 const struct drm_display_info *info = &connector->display_info;
b830ebc9 2977
e7b07cee
HW
2978 memset(timing_out, 0, sizeof(struct dc_crtc_timing));
2979
2980 timing_out->h_border_left = 0;
2981 timing_out->h_border_right = 0;
2982 timing_out->v_border_top = 0;
2983 timing_out->v_border_bottom = 0;
2984 /* TODO: un-hardcode */
fe61a2f1 2985 if (drm_mode_is_420_only(info, mode_in)
ceb3dbb4 2986 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
fe61a2f1
ML
2987 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
2988 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
ceb3dbb4 2989 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
e7b07cee
HW
2990 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
2991 else
2992 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
2993
2994 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
2995 timing_out->display_color_depth = convert_color_depth_from_display_info(
2996 connector);
2997 timing_out->scan_type = SCANNING_TYPE_NODATA;
2998 timing_out->hdmi_vic = 0;
b333730d
BL
2999
3000 if (old_stream) {
3001 timing_out->vic = old_stream->timing.vic;
3002 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
3003 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
3004 } else {
3005 timing_out->vic = drm_match_cea_mode(mode_in);
3006 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
3007 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
3008 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
3009 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
3010 }
e7b07cee
HW
3011
3012 timing_out->h_addressable = mode_in->crtc_hdisplay;
3013 timing_out->h_total = mode_in->crtc_htotal;
3014 timing_out->h_sync_width =
3015 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
3016 timing_out->h_front_porch =
3017 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
3018 timing_out->v_total = mode_in->crtc_vtotal;
3019 timing_out->v_addressable = mode_in->crtc_vdisplay;
3020 timing_out->v_front_porch =
3021 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
3022 timing_out->v_sync_width =
3023 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
380604e2 3024 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
e7b07cee 3025 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
e7b07cee
HW
3026
3027 stream->output_color_space = get_output_color_space(timing_out);
3028
e43a432c
AK
3029 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
3030 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
ceb3dbb4 3031 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
400443e8 3032 adjust_colour_depth_from_display_info(timing_out, info);
e7b07cee
HW
3033}
3034
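To make the porch/sync arithmetic above concrete, the standard CEA-861 1080p horizontal timing (hsync at 2008-2052 within a 2200-pixel total) yields an 88-pixel front porch and a 44-pixel sync width:

#include <assert.h>

int main(void)
{
	/* CEA-861 1920x1080p60 horizontal timing */
	int hdisplay = 1920, hsync_start = 2008;
	int hsync_end = 2052, htotal = 2200;

	assert(hsync_start - hdisplay == 88);  /* h_front_porch */
	assert(hsync_end - hsync_start == 44); /* h_sync_width */
	assert(htotal - hsync_end == 148);     /* implicit back porch */
	return 0;
}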
3ee6b26b
AD
3035static void fill_audio_info(struct audio_info *audio_info,
3036 const struct drm_connector *drm_connector,
3037 const struct dc_sink *dc_sink)
e7b07cee
HW
3038{
3039 int i = 0;
3040 int cea_revision = 0;
3041 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
3042
3043 audio_info->manufacture_id = edid_caps->manufacturer_id;
3044 audio_info->product_id = edid_caps->product_id;
3045
3046 cea_revision = drm_connector->display_info.cea_rev;
3047
090afc1e 3048 strscpy(audio_info->display_name,
d2b2562c 3049 edid_caps->display_name,
090afc1e 3050 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
e7b07cee 3051
b830ebc9 3052 if (cea_revision >= 3) {
e7b07cee
HW
3053 audio_info->mode_count = edid_caps->audio_mode_count;
3054
3055 for (i = 0; i < audio_info->mode_count; ++i) {
3056 audio_info->modes[i].format_code =
3057 (enum audio_format_code)
3058 (edid_caps->audio_modes[i].format_code);
3059 audio_info->modes[i].channel_count =
3060 edid_caps->audio_modes[i].channel_count;
3061 audio_info->modes[i].sample_rates.all =
3062 edid_caps->audio_modes[i].sample_rate;
3063 audio_info->modes[i].sample_size =
3064 edid_caps->audio_modes[i].sample_size;
3065 }
3066 }
3067
3068 audio_info->flags.all = edid_caps->speaker_flags;
3069
3070 /* TODO: We only check for the progressive mode, check for interlace mode too */
b830ebc9 3071 if (drm_connector->latency_present[0]) {
e7b07cee
HW
3072 audio_info->video_latency = drm_connector->video_latency[0];
3073 audio_info->audio_latency = drm_connector->audio_latency[0];
3074 }
3075
3076 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
3077
3078}
3079
3ee6b26b
AD
3080static void
3081copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
3082 struct drm_display_mode *dst_mode)
e7b07cee
HW
3083{
3084 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
3085 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
3086 dst_mode->crtc_clock = src_mode->crtc_clock;
3087 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
3088 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
b830ebc9 3089 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
e7b07cee
HW
3090 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
3091 dst_mode->crtc_htotal = src_mode->crtc_htotal;
3092 dst_mode->crtc_hskew = src_mode->crtc_hskew;
3093 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
3094 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
3095 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
3096 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
3097 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
3098}
3099
3ee6b26b
AD
3100static void
3101decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
3102 const struct drm_display_mode *native_mode,
3103 bool scale_enabled)
e7b07cee
HW
3104{
3105 if (scale_enabled) {
3106 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
3107 } else if (native_mode->clock == drm_mode->clock &&
3108 native_mode->htotal == drm_mode->htotal &&
3109 native_mode->vtotal == drm_mode->vtotal) {
3110 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
3111 } else {
3112 /* neither scaling nor an amdgpu-inserted mode; no need to patch */
3113 }
3114}
3115
aed15309
ML
3116static struct dc_sink *
3117create_fake_sink(struct amdgpu_dm_connector *aconnector)
2e0ac3d6 3118{
2e0ac3d6 3119 struct dc_sink_init_data sink_init_data = { 0 };
aed15309 3120 struct dc_sink *sink = NULL;
2e0ac3d6
HW
3121 sink_init_data.link = aconnector->dc_link;
3122 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
3123
3124 sink = dc_sink_create(&sink_init_data);
423788c7 3125 if (!sink) {
2e0ac3d6 3126 DRM_ERROR("Failed to create sink!\n");
aed15309 3127 return NULL;
423788c7 3128 }
2e0ac3d6 3129 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
423788c7 3130
aed15309 3131 return sink;
2e0ac3d6
HW
3132}
3133
fa2123db
ML
3134static void set_multisync_trigger_params(
3135 struct dc_stream_state *stream)
3136{
3137 if (stream->triggered_crtc_reset.enabled) {
3138 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
3139 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
3140 }
3141}
3142
3143static void set_master_stream(struct dc_stream_state *stream_set[],
3144 int stream_count)
3145{
3146 int j, highest_rfr = 0, master_stream = 0;
3147
3148 for (j = 0; j < stream_count; j++) {
3149 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
3150 int refresh_rate = 0;
3151
380604e2 3152 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
fa2123db
ML
3153 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
3154 if (refresh_rate > highest_rfr) {
3155 highest_rfr = refresh_rate;
3156 master_stream = j;
3157 }
3158 }
3159 }
3160 for (j = 0; j < stream_count; j++) {
03736f4c 3161 if (stream_set[j])
fa2123db
ML
3162 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
3163 }
3164}
3165
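The refresh-rate expression above is just pixel clock over total pixels per frame; for 1080p60 (148.5 MHz on a 2200x1125 total) it comes out to exactly 60:

#include <assert.h>

int main(void)
{
	int pix_clk_100hz = 1485000; /* 148.5 MHz in 100 Hz units */
	int h_total = 2200, v_total = 1125;

	assert(pix_clk_100hz * 100 / (h_total * v_total) == 60);
	return 0;
}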
3166static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
3167{
3168 int i = 0;
3169
3170 if (context->stream_count < 2)
3171 return;
3172 for (i = 0; i < context->stream_count ; i++) {
3173 if (!context->streams[i])
3174 continue;
1f6010a9
DF
3175 /*
3176 * TODO: add a function to read AMD VSDB bits and set
fa2123db 3177 * crtc_sync_master.multi_sync_enabled flag
1f6010a9 3178 * For now it's set to false
fa2123db
ML
3179 */
3180 set_multisync_trigger_params(context->streams[i]);
3181 }
3182 set_master_stream(context->streams, context->stream_count);
3183}
3184
3ee6b26b
AD
3185static struct dc_stream_state *
3186create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
3187 const struct drm_display_mode *drm_mode,
b333730d
BL
3188 const struct dm_connector_state *dm_state,
3189 const struct dc_stream_state *old_stream)
e7b07cee
HW
3190{
3191 struct drm_display_mode *preferred_mode = NULL;
391ef035 3192 struct drm_connector *drm_connector;
0971c40e 3193 struct dc_stream_state *stream = NULL;
e7b07cee
HW
3194 struct drm_display_mode mode = *drm_mode;
3195 bool native_mode_found = false;
b333730d
BL
3196 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
3197 int mode_refresh;
58124bf8 3198 int preferred_refresh = 0;
b333730d 3199
aed15309 3200 struct dc_sink *sink = NULL;
b830ebc9 3201 if (aconnector == NULL) {
e7b07cee 3202 DRM_ERROR("aconnector is NULL!\n");
64245fa7 3203 return stream;
e7b07cee
HW
3204 }
3205
e7b07cee 3206 drm_connector = &aconnector->base;
2e0ac3d6 3207
f4ac176e 3208 if (!aconnector->dc_sink) {
e3fa5c4c
JFZ
3209 sink = create_fake_sink(aconnector);
3210 if (!sink)
3211 return stream;
aed15309
ML
3212 } else {
3213 sink = aconnector->dc_sink;
dcd5fb82 3214 dc_sink_retain(sink);
f4ac176e 3215 }
2e0ac3d6 3216
aed15309 3217 stream = dc_create_stream_for_sink(sink);
4562236b 3218
b830ebc9 3219 if (stream == NULL) {
e7b07cee 3220 DRM_ERROR("Failed to create stream for sink!\n");
aed15309 3221 goto finish;
e7b07cee
HW
3222 }
3223
ceb3dbb4
JL
3224 stream->dm_stream_context = aconnector;
3225
e7b07cee
HW
3226 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
3227 /* Search for preferred mode */
3228 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
3229 native_mode_found = true;
3230 break;
3231 }
3232 }
3233 if (!native_mode_found)
3234 preferred_mode = list_first_entry_or_null(
3235 &aconnector->base.modes,
3236 struct drm_display_mode,
3237 head);
3238
b333730d
BL
3239 mode_refresh = drm_mode_vrefresh(&mode);
3240
b830ebc9 3241 if (preferred_mode == NULL) {
1f6010a9
DF
3242 /*
3243 * This may not be an error; the use case is when we have no
e7b07cee
HW
3244 * usermode calls to reset and set mode upon hotplug. In this
3245 * case, we call set mode ourselves to restore the previous mode
3246 * and the mode list may not be filled in in time.
3247 */
f1ad2f5e 3248 DRM_DEBUG_DRIVER("No preferred mode found\n");
e7b07cee
HW
3249 } else {
3250 decide_crtc_timing_for_drm_display_mode(
3251 &mode, preferred_mode,
f4791779 3252 dm_state ? (dm_state->scaling != RMX_OFF) : false);
58124bf8 3253 preferred_refresh = drm_mode_vrefresh(preferred_mode);
e7b07cee
HW
3254 }
3255
f783577c
JFZ
3256 if (!dm_state)
3257 drm_mode_set_crtcinfo(&mode, 0);
3258
b333730d
BL
3259 /*
3260 * If scaling is enabled and refresh rate didn't change
3261 * we copy the vic and polarities of the old timings
3262 */
3263 if (!scale || mode_refresh != preferred_refresh)
3264 fill_stream_properties_from_drm_display_mode(stream,
3265 &mode, &aconnector->base, NULL);
3266 else
3267 fill_stream_properties_from_drm_display_mode(stream,
3268 &mode, &aconnector->base, old_stream);
3269
e7b07cee
HW
3270 update_stream_scaling_settings(&mode, dm_state, stream);
3271
3272 fill_audio_info(
3273 &stream->audio_info,
3274 drm_connector,
aed15309 3275 sink);
e7b07cee 3276
ceb3dbb4 3277 update_stream_signal(stream, sink);
9182b4cb 3278
aed15309 3279finish:
dcd5fb82 3280 dc_sink_release(sink);
9e3efe3e 3281
e7b07cee
HW
3282 return stream;
3283}
3284
7578ecda 3285static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
e7b07cee
HW
3286{
3287 drm_crtc_cleanup(crtc);
3288 kfree(crtc);
3289}
3290
3291static void dm_crtc_destroy_state(struct drm_crtc *crtc,
3ee6b26b 3292 struct drm_crtc_state *state)
e7b07cee
HW
3293{
3294 struct dm_crtc_state *cur = to_dm_crtc_state(state);
3295
3296 /* TODO Destroy dc_stream objects once the stream object is flattened */
3297 if (cur->stream)
3298 dc_stream_release(cur->stream);
3299
3300
3301 __drm_atomic_helper_crtc_destroy_state(state);
3302
3303
3304 kfree(state);
3305}
3306
3307static void dm_crtc_reset_state(struct drm_crtc *crtc)
3308{
3309 struct dm_crtc_state *state;
3310
3311 if (crtc->state)
3312 dm_crtc_destroy_state(crtc, crtc->state);
3313
3314 state = kzalloc(sizeof(*state), GFP_KERNEL);
3315 if (WARN_ON(!state))
3316 return;
3317
3318 crtc->state = &state->base;
3319 crtc->state->crtc = crtc;
3320
3321}
3322
3323static struct drm_crtc_state *
3324dm_crtc_duplicate_state(struct drm_crtc *crtc)
3325{
3326 struct dm_crtc_state *state, *cur;
3327
3328 cur = to_dm_crtc_state(crtc->state);
3329
3330 if (WARN_ON(!crtc->state))
3331 return NULL;
3332
2004f45e 3333 state = kzalloc(sizeof(*state), GFP_KERNEL);
2a55f096
ES
3334 if (!state)
3335 return NULL;
e7b07cee
HW
3336
3337 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
3338
3339 if (cur->stream) {
3340 state->stream = cur->stream;
3341 dc_stream_retain(state->stream);
3342 }
3343
180db303 3344 state->vrr_params = cur->vrr_params;
98e6436d 3345 state->vrr_infopacket = cur->vrr_infopacket;
c1ee92f9 3346 state->abm_level = cur->abm_level;
bb47de73
NK
3347 state->vrr_supported = cur->vrr_supported;
3348 state->freesync_config = cur->freesync_config;
ed20dc0d 3349 state->crc_enabled = cur->crc_enabled;
98e6436d 3350
e7b07cee
HW
3351 /* TODO Duplicate dc_stream once the stream object is flattened */
3352
3353 return &state->base;
3354}
3355
d2574c33
MK
3356static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
3357{
3358 enum dc_irq_source irq_source;
3359 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
3360 struct amdgpu_device *adev = crtc->dev->dev_private;
3361 int rc;
3362
3363 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
3364
3365 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
3366
3367 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
3368 acrtc->crtc_id, enable ? "en" : "dis", rc);
3369 return rc;
3370}
589d2739
HW
3371
3372static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
3373{
3374 enum dc_irq_source irq_source;
3375 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
3376 struct amdgpu_device *adev = crtc->dev->dev_private;
d2574c33
MK
3377 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
3378 int rc = 0;
3379
3380 if (enable) {
3381 /* vblank irq on -> Only need vupdate irq in vrr mode */
3382 if (amdgpu_dm_vrr_active(acrtc_state))
3383 rc = dm_set_vupdate_irq(crtc, true);
3384 } else {
3385 /* vblank irq off -> vupdate irq off */
3386 rc = dm_set_vupdate_irq(crtc, false);
3387 }
3388
3389 if (rc)
3390 return rc;
589d2739
HW
3391
3392 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
a0e30392 3393 return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
589d2739
HW
3394}
3395
3396static int dm_enable_vblank(struct drm_crtc *crtc)
3397{
3398 return dm_set_vblank(crtc, true);
3399}
3400
3401static void dm_disable_vblank(struct drm_crtc *crtc)
3402{
3403 dm_set_vblank(crtc, false);
3404}
3405
e7b07cee
HW
3406	/* Implement only the options currently available for the driver */
3407static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
3408 .reset = dm_crtc_reset_state,
3409 .destroy = amdgpu_dm_crtc_destroy,
3410 .gamma_set = drm_atomic_helper_legacy_gamma_set,
3411 .set_config = drm_atomic_helper_set_config,
3412 .page_flip = drm_atomic_helper_page_flip,
3413 .atomic_duplicate_state = dm_crtc_duplicate_state,
3414 .atomic_destroy_state = dm_crtc_destroy_state,
31aec354 3415 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
3b3b8448 3416 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
589d2739
HW
3417 .enable_vblank = dm_enable_vblank,
3418 .disable_vblank = dm_disable_vblank,
e7b07cee
HW
3419};
3420
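/*
 * Report connector status from the presence of a dc_sink unless the user
 * has forced the connector state; see the notes in the function body for
 * why no MST work may happen here.
 */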
3421static enum drm_connector_status
3422amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
3423{
3424 bool connected;
c84dec2f 3425 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 3426
1f6010a9
DF
3427 /*
3428 * Notes:
e7b07cee
HW
3429 * 1. This interface is NOT called in context of HPD irq.
3430	 * 2. This interface *is called* in context of user-mode ioctl, which
1f6010a9
DF
3431 * makes it a bad place for *any* MST-related activity.
3432 */
e7b07cee 3433
8580d60b
HW
3434 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
3435 !aconnector->fake_enable)
e7b07cee
HW
3436 connected = (aconnector->dc_sink != NULL);
3437 else
3438 connected = (aconnector->base.force == DRM_FORCE_ON);
3439
3440 return (connected ? connector_status_connected :
3441 connector_status_disconnected);
3442}
3443
3ee6b26b
AD
3444int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
3445 struct drm_connector_state *connector_state,
3446 struct drm_property *property,
3447 uint64_t val)
e7b07cee
HW
3448{
3449 struct drm_device *dev = connector->dev;
3450 struct amdgpu_device *adev = dev->dev_private;
3451 struct dm_connector_state *dm_old_state =
3452 to_dm_connector_state(connector->state);
3453 struct dm_connector_state *dm_new_state =
3454 to_dm_connector_state(connector_state);
3455
3456 int ret = -EINVAL;
3457
3458 if (property == dev->mode_config.scaling_mode_property) {
3459 enum amdgpu_rmx_type rmx_type;
3460
3461 switch (val) {
3462 case DRM_MODE_SCALE_CENTER:
3463 rmx_type = RMX_CENTER;
3464 break;
3465 case DRM_MODE_SCALE_ASPECT:
3466 rmx_type = RMX_ASPECT;
3467 break;
3468 case DRM_MODE_SCALE_FULLSCREEN:
3469 rmx_type = RMX_FULL;
3470 break;
3471 case DRM_MODE_SCALE_NONE:
3472 default:
3473 rmx_type = RMX_OFF;
3474 break;
3475 }
3476
3477 if (dm_old_state->scaling == rmx_type)
3478 return 0;
3479
3480 dm_new_state->scaling = rmx_type;
3481 ret = 0;
3482 } else if (property == adev->mode_info.underscan_hborder_property) {
3483 dm_new_state->underscan_hborder = val;
3484 ret = 0;
3485 } else if (property == adev->mode_info.underscan_vborder_property) {
3486 dm_new_state->underscan_vborder = val;
3487 ret = 0;
3488 } else if (property == adev->mode_info.underscan_property) {
3489 dm_new_state->underscan_enable = val;
3490 ret = 0;
07e3a1cf
NK
3491 } else if (property == adev->mode_info.max_bpc_property) {
3492 dm_new_state->max_bpc = val;
3493 ret = 0;
c1ee92f9
DF
3494 } else if (property == adev->mode_info.abm_level_property) {
3495 dm_new_state->abm_level = val;
3496 ret = 0;
e7b07cee
HW
3497 }
3498
3499 return ret;
3500}
3501
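/*
 * Mirror of amdgpu_dm_connector_atomic_set_property(): translate the
 * values cached in dm_connector_state back into generic DRM property
 * values (scaling mode, underscan, max bpc, ABM level).
 */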
3ee6b26b
AD
3502int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
3503 const struct drm_connector_state *state,
3504 struct drm_property *property,
3505 uint64_t *val)
e7b07cee
HW
3506{
3507 struct drm_device *dev = connector->dev;
3508 struct amdgpu_device *adev = dev->dev_private;
3509 struct dm_connector_state *dm_state =
3510 to_dm_connector_state(state);
3511 int ret = -EINVAL;
3512
3513 if (property == dev->mode_config.scaling_mode_property) {
3514 switch (dm_state->scaling) {
3515 case RMX_CENTER:
3516 *val = DRM_MODE_SCALE_CENTER;
3517 break;
3518 case RMX_ASPECT:
3519 *val = DRM_MODE_SCALE_ASPECT;
3520 break;
3521 case RMX_FULL:
3522 *val = DRM_MODE_SCALE_FULLSCREEN;
3523 break;
3524 case RMX_OFF:
3525 default:
3526 *val = DRM_MODE_SCALE_NONE;
3527 break;
3528 }
3529 ret = 0;
3530 } else if (property == adev->mode_info.underscan_hborder_property) {
3531 *val = dm_state->underscan_hborder;
3532 ret = 0;
3533 } else if (property == adev->mode_info.underscan_vborder_property) {
3534 *val = dm_state->underscan_vborder;
3535 ret = 0;
3536 } else if (property == adev->mode_info.underscan_property) {
3537 *val = dm_state->underscan_enable;
3538 ret = 0;
07e3a1cf
NK
3539 } else if (property == adev->mode_info.max_bpc_property) {
3540 *val = dm_state->max_bpc;
3541 ret = 0;
c1ee92f9
DF
3542 } else if (property == adev->mode_info.abm_level_property) {
3543 *val = dm_state->abm_level;
3544 ret = 0;
e7b07cee 3545 }
c1ee92f9 3546
e7b07cee
HW
3547 return ret;
3548}
3549
7578ecda 3550static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
e7b07cee 3551{
c84dec2f 3552 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
3553 const struct dc_link *link = aconnector->dc_link;
3554 struct amdgpu_device *adev = connector->dev->dev_private;
3555 struct amdgpu_display_manager *dm = &adev->dm;
ada8ce15 3556
e7b07cee
HW
3557#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3558 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3559
89fc8d4e 3560 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5cd29ed0
HW
3561 link->type != dc_connection_none &&
3562 dm->backlight_dev) {
3563 backlight_device_unregister(dm->backlight_dev);
3564 dm->backlight_dev = NULL;
e7b07cee
HW
3565 }
3566#endif
dcd5fb82
MF
3567
3568 if (aconnector->dc_em_sink)
3569 dc_sink_release(aconnector->dc_em_sink);
3570 aconnector->dc_em_sink = NULL;
3571 if (aconnector->dc_sink)
3572 dc_sink_release(aconnector->dc_sink);
3573 aconnector->dc_sink = NULL;
3574
e86e8947 3575 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
e7b07cee
HW
3576 drm_connector_unregister(connector);
3577 drm_connector_cleanup(connector);
3578 kfree(connector);
3579}
3580
3581void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
3582{
3583 struct dm_connector_state *state =
3584 to_dm_connector_state(connector->state);
3585
df099b9b
LSL
3586 if (connector->state)
3587 __drm_atomic_helper_connector_destroy_state(connector->state);
3588
e7b07cee
HW
3589 kfree(state);
3590
3591 state = kzalloc(sizeof(*state), GFP_KERNEL);
3592
3593 if (state) {
3594 state->scaling = RMX_OFF;
3595 state->underscan_enable = false;
3596 state->underscan_hborder = 0;
3597 state->underscan_vborder = 0;
49f1c44b 3598 state->max_bpc = 8;
e7b07cee 3599
df099b9b 3600 __drm_atomic_helper_connector_reset(connector, &state->base);
e7b07cee
HW
3601 }
3602}
3603
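/*
 * Duplicate the connector state for a new atomic transaction: kmemdup()
 * copies the whole structure, the DRM helper re-initializes the base
 * state, and the dm-specific fields (freesync, ABM, scaling, underscan,
 * max bpc) are then copied across explicitly.
 */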
3ee6b26b
AD
3604struct drm_connector_state *
3605amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
e7b07cee
HW
3606{
3607 struct dm_connector_state *state =
3608 to_dm_connector_state(connector->state);
3609
3610 struct dm_connector_state *new_state =
3611 kmemdup(state, sizeof(*state), GFP_KERNEL);
3612
98e6436d
AK
3613 if (!new_state)
3614 return NULL;
e7b07cee 3615
98e6436d
AK
3616 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
3617
3618 new_state->freesync_capable = state->freesync_capable;
c1ee92f9 3619 new_state->abm_level = state->abm_level;
922454c2
NK
3620 new_state->scaling = state->scaling;
3621 new_state->underscan_enable = state->underscan_enable;
3622 new_state->underscan_hborder = state->underscan_hborder;
3623 new_state->underscan_vborder = state->underscan_vborder;
49f1c44b 3624 new_state->max_bpc = state->max_bpc;
98e6436d
AK
3625
3626 return &new_state->base;
e7b07cee
HW
3627}
3628
3629static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
3630 .reset = amdgpu_dm_connector_funcs_reset,
3631 .detect = amdgpu_dm_connector_detect,
3632 .fill_modes = drm_helper_probe_single_connector_modes,
3633 .destroy = amdgpu_dm_connector_destroy,
3634 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
3635 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
3636 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
3637 .atomic_get_property = amdgpu_dm_connector_atomic_get_property
3638};
3639
e7b07cee
HW
3640static int get_modes(struct drm_connector *connector)
3641{
3642 return amdgpu_dm_connector_get_modes(connector);
3643}
3644
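/*
 * Build an emulated (EML) dc_sink from the connector's EDID property blob
 * so that a force-enabled connector still has a sink for mode validation;
 * with no EDID available the connector is forced off instead.
 */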
c84dec2f 3645static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
3646{
3647 struct dc_sink_init_data init_params = {
3648 .link = aconnector->dc_link,
3649 .sink_signal = SIGNAL_TYPE_VIRTUAL
3650 };
70e8ffc5 3651 struct edid *edid;
e7b07cee 3652
a89ff457 3653 if (!aconnector->base.edid_blob_ptr) {
e7b07cee
HW
3654	DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
3655 aconnector->base.name);
3656
3657 aconnector->base.force = DRM_FORCE_OFF;
3658 aconnector->base.override_edid = false;
3659 return;
3660 }
3661
70e8ffc5
HW
3662 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
3663
e7b07cee
HW
3664 aconnector->edid = edid;
3665
3666 aconnector->dc_em_sink = dc_link_add_remote_sink(
3667 aconnector->dc_link,
3668 (uint8_t *)edid,
3669 (edid->extensions + 1) * EDID_LENGTH,
3670 &init_params);
3671
dcd5fb82 3672 if (aconnector->base.force == DRM_FORCE_ON) {
e7b07cee
HW
3673 aconnector->dc_sink = aconnector->dc_link->local_sink ?
3674 aconnector->dc_link->local_sink :
3675 aconnector->dc_em_sink;
dcd5fb82
MF
3676 dc_sink_retain(aconnector->dc_sink);
3677 }
e7b07cee
HW
3678}
3679
c84dec2f 3680static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
3681{
3682 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
3683
1f6010a9
DF
3684 /*
3685	 * In case of headless boot with force on for a DP managed connector,
e7b07cee
HW
3686	 * those settings have to be != 0 to get an initial modeset.
3687 */
3688 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
3689 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
3690 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
3691 }
3692
3693
3694 aconnector->base.override_edid = true;
3695 create_eml_sink(aconnector);
3696}
3697
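/*
 * Mode validation: interlaced and doublescan modes are rejected up front,
 * then a throwaway dc_stream is built for the sink and handed to DC for
 * the actual timing/bandwidth validation.
 */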
ba9ca088 3698enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
3ee6b26b 3699 struct drm_display_mode *mode)
e7b07cee
HW
3700{
3701 int result = MODE_ERROR;
3702 struct dc_sink *dc_sink;
3703 struct amdgpu_device *adev = connector->dev->dev_private;
3704 /* TODO: Unhardcode stream count */
0971c40e 3705 struct dc_stream_state *stream;
c84dec2f 3706 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
a39438f0 3707 enum dc_status dc_result = DC_OK;
e7b07cee
HW
3708
3709 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
3710 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
3711 return result;
3712
1f6010a9
DF
3713 /*
3714	 * Only run this the first time mode_valid is called to initialize
e7b07cee
HW
3715 * EDID mgmt
3716 */
3717 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
3718 !aconnector->dc_em_sink)
3719 handle_edid_mgmt(aconnector);
3720
c84dec2f 3721 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
e7b07cee 3722
b830ebc9 3723 if (dc_sink == NULL) {
e7b07cee
HW
3724 DRM_ERROR("dc_sink is NULL!\n");
3725 goto fail;
3726 }
3727
b333730d 3728 stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
b830ebc9 3729 if (stream == NULL) {
e7b07cee
HW
3730 DRM_ERROR("Failed to create stream for sink!\n");
3731 goto fail;
3732 }
3733
a39438f0
HW
3734 dc_result = dc_validate_stream(adev->dm.dc, stream);
3735
3736 if (dc_result == DC_OK)
e7b07cee 3737 result = MODE_OK;
a39438f0 3738 else
9f921b14 3739 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
a39438f0
HW
3740	mode->hdisplay,
3741	mode->vdisplay,
9f921b14
HW
3742 mode->clock,
3743 dc_result);
e7b07cee
HW
3744
3745 dc_stream_release(stream);
3746
3747fail:
3748	/* TODO: error handling */
3749 return result;
3750}
3751
3752static const struct drm_connector_helper_funcs
3753amdgpu_dm_connector_helper_funcs = {
3754 /*
1f6010a9 3755 * If hotplugging a second bigger display in FB Con mode, bigger resolution
b830ebc9 3756 * modes will be filtered by drm_mode_validate_size(), and those modes
1f6010a9 3757	 * are missing after the user starts lightdm. So we need to renew the modes list
b830ebc9
HW
3758	 * in the get_modes callback, not just return the modes count.
3759 */
e7b07cee
HW
3760 .get_modes = get_modes,
3761 .mode_valid = amdgpu_dm_connector_mode_valid,
e7b07cee
HW
3762};
3763
3764static void dm_crtc_helper_disable(struct drm_crtc *crtc)
3765{
3766}
3767
3ee6b26b
AD
3768static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
3769 struct drm_crtc_state *state)
e7b07cee
HW
3770{
3771 struct amdgpu_device *adev = crtc->dev->dev_private;
3772 struct dc *dc = adev->dm.dc;
3773 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
3774 int ret = -EINVAL;
3775
9b690ef3
BL
3776 if (unlikely(!dm_crtc_state->stream &&
3777 modeset_required(state, NULL, dm_crtc_state->stream))) {
e7b07cee
HW
3778 WARN_ON(1);
3779 return ret;
3780 }
3781
1f6010a9 3782 /* In some use cases, like reset, no stream is attached */
e7b07cee
HW
3783 if (!dm_crtc_state->stream)
3784 return 0;
3785
62c933f9 3786 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
e7b07cee
HW
3787 return 0;
3788
3789 return ret;
3790}
3791
3ee6b26b
AD
3792static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
3793 const struct drm_display_mode *mode,
3794 struct drm_display_mode *adjusted_mode)
e7b07cee
HW
3795{
3796 return true;
3797}
3798
3799static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
3800 .disable = dm_crtc_helper_disable,
3801 .atomic_check = dm_crtc_helper_atomic_check,
3802 .mode_fixup = dm_crtc_helper_mode_fixup
3803};
3804
3805static void dm_encoder_helper_disable(struct drm_encoder *encoder)
3806{
3807
3808}
3809
3ee6b26b
AD
3810static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
3811 struct drm_crtc_state *crtc_state,
3812 struct drm_connector_state *conn_state)
e7b07cee
HW
3813{
3814 return 0;
3815}
3816
3817const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
3818 .disable = dm_encoder_helper_disable,
3819 .atomic_check = dm_encoder_helper_atomic_check
3820};
3821
3822static void dm_drm_plane_reset(struct drm_plane *plane)
3823{
3824 struct dm_plane_state *amdgpu_state = NULL;
3825
3826 if (plane->state)
3827 plane->funcs->atomic_destroy_state(plane, plane->state);
3828
3829 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
f922237d 3830 WARN_ON(amdgpu_state == NULL);
1f6010a9 3831
7ddaef96
NK
3832 if (amdgpu_state)
3833 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
e7b07cee
HW
3834}
3835
3836static struct drm_plane_state *
3837dm_drm_plane_duplicate_state(struct drm_plane *plane)
3838{
3839 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
3840
3841 old_dm_plane_state = to_dm_plane_state(plane->state);
3842 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
3843 if (!dm_plane_state)
3844 return NULL;
3845
3846 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
3847
3be5262e
HW
3848 if (old_dm_plane_state->dc_state) {
3849 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
3850 dc_plane_state_retain(dm_plane_state->dc_state);
e7b07cee
HW
3851 }
3852
3853 return &dm_plane_state->base;
3854}
3855
3856void dm_drm_plane_destroy_state(struct drm_plane *plane,
3ee6b26b 3857 struct drm_plane_state *state)
e7b07cee
HW
3858{
3859 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
3860
3be5262e
HW
3861 if (dm_plane_state->dc_state)
3862 dc_plane_state_release(dm_plane_state->dc_state);
e7b07cee 3863
0627bbd3 3864 drm_atomic_helper_plane_destroy_state(plane, state);
e7b07cee
HW
3865}
3866
3867static const struct drm_plane_funcs dm_plane_funcs = {
3868 .update_plane = drm_atomic_helper_update_plane,
3869 .disable_plane = drm_atomic_helper_disable_plane,
02680efb 3870 .destroy = drm_primary_helper_destroy,
e7b07cee
HW
3871 .reset = dm_drm_plane_reset,
3872 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
3873 .atomic_destroy_state = dm_drm_plane_destroy_state,
3874};
3875
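/*
 * prepare_fb: pin the framebuffer BO into a displayable domain (VRAM, or
 * whatever amdgpu_display_supported_domains() allows for non-cursor
 * planes), map it into GART, and record the GPU address and tiling flags
 * in the new plane state's dc_plane_state.
 */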
3ee6b26b
AD
3876static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
3877 struct drm_plane_state *new_state)
e7b07cee
HW
3878{
3879 struct amdgpu_framebuffer *afb;
3880 struct drm_gem_object *obj;
5d43be0c 3881 struct amdgpu_device *adev;
e7b07cee 3882 struct amdgpu_bo *rbo;
e7b07cee 3883 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
e0634e8d 3884 uint64_t tiling_flags;
5d43be0c
CK
3885 uint32_t domain;
3886 int r;
e7b07cee
HW
3887
3888 dm_plane_state_old = to_dm_plane_state(plane->state);
3889 dm_plane_state_new = to_dm_plane_state(new_state);
3890
3891 if (!new_state->fb) {
f1ad2f5e 3892 DRM_DEBUG_DRIVER("No FB bound\n");
e7b07cee
HW
3893 return 0;
3894 }
3895
3896 afb = to_amdgpu_framebuffer(new_state->fb);
e68d14dd 3897 obj = new_state->fb->obj[0];
e7b07cee 3898 rbo = gem_to_amdgpu_bo(obj);
5d43be0c 3899 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
e7b07cee
HW
3900 r = amdgpu_bo_reserve(rbo, false);
3901 if (unlikely(r != 0))
3902 return r;
3903
5d43be0c 3904 if (plane->type != DRM_PLANE_TYPE_CURSOR)
1d2361e5 3905 domain = amdgpu_display_supported_domains(adev);
5d43be0c
CK
3906 else
3907 domain = AMDGPU_GEM_DOMAIN_VRAM;
e7b07cee 3908
7b7c6c81 3909 r = amdgpu_bo_pin(rbo, domain);
e7b07cee 3910 if (unlikely(r != 0)) {
30b7c614
HW
3911 if (r != -ERESTARTSYS)
3912 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
bb812f1e 3913 amdgpu_bo_unreserve(rbo);
e7b07cee
HW
3914 return r;
3915 }
3916
bb812f1e
JZ
3917 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
3918 if (unlikely(r != 0)) {
3919 amdgpu_bo_unpin(rbo);
3920 amdgpu_bo_unreserve(rbo);
3921 DRM_ERROR("%p bind failed\n", rbo);
e7b07cee
HW
3922 return r;
3923 }
7df7e505
NK
3924
3925 amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
3926
bb812f1e
JZ
3927 amdgpu_bo_unreserve(rbo);
3928
7b7c6c81 3929 afb->address = amdgpu_bo_gpu_offset(rbo);
e7b07cee
HW
3930
3931 amdgpu_bo_ref(rbo);
3932
3be5262e
HW
3933 if (dm_plane_state_new->dc_state &&
3934 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
3935 struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
e7b07cee 3936
e0634e8d
NK
3937 fill_plane_tiling_attributes(
3938 adev, afb, plane_state, &plane_state->tiling_info,
3939 &plane_state->dcc, &plane_state->address, tiling_flags);
e7b07cee
HW
3940 }
3941
e7b07cee
HW
3942 return 0;
3943}
3944
3ee6b26b
AD
3945static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
3946 struct drm_plane_state *old_state)
e7b07cee
HW
3947{
3948 struct amdgpu_bo *rbo;
e7b07cee
HW
3949 int r;
3950
3951 if (!old_state->fb)
3952 return;
3953
e68d14dd 3954 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
e7b07cee
HW
3955 r = amdgpu_bo_reserve(rbo, false);
3956 if (unlikely(r)) {
3957 DRM_ERROR("failed to reserve rbo before unpin\n");
3958 return;
b830ebc9
HW
3959 }
3960
3961 amdgpu_bo_unpin(rbo);
3962 amdgpu_bo_unreserve(rbo);
3963 amdgpu_bo_unref(&rbo);
e7b07cee
HW
3964}
3965
7578ecda
AD
3966static int dm_plane_atomic_check(struct drm_plane *plane,
3967 struct drm_plane_state *state)
cbd19488
AG
3968{
3969 struct amdgpu_device *adev = plane->dev->dev_private;
3970 struct dc *dc = adev->dm.dc;
3971 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
3972
3be5262e 3973 if (!dm_plane_state->dc_state)
9a3329b1 3974 return 0;
cbd19488 3975
a05bcff1
S
3976 if (!fill_rects_from_plane_state(state, dm_plane_state->dc_state))
3977 return -EINVAL;
3978
62c933f9 3979 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
cbd19488
AG
3980 return 0;
3981
3982 return -EINVAL;
3983}
3984
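/*
 * Async (out-of-band) plane updates are restricted to the cursor plane
 * with an unchanged framebuffer: only the position fields are copied over
 * and pushed to DC via handle_cursor_update(), skipping a full commit.
 */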
674e78ac
NK
3985static int dm_plane_atomic_async_check(struct drm_plane *plane,
3986 struct drm_plane_state *new_plane_state)
3987{
77acd1cd
NK
3988 struct drm_plane_state *old_plane_state =
3989 drm_atomic_get_old_plane_state(new_plane_state->state, plane);
3990
674e78ac
NK
3991 /* Only support async updates on cursor planes. */
3992 if (plane->type != DRM_PLANE_TYPE_CURSOR)
3993 return -EINVAL;
3994
77acd1cd
NK
3995 /*
3996 * DRM calls prepare_fb and cleanup_fb on new_plane_state for
3997 * async commits so don't allow fb changes.
3998 */
3999 if (old_plane_state->fb != new_plane_state->fb)
4000 return -EINVAL;
4001
674e78ac
NK
4002 return 0;
4003}
4004
4005static void dm_plane_atomic_async_update(struct drm_plane *plane,
4006 struct drm_plane_state *new_state)
4007{
4008 struct drm_plane_state *old_state =
4009 drm_atomic_get_old_plane_state(new_state->state, plane);
4010
4011 if (plane->state->fb != new_state->fb)
4012 drm_atomic_set_fb_for_plane(plane->state, new_state->fb);
4013
4014 plane->state->src_x = new_state->src_x;
4015 plane->state->src_y = new_state->src_y;
4016 plane->state->src_w = new_state->src_w;
4017 plane->state->src_h = new_state->src_h;
4018 plane->state->crtc_x = new_state->crtc_x;
4019 plane->state->crtc_y = new_state->crtc_y;
4020 plane->state->crtc_w = new_state->crtc_w;
4021 plane->state->crtc_h = new_state->crtc_h;
4022
4023 handle_cursor_update(plane, old_state);
4024}
4025
e7b07cee
HW
4026static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
4027 .prepare_fb = dm_plane_helper_prepare_fb,
4028 .cleanup_fb = dm_plane_helper_cleanup_fb,
cbd19488 4029 .atomic_check = dm_plane_atomic_check,
674e78ac
NK
4030 .atomic_async_check = dm_plane_atomic_async_check,
4031 .atomic_async_update = dm_plane_atomic_async_update
e7b07cee
HW
4032};
4033
4034/*
4035 * TODO: these are currently initialized to rgb formats only.
4036 * For future use cases we should either initialize them dynamically based on
4037 * plane capabilities, or initialize this array to all formats, so internal drm
1f6010a9 4038	 * check will succeed, and let DC implement the proper check
e7b07cee 4039 */
d90371b0 4040static const uint32_t rgb_formats[] = {
e7b07cee
HW
4041 DRM_FORMAT_XRGB8888,
4042 DRM_FORMAT_ARGB8888,
4043 DRM_FORMAT_RGBA8888,
4044 DRM_FORMAT_XRGB2101010,
4045 DRM_FORMAT_XBGR2101010,
4046 DRM_FORMAT_ARGB2101010,
4047 DRM_FORMAT_ABGR2101010,
bcd47f60
MR
4048 DRM_FORMAT_XBGR8888,
4049 DRM_FORMAT_ABGR8888,
e7b07cee
HW
4050};
4051
0d579c7e
NK
4052static const uint32_t overlay_formats[] = {
4053 DRM_FORMAT_XRGB8888,
4054 DRM_FORMAT_ARGB8888,
4055 DRM_FORMAT_RGBA8888,
4056 DRM_FORMAT_XBGR8888,
4057 DRM_FORMAT_ABGR8888,
e7b07cee
HW
4058};
4059
4060static const u32 cursor_formats[] = {
4061 DRM_FORMAT_ARGB8888
4062};
4063
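/*
 * Register a plane with a format list matching its type: primary planes
 * advertise the RGB formats above, overlays a smaller set plus alpha
 * blending properties, and cursors ARGB8888 only.
 */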
7578ecda 4064static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
f180b4bc 4065 struct drm_plane *plane,
7578ecda 4066 unsigned long possible_crtcs)
e7b07cee
HW
4067{
4068 int res = -EPERM;
4069
f180b4bc 4070 switch (plane->type) {
e7b07cee 4071 case DRM_PLANE_TYPE_PRIMARY:
e7b07cee
HW
4072 res = drm_universal_plane_init(
4073 dm->adev->ddev,
f180b4bc 4074 plane,
e7b07cee
HW
4075 possible_crtcs,
4076 &dm_plane_funcs,
4077 rgb_formats,
4078 ARRAY_SIZE(rgb_formats),
f180b4bc 4079 NULL, plane->type, NULL);
e7b07cee
HW
4080 break;
4081 case DRM_PLANE_TYPE_OVERLAY:
4082 res = drm_universal_plane_init(
4083 dm->adev->ddev,
f180b4bc 4084 plane,
e7b07cee
HW
4085 possible_crtcs,
4086 &dm_plane_funcs,
0d579c7e
NK
4087 overlay_formats,
4088 ARRAY_SIZE(overlay_formats),
f180b4bc 4089 NULL, plane->type, NULL);
e7b07cee
HW
4090 break;
4091 case DRM_PLANE_TYPE_CURSOR:
4092 res = drm_universal_plane_init(
4093 dm->adev->ddev,
f180b4bc 4094 plane,
e7b07cee
HW
4095 possible_crtcs,
4096 &dm_plane_funcs,
4097 cursor_formats,
4098 ARRAY_SIZE(cursor_formats),
f180b4bc 4099 NULL, plane->type, NULL);
e7b07cee
HW
4100 break;
4101 }
4102
d74004b6
NK
4103	/* TODO: Check DC plane caps explicitly here for adding properties */
4104 if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
4105 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
4106 BIT(DRM_MODE_BLEND_PREMULTI);
4107
4108 drm_plane_create_alpha_property(plane);
4109 drm_plane_create_blend_mode_property(plane, blend_caps);
4110 }
4111
f180b4bc 4112 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
e7b07cee 4113
96719c54 4114 /* Create (reset) the plane state */
f180b4bc
HW
4115 if (plane->funcs->reset)
4116 plane->funcs->reset(plane);
96719c54
HW
4117
4118
e7b07cee
HW
4119 return res;
4120}
4121
7578ecda
AD
4122static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
4123 struct drm_plane *plane,
4124 uint32_t crtc_index)
e7b07cee
HW
4125{
4126 struct amdgpu_crtc *acrtc = NULL;
f180b4bc 4127 struct drm_plane *cursor_plane;
e7b07cee
HW
4128
4129 int res = -ENOMEM;
4130
4131 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
4132 if (!cursor_plane)
4133 goto fail;
4134
f180b4bc 4135 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
e7b07cee
HW
4136 res = amdgpu_dm_plane_init(dm, cursor_plane, 0);
4137
4138 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
4139 if (!acrtc)
4140 goto fail;
4141
4142 res = drm_crtc_init_with_planes(
4143 dm->ddev,
4144 &acrtc->base,
4145 plane,
f180b4bc 4146 cursor_plane,
e7b07cee
HW
4147 &amdgpu_dm_crtc_funcs, NULL);
4148
4149 if (res)
4150 goto fail;
4151
4152 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
4153
96719c54
HW
4154 /* Create (reset) the plane state */
4155 if (acrtc->base.funcs->reset)
4156 acrtc->base.funcs->reset(&acrtc->base);
4157
e7b07cee
HW
4158 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
4159 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
4160
4161 acrtc->crtc_id = crtc_index;
4162 acrtc->base.enabled = false;
c37e2d29 4163 acrtc->otg_inst = -1;
e7b07cee
HW
4164
4165 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
236d0e4f
LSL
4166 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
4167 true, MAX_COLOR_LUT_ENTRIES);
086247a4 4168 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
e7b07cee
HW
4169
4170 return 0;
4171
4172fail:
b830ebc9
HW
4173 kfree(acrtc);
4174 kfree(cursor_plane);
e7b07cee
HW
4175 return res;
4176}
4177
4178
4179static int to_drm_connector_type(enum signal_type st)
4180{
4181 switch (st) {
4182 case SIGNAL_TYPE_HDMI_TYPE_A:
4183 return DRM_MODE_CONNECTOR_HDMIA;
4184 case SIGNAL_TYPE_EDP:
4185 return DRM_MODE_CONNECTOR_eDP;
11c3ee48
AD
4186 case SIGNAL_TYPE_LVDS:
4187 return DRM_MODE_CONNECTOR_LVDS;
e7b07cee
HW
4188 case SIGNAL_TYPE_RGB:
4189 return DRM_MODE_CONNECTOR_VGA;
4190 case SIGNAL_TYPE_DISPLAY_PORT:
4191 case SIGNAL_TYPE_DISPLAY_PORT_MST:
4192 return DRM_MODE_CONNECTOR_DisplayPort;
4193 case SIGNAL_TYPE_DVI_DUAL_LINK:
4194 case SIGNAL_TYPE_DVI_SINGLE_LINK:
4195 return DRM_MODE_CONNECTOR_DVID;
4196 case SIGNAL_TYPE_VIRTUAL:
4197 return DRM_MODE_CONNECTOR_VIRTUAL;
4198
4199 default:
4200 return DRM_MODE_CONNECTOR_Unknown;
4201 }
4202}
4203
2b4c1c05
DV
4204static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
4205{
4206 return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
4207}
4208
e7b07cee
HW
4209static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
4210{
e7b07cee
HW
4211 struct drm_encoder *encoder;
4212 struct amdgpu_encoder *amdgpu_encoder;
4213
2b4c1c05 4214 encoder = amdgpu_dm_connector_to_encoder(connector);
e7b07cee
HW
4215
4216 if (encoder == NULL)
4217 return;
4218
4219 amdgpu_encoder = to_amdgpu_encoder(encoder);
4220
4221 amdgpu_encoder->native_mode.clock = 0;
4222
4223 if (!list_empty(&connector->probed_modes)) {
4224 struct drm_display_mode *preferred_mode = NULL;
b830ebc9 4225
e7b07cee 4226 list_for_each_entry(preferred_mode,
b830ebc9
HW
4227 &connector->probed_modes,
4228 head) {
4229 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
4230 amdgpu_encoder->native_mode = *preferred_mode;
4231
e7b07cee
HW
4232 break;
4233 }
4234
4235 }
4236}
4237
3ee6b26b
AD
4238static struct drm_display_mode *
4239amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
4240 char *name,
4241 int hdisplay, int vdisplay)
e7b07cee
HW
4242{
4243 struct drm_device *dev = encoder->dev;
4244 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
4245 struct drm_display_mode *mode = NULL;
4246 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
4247
4248 mode = drm_mode_duplicate(dev, native_mode);
4249
b830ebc9 4250 if (mode == NULL)
e7b07cee
HW
4251 return NULL;
4252
4253 mode->hdisplay = hdisplay;
4254 mode->vdisplay = vdisplay;
4255 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
090afc1e 4256 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
e7b07cee
HW
4257
4258 return mode;
4259
4260}
4261
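/*
 * Add common fallback modes below the panel's native resolution; for a
 * 1920x1080 native panel this also exposes e.g. 1280x720 and 1024x768,
 * while anything at or above the native size is skipped.
 */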
4262static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
3ee6b26b 4263 struct drm_connector *connector)
e7b07cee
HW
4264{
4265 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
4266 struct drm_display_mode *mode = NULL;
4267 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
c84dec2f
HW
4268 struct amdgpu_dm_connector *amdgpu_dm_connector =
4269 to_amdgpu_dm_connector(connector);
e7b07cee
HW
4270 int i;
4271 int n;
4272 struct mode_size {
4273 char name[DRM_DISPLAY_MODE_LEN];
4274 int w;
4275 int h;
b830ebc9 4276 } common_modes[] = {
e7b07cee
HW
4277 { "640x480", 640, 480},
4278 { "800x600", 800, 600},
4279 { "1024x768", 1024, 768},
4280 { "1280x720", 1280, 720},
4281 { "1280x800", 1280, 800},
4282 {"1280x1024", 1280, 1024},
4283 { "1440x900", 1440, 900},
4284 {"1680x1050", 1680, 1050},
4285 {"1600x1200", 1600, 1200},
4286 {"1920x1080", 1920, 1080},
4287 {"1920x1200", 1920, 1200}
4288 };
4289
b830ebc9 4290 n = ARRAY_SIZE(common_modes);
e7b07cee
HW
4291
4292 for (i = 0; i < n; i++) {
4293 struct drm_display_mode *curmode = NULL;
4294 bool mode_existed = false;
4295
4296 if (common_modes[i].w > native_mode->hdisplay ||
b830ebc9
HW
4297 common_modes[i].h > native_mode->vdisplay ||
4298 (common_modes[i].w == native_mode->hdisplay &&
4299 common_modes[i].h == native_mode->vdisplay))
4300 continue;
e7b07cee
HW
4301
4302 list_for_each_entry(curmode, &connector->probed_modes, head) {
4303 if (common_modes[i].w == curmode->hdisplay &&
b830ebc9 4304 common_modes[i].h == curmode->vdisplay) {
e7b07cee
HW
4305 mode_existed = true;
4306 break;
4307 }
4308 }
4309
4310 if (mode_existed)
4311 continue;
4312
4313 mode = amdgpu_dm_create_common_mode(encoder,
4314 common_modes[i].name, common_modes[i].w,
4315 common_modes[i].h);
4316 drm_mode_probed_add(connector, mode);
c84dec2f 4317 amdgpu_dm_connector->num_modes++;
e7b07cee
HW
4318 }
4319}
4320
3ee6b26b
AD
4321static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
4322 struct edid *edid)
e7b07cee 4323{
c84dec2f
HW
4324 struct amdgpu_dm_connector *amdgpu_dm_connector =
4325 to_amdgpu_dm_connector(connector);
e7b07cee
HW
4326
4327 if (edid) {
4328 /* empty probed_modes */
4329 INIT_LIST_HEAD(&connector->probed_modes);
c84dec2f 4330 amdgpu_dm_connector->num_modes =
e7b07cee
HW
4331 drm_add_edid_modes(connector, edid);
4332
e7b07cee 4333 amdgpu_dm_get_native_mode(connector);
a8d8d3dc 4334 } else {
c84dec2f 4335 amdgpu_dm_connector->num_modes = 0;
a8d8d3dc 4336 }
e7b07cee
HW
4337}
4338
7578ecda 4339static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
e7b07cee 4340{
c84dec2f
HW
4341 struct amdgpu_dm_connector *amdgpu_dm_connector =
4342 to_amdgpu_dm_connector(connector);
e7b07cee 4343 struct drm_encoder *encoder;
c84dec2f 4344 struct edid *edid = amdgpu_dm_connector->edid;
e7b07cee 4345
2b4c1c05 4346 encoder = amdgpu_dm_connector_to_encoder(connector);
3e332d3a 4347
85ee15d6 4348 if (!edid || !drm_edid_is_valid(edid)) {
1b369d3c
ML
4349 amdgpu_dm_connector->num_modes =
4350 drm_add_modes_noedid(connector, 640, 480);
85ee15d6
ML
4351 } else {
4352 amdgpu_dm_connector_ddc_get_modes(connector, edid);
4353 amdgpu_dm_connector_add_common_modes(encoder, connector);
4354 }
3e332d3a 4355 amdgpu_dm_fbc_init(connector);
5099114b 4356
c84dec2f 4357 return amdgpu_dm_connector->num_modes;
e7b07cee
HW
4358}
4359
3ee6b26b
AD
4360void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
4361 struct amdgpu_dm_connector *aconnector,
4362 int connector_type,
4363 struct dc_link *link,
4364 int link_index)
e7b07cee
HW
4365{
4366 struct amdgpu_device *adev = dm->ddev->dev_private;
4367
4368 aconnector->connector_id = link_index;
4369 aconnector->dc_link = link;
4370 aconnector->base.interlace_allowed = false;
4371 aconnector->base.doublescan_allowed = false;
4372 aconnector->base.stereo_allowed = false;
4373 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
4374 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
e7b07cee
HW
4375 mutex_init(&aconnector->hpd_lock);
4376
1f6010a9
DF
4377 /*
4378	 * Configure HPD hot plug support: connector->polled default value is 0,
b830ebc9
HW
4379	 * which means HPD hot plug is not supported.
4380 */
e7b07cee
HW
4381 switch (connector_type) {
4382 case DRM_MODE_CONNECTOR_HDMIA:
4383 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
e7baae1c 4384 aconnector->base.ycbcr_420_allowed =
9ea59d5a 4385 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
e7b07cee
HW
4386 break;
4387 case DRM_MODE_CONNECTOR_DisplayPort:
4388 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
e7baae1c 4389 aconnector->base.ycbcr_420_allowed =
9ea59d5a 4390 link->link_enc->features.dp_ycbcr420_supported ? true : false;
e7b07cee
HW
4391 break;
4392 case DRM_MODE_CONNECTOR_DVID:
4393 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
4394 break;
4395 default:
4396 break;
4397 }
4398
4399 drm_object_attach_property(&aconnector->base.base,
4400 dm->ddev->mode_config.scaling_mode_property,
4401 DRM_MODE_SCALE_NONE);
4402
4403 drm_object_attach_property(&aconnector->base.base,
4404 adev->mode_info.underscan_property,
4405 UNDERSCAN_OFF);
4406 drm_object_attach_property(&aconnector->base.base,
4407 adev->mode_info.underscan_hborder_property,
4408 0);
4409 drm_object_attach_property(&aconnector->base.base,
4410 adev->mode_info.underscan_vborder_property,
4411 0);
07e3a1cf
NK
4412 drm_object_attach_property(&aconnector->base.base,
4413 adev->mode_info.max_bpc_property,
4414 0);
e7b07cee 4415
c1ee92f9
DF
4416 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
4417 dc_is_dmcu_initialized(adev->dm.dc)) {
4418 drm_object_attach_property(&aconnector->base.base,
4419 adev->mode_info.abm_level_property, 0);
4420 }
bb47de73
NK
4421
4422 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7fad8da1
NK
4423 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4424 connector_type == DRM_MODE_CONNECTOR_eDP) {
bb47de73
NK
4425 drm_connector_attach_vrr_capable_property(
4426 &aconnector->base);
4427 }
e7b07cee
HW
4428}
4429
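/*
 * i2c_algorithm glue: translate an array of i2c_msg transfers into a
 * single DC i2c_command and submit it on the link's hardware DDC channel;
 * returns the number of messages on success and -EIO otherwise.
 */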
7578ecda
AD
4430static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
4431 struct i2c_msg *msgs, int num)
e7b07cee
HW
4432{
4433 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
4434 struct ddc_service *ddc_service = i2c->ddc_service;
4435 struct i2c_command cmd;
4436 int i;
4437 int result = -EIO;
4438
b830ebc9 4439 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
e7b07cee
HW
4440
4441 if (!cmd.payloads)
4442 return result;
4443
4444 cmd.number_of_payloads = num;
4445 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
4446 cmd.speed = 100;
4447
4448 for (i = 0; i < num; i++) {
4449 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
4450 cmd.payloads[i].address = msgs[i].addr;
4451 cmd.payloads[i].length = msgs[i].len;
4452 cmd.payloads[i].data = msgs[i].buf;
4453 }
4454
c85e6e54
DF
4455 if (dc_submit_i2c(
4456 ddc_service->ctx->dc,
4457 ddc_service->ddc_pin->hw_info.ddc_channel,
e7b07cee
HW
4458 &cmd))
4459 result = num;
4460
4461 kfree(cmd.payloads);
4462 return result;
4463}
4464
7578ecda 4465static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
e7b07cee
HW
4466{
4467 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
4468}
4469
4470static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
4471 .master_xfer = amdgpu_dm_i2c_xfer,
4472 .functionality = amdgpu_dm_i2c_func,
4473};
4474
3ee6b26b
AD
4475static struct amdgpu_i2c_adapter *
4476create_i2c(struct ddc_service *ddc_service,
4477 int link_index,
4478 int *res)
e7b07cee
HW
4479{
4480 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
4481 struct amdgpu_i2c_adapter *i2c;
4482
b830ebc9 4483 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
2a55f096
ES
4484 if (!i2c)
4485 return NULL;
e7b07cee
HW
4486 i2c->base.owner = THIS_MODULE;
4487 i2c->base.class = I2C_CLASS_DDC;
4488 i2c->base.dev.parent = &adev->pdev->dev;
4489 i2c->base.algo = &amdgpu_dm_i2c_algo;
b830ebc9 4490 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
e7b07cee
HW
4491 i2c_set_adapdata(&i2c->base, i2c);
4492 i2c->ddc_service = ddc_service;
c85e6e54 4493 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
e7b07cee
HW
4494
4495 return i2c;
4496}
4497
89fc8d4e 4498
1f6010a9
DF
4499/*
4500 * Note: this function assumes that dc_link_detect() was called for the
b830ebc9
HW
4501 * dc_link which will be represented by this aconnector.
4502 */
7578ecda
AD
4503static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
4504 struct amdgpu_dm_connector *aconnector,
4505 uint32_t link_index,
4506 struct amdgpu_encoder *aencoder)
e7b07cee
HW
4507{
4508 int res = 0;
4509 int connector_type;
4510 struct dc *dc = dm->dc;
4511 struct dc_link *link = dc_get_link_at_index(dc, link_index);
4512 struct amdgpu_i2c_adapter *i2c;
9a227d26
TSD
4513
4514 link->priv = aconnector;
e7b07cee 4515
f1ad2f5e 4516 DRM_DEBUG_DRIVER("%s()\n", __func__);
e7b07cee
HW
4517
4518 i2c = create_i2c(link->ddc, link->link_index, &res);
2a55f096
ES
4519 if (!i2c) {
4520 DRM_ERROR("Failed to create i2c adapter data\n");
4521 return -ENOMEM;
4522 }
4523
e7b07cee
HW
4524 aconnector->i2c = i2c;
4525 res = i2c_add_adapter(&i2c->base);
4526
4527 if (res) {
4528 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
4529 goto out_free;
4530 }
4531
4532 connector_type = to_drm_connector_type(link->connector_signal);
4533
4534 res = drm_connector_init(
4535 dm->ddev,
4536 &aconnector->base,
4537 &amdgpu_dm_connector_funcs,
4538 connector_type);
4539
4540 if (res) {
4541 DRM_ERROR("connector_init failed\n");
4542 aconnector->connector_id = -1;
4543 goto out_free;
4544 }
4545
4546 drm_connector_helper_add(
4547 &aconnector->base,
4548 &amdgpu_dm_connector_helper_funcs);
4549
96719c54
HW
4550 if (aconnector->base.funcs->reset)
4551 aconnector->base.funcs->reset(&aconnector->base);
4552
e7b07cee
HW
4553 amdgpu_dm_connector_init_helper(
4554 dm,
4555 aconnector,
4556 connector_type,
4557 link,
4558 link_index);
4559
cde4c44d 4560 drm_connector_attach_encoder(
e7b07cee
HW
4561 &aconnector->base, &aencoder->base);
4562
4563 drm_connector_register(&aconnector->base);
dc38fd9d
DF
4564#if defined(CONFIG_DEBUG_FS)
4565 res = connector_debugfs_init(aconnector);
4566 if (res) {
4567 DRM_ERROR("Failed to create debugfs for connector");
4568 goto out_free;
4569 }
f258fee6
DF
4570 aconnector->debugfs_dpcd_address = 0;
4571 aconnector->debugfs_dpcd_size = 0;
dc38fd9d 4572#endif
e7b07cee
HW
4573
4574 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
4575 || connector_type == DRM_MODE_CONNECTOR_eDP)
4576 amdgpu_dm_initialize_dp_connector(dm, aconnector);
4577
e7b07cee
HW
4578out_free:
4579 if (res) {
4580 kfree(i2c);
4581 aconnector->i2c = NULL;
4582 }
4583 return res;
4584}
4585
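/*
 * Encoder possible_crtcs mask: one bit per CRTC, e.g. 3 CRTCs -> 0x7,
 * capped at 6 CRTCs (0x3f).
 */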
4586int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
4587{
4588 switch (adev->mode_info.num_crtc) {
4589 case 1:
4590 return 0x1;
4591 case 2:
4592 return 0x3;
4593 case 3:
4594 return 0x7;
4595 case 4:
4596 return 0xf;
4597 case 5:
4598 return 0x1f;
4599 case 6:
4600 default:
4601 return 0x3f;
4602 }
4603}
4604
7578ecda
AD
4605static int amdgpu_dm_encoder_init(struct drm_device *dev,
4606 struct amdgpu_encoder *aencoder,
4607 uint32_t link_index)
e7b07cee
HW
4608{
4609 struct amdgpu_device *adev = dev->dev_private;
4610
4611 int res = drm_encoder_init(dev,
4612 &aencoder->base,
4613 &amdgpu_dm_encoder_funcs,
4614 DRM_MODE_ENCODER_TMDS,
4615 NULL);
4616
4617 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
4618
4619 if (!res)
4620 aencoder->encoder_id = link_index;
4621 else
4622 aencoder->encoder_id = -1;
4623
4624 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
4625
4626 return res;
4627}
4628
3ee6b26b
AD
4629static void manage_dm_interrupts(struct amdgpu_device *adev,
4630 struct amdgpu_crtc *acrtc,
4631 bool enable)
e7b07cee
HW
4632{
4633 /*
4634	 * This is not a correct translation, but it works as long as the
4635	 * VBLANK constant is the same as PFLIP
4636 */
4637 int irq_type =
734dd01d 4638 amdgpu_display_crtc_idx_to_irq_type(
e7b07cee
HW
4639 adev,
4640 acrtc->crtc_id);
4641
4642 if (enable) {
4643 drm_crtc_vblank_on(&acrtc->base);
4644 amdgpu_irq_get(
4645 adev,
4646 &adev->pageflip_irq,
4647 irq_type);
4648 } else {
4649
4650 amdgpu_irq_put(
4651 adev,
4652 &adev->pageflip_irq,
4653 irq_type);
4654 drm_crtc_vblank_off(&acrtc->base);
4655 }
4656}
4657
3ee6b26b
AD
4658static bool
4659is_scaling_state_different(const struct dm_connector_state *dm_state,
4660 const struct dm_connector_state *old_dm_state)
e7b07cee
HW
4661{
4662 if (dm_state->scaling != old_dm_state->scaling)
4663 return true;
4664 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
4665 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
4666 return true;
4667 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
4668 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
4669 return true;
b830ebc9
HW
4670 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
4671 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
4672 return true;
e7b07cee
HW
4673 return false;
4674}
4675
3ee6b26b
AD
4676static void remove_stream(struct amdgpu_device *adev,
4677 struct amdgpu_crtc *acrtc,
4678 struct dc_stream_state *stream)
e7b07cee
HW
4679{
4680	/* This is the update mode case: just reset the CRTC bookkeeping */
e7b07cee
HW
4681
4682 acrtc->otg_inst = -1;
4683 acrtc->enabled = false;
4684}
4685
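/*
 * Translate plane state into a DC cursor position: coordinates are offset
 * by the primary plane's src origin, and negative positions are clamped
 * to zero with the overhang folded into the cursor hotspot.
 */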
7578ecda
AD
4686static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
4687 struct dc_cursor_position *position)
2a8f6ccb 4688{
f4c2cc43 4689 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2a8f6ccb
HW
4690 int x, y;
4691 int xorigin = 0, yorigin = 0;
4692
4693 if (!crtc || !plane->state->fb) {
4694 position->enable = false;
4695 position->x = 0;
4696 position->y = 0;
4697 return 0;
4698 }
4699
4700 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
4701 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
4702 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
4703 __func__,
4704 plane->state->crtc_w,
4705 plane->state->crtc_h);
4706 return -EINVAL;
4707 }
4708
4709 x = plane->state->crtc_x;
4710 y = plane->state->crtc_y;
4711	/* avivo cursors are offset into the total surface */
4712 x += crtc->primary->state->src_x >> 16;
4713 y += crtc->primary->state->src_y >> 16;
4714 if (x < 0) {
4715 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
4716 x = 0;
4717 }
4718 if (y < 0) {
4719 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
4720 y = 0;
4721 }
4722 position->enable = true;
4723 position->x = x;
4724 position->y = y;
4725 position->x_hotspot = xorigin;
4726 position->y_hotspot = yorigin;
4727
4728 return 0;
4729}
4730
3ee6b26b
AD
4731static void handle_cursor_update(struct drm_plane *plane,
4732 struct drm_plane_state *old_plane_state)
e7b07cee 4733{
674e78ac 4734 struct amdgpu_device *adev = plane->dev->dev_private;
2a8f6ccb
HW
4735 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
4736 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
4737 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
4738 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
4739 uint64_t address = afb ? afb->address : 0;
4740 struct dc_cursor_position position;
4741 struct dc_cursor_attributes attributes;
4742 int ret;
4743
e7b07cee
HW
4744 if (!plane->state->fb && !old_plane_state->fb)
4745 return;
4746
f1ad2f5e 4747	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %dx%d\n",
c12a7ba5
HW
4748 __func__,
4749 amdgpu_crtc->crtc_id,
4750 plane->state->crtc_w,
4751 plane->state->crtc_h);
2a8f6ccb
HW
4752
4753 ret = get_cursor_position(plane, crtc, &position);
4754 if (ret)
4755 return;
4756
4757 if (!position.enable) {
4758 /* turn off cursor */
674e78ac
NK
4759 if (crtc_state && crtc_state->stream) {
4760 mutex_lock(&adev->dm.dc_lock);
2a8f6ccb
HW
4761 dc_stream_set_cursor_position(crtc_state->stream,
4762 &position);
674e78ac
NK
4763 mutex_unlock(&adev->dm.dc_lock);
4764 }
2a8f6ccb 4765 return;
e7b07cee 4766 }
e7b07cee 4767
2a8f6ccb
HW
4768 amdgpu_crtc->cursor_width = plane->state->crtc_w;
4769 amdgpu_crtc->cursor_height = plane->state->crtc_h;
4770
4771 attributes.address.high_part = upper_32_bits(address);
4772 attributes.address.low_part = lower_32_bits(address);
4773 attributes.width = plane->state->crtc_w;
4774 attributes.height = plane->state->crtc_h;
4775 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
4776 attributes.rotation_angle = 0;
4777 attributes.attribute_flags.value = 0;
4778
4779 attributes.pitch = attributes.width;
4780
886daac9 4781 if (crtc_state->stream) {
674e78ac 4782 mutex_lock(&adev->dm.dc_lock);
886daac9
JZ
4783 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
4784 &attributes))
4785 DRM_ERROR("DC failed to set cursor attributes\n");
2a8f6ccb 4786
2a8f6ccb
HW
4787 if (!dc_stream_set_cursor_position(crtc_state->stream,
4788 &position))
4789 DRM_ERROR("DC failed to set cursor position\n");
674e78ac 4790 mutex_unlock(&adev->dm.dc_lock);
886daac9 4791 }
2a8f6ccb 4792}
e7b07cee
HW
4793
4794static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
4795{
4796
4797 assert_spin_locked(&acrtc->base.dev->event_lock);
4798 WARN_ON(acrtc->event);
4799
4800 acrtc->event = acrtc->base.state->event;
4801
4802 /* Set the flip status */
4803 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
4804
4805 /* Mark this event as consumed */
4806 acrtc->base.state->event = NULL;
4807
4808 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
4809 acrtc->crtc_id);
4810}
4811
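/*
 * Per-flip freesync bookkeeping: let the freesync module adjust the VRR
 * parameters for this flip, rebuild the VRR infopacket, and record
 * whether the timing or infopacket changed so the commit path can push
 * the update to the stream.
 */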
bb47de73
NK
4812static void update_freesync_state_on_stream(
4813 struct amdgpu_display_manager *dm,
4814 struct dm_crtc_state *new_crtc_state,
180db303
NK
4815 struct dc_stream_state *new_stream,
4816 struct dc_plane_state *surface,
4817 u32 flip_timestamp_in_us)
bb47de73 4818{
180db303 4819 struct mod_vrr_params vrr_params = new_crtc_state->vrr_params;
bb47de73 4820 struct dc_info_packet vrr_infopacket = {0};
bb47de73
NK
4821
4822 if (!new_stream)
4823 return;
4824
4825 /*
4826 * TODO: Determine why min/max totals and vrefresh can be 0 here.
4827 * For now it's sufficient to just guard against these conditions.
4828 */
4829
4830 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
4831 return;
4832
180db303
NK
4833 if (surface) {
4834 mod_freesync_handle_preflip(
4835 dm->freesync_module,
4836 surface,
4837 new_stream,
4838 flip_timestamp_in_us,
4839 &vrr_params);
4840 }
bb47de73
NK
4841
4842 mod_freesync_build_vrr_infopacket(
4843 dm->freesync_module,
4844 new_stream,
180db303 4845 &vrr_params,
ecd0136b
HT
4846 PACKET_TYPE_VRR,
4847 TRANSFER_FUNC_UNKNOWN,
bb47de73
NK
4848 &vrr_infopacket);
4849
8a48b44c 4850 new_crtc_state->freesync_timing_changed |=
180db303
NK
4851 (memcmp(&new_crtc_state->vrr_params.adjust,
4852 &vrr_params.adjust,
4853 sizeof(vrr_params.adjust)) != 0);
bb47de73 4854
8a48b44c 4855 new_crtc_state->freesync_vrr_info_changed |=
bb47de73
NK
4856 (memcmp(&new_crtc_state->vrr_infopacket,
4857 &vrr_infopacket,
4858 sizeof(vrr_infopacket)) != 0);
4859
180db303 4860 new_crtc_state->vrr_params = vrr_params;
bb47de73
NK
4861 new_crtc_state->vrr_infopacket = vrr_infopacket;
4862
180db303 4863 new_stream->adjust = new_crtc_state->vrr_params.adjust;
bb47de73
NK
4864 new_stream->vrr_infopacket = vrr_infopacket;
4865
4866 if (new_crtc_state->freesync_vrr_info_changed)
4867 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
4868 new_crtc_state->base.crtc->base.id,
4869 (int)new_crtc_state->base.vrr_enabled,
180db303 4870 (int)vrr_params.state);
bb47de73
NK
4871}
4872
e854194c
MK
4873static void pre_update_freesync_state_on_stream(
4874 struct amdgpu_display_manager *dm,
4875 struct dm_crtc_state *new_crtc_state)
4876{
4877 struct dc_stream_state *new_stream = new_crtc_state->stream;
4878 struct mod_vrr_params vrr_params = new_crtc_state->vrr_params;
4879 struct mod_freesync_config config = new_crtc_state->freesync_config;
4880
4881 if (!new_stream)
4882 return;
4883
4884 /*
4885 * TODO: Determine why min/max totals and vrefresh can be 0 here.
4886 * For now it's sufficient to just guard against these conditions.
4887 */
4888 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
4889 return;
4890
4891 if (new_crtc_state->vrr_supported &&
4892 config.min_refresh_in_uhz &&
4893 config.max_refresh_in_uhz) {
4894 config.state = new_crtc_state->base.vrr_enabled ?
4895 VRR_STATE_ACTIVE_VARIABLE :
4896 VRR_STATE_INACTIVE;
4897 } else {
4898 config.state = VRR_STATE_UNSUPPORTED;
4899 }
4900
4901 mod_freesync_build_vrr_params(dm->freesync_module,
4902 new_stream,
4903 &config, &vrr_params);
4904
4905 new_crtc_state->freesync_timing_changed |=
4906 (memcmp(&new_crtc_state->vrr_params.adjust,
4907 &vrr_params.adjust,
4908 sizeof(vrr_params.adjust)) != 0);
4909
4910 new_crtc_state->vrr_params = vrr_params;
4911}
4912
66b0c973
MK
4913static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
4914 struct dm_crtc_state *new_state)
4915{
4916 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
4917 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
4918
4919 if (!old_vrr_active && new_vrr_active) {
4920 /* Transition VRR inactive -> active:
4921 * While VRR is active, we must not disable vblank irq, as a
4922 * reenable after disable would compute bogus vblank/pflip
4923 * timestamps if it likely happened inside display front-porch.
d2574c33
MK
4924 *
4925 * We also need vupdate irq for the actual core vblank handling
4926 * at end of vblank.
66b0c973 4927 */
d2574c33 4928 dm_set_vupdate_irq(new_state->base.crtc, true);
66b0c973
MK
4929 drm_crtc_vblank_get(new_state->base.crtc);
4930 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
4931 __func__, new_state->base.crtc->base.id);
4932 } else if (old_vrr_active && !new_vrr_active) {
4933 /* Transition VRR active -> inactive:
4934 * Allow vblank irq disable again for fixed refresh rate.
4935 */
d2574c33 4936 dm_set_vupdate_irq(new_state->base.crtc, false);
66b0c973
MK
4937 drm_crtc_vblank_put(new_state->base.crtc);
4938 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
4939 __func__, new_state->base.crtc->base.id);
4940 }
4941}
4942
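/*
 * Core plane-commit path: build a single dc_surface_update bundle for all
 * non-cursor planes on the CRTC, wait for fences on flipped buffers, and
 * throttle flip programming against the target vblank (with VRR-aware
 * throttling) before the bundle is handed to DC.
 */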
3be5262e 4943static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
eb3dc897 4944 struct dc_state *dc_state,
3ee6b26b
AD
4945 struct drm_device *dev,
4946 struct amdgpu_display_manager *dm,
4947 struct drm_crtc *pcrtc,
420cd472 4948 bool wait_for_vblank)
e7b07cee 4949{
8a48b44c
DF
4950 uint32_t i, r;
4951 uint64_t timestamp_ns;
e7b07cee 4952 struct drm_plane *plane;
0bc9706d 4953 struct drm_plane_state *old_plane_state, *new_plane_state;
e7b07cee 4954 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
0bc9706d
LSL
4955 struct drm_crtc_state *new_pcrtc_state =
4956 drm_atomic_get_new_crtc_state(state, pcrtc);
4957 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
44d09c6a
HW
4958 struct dm_crtc_state *dm_old_crtc_state =
4959 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
74aa7bd4 4960 int planes_count = 0, vpos, hpos;
e7b07cee 4961 unsigned long flags;
8a48b44c 4962 struct amdgpu_bo *abo;
09e5665a 4963 uint64_t tiling_flags;
fdd1fe57
MK
4964 uint32_t target_vblank, last_flip_vblank;
4965 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
74aa7bd4 4966 bool pflip_present = false;
bc7f670e
DF
4967 struct {
4968 struct dc_surface_update surface_updates[MAX_SURFACES];
4969 struct dc_plane_info plane_infos[MAX_SURFACES];
4970 struct dc_scaling_info scaling_infos[MAX_SURFACES];
74aa7bd4 4971 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
bc7f670e 4972 struct dc_stream_update stream_update;
74aa7bd4 4973 } *bundle;
bc7f670e 4974
74aa7bd4 4975 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8a48b44c 4976
74aa7bd4
DF
4977 if (!bundle) {
4978 dm_error("Failed to allocate update bundle\n");
4b510503
NK
4979 goto cleanup;
4980 }
e7b07cee
HW
4981
4982 /* update planes when needed */
0bc9706d
LSL
4983 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
4984 struct drm_crtc *crtc = new_plane_state->crtc;
f5ba60fe 4985 struct drm_crtc_state *new_crtc_state;
0bc9706d 4986 struct drm_framebuffer *fb = new_plane_state->fb;
8a48b44c 4987 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
34bafd27 4988 bool plane_needs_flip;
c7af5f77 4989 struct dc_plane_state *dc_plane;
54d76575 4990 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
e7b07cee 4991
80c218d5
NK
4992 /* Cursor plane is handled after stream updates */
4993 if (plane->type == DRM_PLANE_TYPE_CURSOR)
e7b07cee 4994 continue;
e7b07cee 4995
f5ba60fe
DD
4996 if (!fb || !crtc || pcrtc != crtc)
4997 continue;
4998
4999 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
5000 if (!new_crtc_state->active)
e7b07cee
HW
5001 continue;
5002
bc7f670e 5003 dc_plane = dm_new_plane_state->dc_state;
e7b07cee 5004
74aa7bd4 5005 bundle->surface_updates[planes_count].surface = dc_plane;
bc7f670e 5006 if (new_pcrtc_state->color_mgmt_changed) {
74aa7bd4
DF
5007 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
5008 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
bc7f670e 5009 }
8a48b44c 5010
8a48b44c 5011
74aa7bd4
DF
5012 bundle->scaling_infos[planes_count].scaling_quality = dc_plane->scaling_quality;
5013 bundle->scaling_infos[planes_count].src_rect = dc_plane->src_rect;
5014 bundle->scaling_infos[planes_count].dst_rect = dc_plane->dst_rect;
5015 bundle->scaling_infos[planes_count].clip_rect = dc_plane->clip_rect;
5016 bundle->surface_updates[planes_count].scaling_info = &bundle->scaling_infos[planes_count];
f7c8930d
AG
5017
5018
74aa7bd4
DF
5019 bundle->plane_infos[planes_count].color_space = dc_plane->color_space;
5020 bundle->plane_infos[planes_count].format = dc_plane->format;
5021 bundle->plane_infos[planes_count].plane_size = dc_plane->plane_size;
5022 bundle->plane_infos[planes_count].rotation = dc_plane->rotation;
5023 bundle->plane_infos[planes_count].horizontal_mirror = dc_plane->horizontal_mirror;
5024 bundle->plane_infos[planes_count].stereo_format = dc_plane->stereo_format;
5025 bundle->plane_infos[planes_count].tiling_info = dc_plane->tiling_info;
5026 bundle->plane_infos[planes_count].visible = dc_plane->visible;
d74004b6
NK
5027 bundle->plane_infos[planes_count].global_alpha = dc_plane->global_alpha;
5028 bundle->plane_infos[planes_count].global_alpha_value = dc_plane->global_alpha_value;
74aa7bd4
DF
5029 bundle->plane_infos[planes_count].per_pixel_alpha = dc_plane->per_pixel_alpha;
5030 bundle->plane_infos[planes_count].dcc = dc_plane->dcc;
5031 bundle->surface_updates[planes_count].plane_info = &bundle->plane_infos[planes_count];
8a48b44c 5032
f5031000 5033 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8a48b44c 5034
f5031000 5035 pflip_present = pflip_present || plane_needs_flip;
8a48b44c 5036
f5031000
DF
5037 if (!plane_needs_flip) {
5038 planes_count += 1;
5039 continue;
5040 }
8a48b44c 5041
f5031000
DF
5042 /*
5043	 * TODO: This might fail and hence is better not used; wait
5044	 * explicitly on fences instead, and in general this should only
5045	 * be called for a blocking commit, as per the framework helpers
5047 */
5048 abo = gem_to_amdgpu_bo(fb->obj[0]);
5049 r = amdgpu_bo_reserve(abo, true);
5050 if (unlikely(r != 0)) {
5051 DRM_ERROR("failed to reserve buffer before flip\n");
5052 WARN_ON(1);
5053 }
8a48b44c 5054
f5031000
DF
5055 /* Wait for all fences on this FB */
5056 WARN_ON(reservation_object_wait_timeout_rcu(abo->tbo.resv, true, false,
5057 MAX_SCHEDULE_TIMEOUT) < 0);
8a48b44c 5058
f5031000 5059 amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
8a48b44c 5060
f5031000 5061 amdgpu_bo_unreserve(abo);
8a48b44c 5062
09e5665a
NK
5063 fill_plane_tiling_attributes(dm->adev, afb, dc_plane,
5064 &bundle->plane_infos[planes_count].tiling_info,
5065 &bundle->plane_infos[planes_count].dcc,
5066 &bundle->flip_addrs[planes_count].address,
5067 tiling_flags);
8a48b44c 5068
f5031000
DF
5069 bundle->flip_addrs[planes_count].flip_immediate =
5070 (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
8a48b44c 5071
f5031000
DF
5072 timestamp_ns = ktime_get_ns();
5073 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
5074 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
5075 bundle->surface_updates[planes_count].surface = dc_plane;
8a48b44c 5076
f5031000
DF
5077 if (!bundle->surface_updates[planes_count].surface) {
5078 DRM_ERROR("No surface for CRTC: id=%d\n",
5079 acrtc_attach->crtc_id);
5080 continue;
bc7f670e
DF
5081 }
5082
f5031000
DF
5083 if (plane == pcrtc->primary)
5084 update_freesync_state_on_stream(
5085 dm,
5086 acrtc_state,
5087 acrtc_state->stream,
5088 dc_plane,
5089 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
bc7f670e 5090
f5031000
DF
5091 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
5092 __func__,
5093 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
5094 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
bc7f670e
DF
5095
5096 planes_count += 1;
5097
8a48b44c
DF
5098 }
5099
74aa7bd4 5100 if (pflip_present) {
634092b1
MK
5101 if (!vrr_active) {
5102 /* Use old throttling in non-vrr fixed refresh rate mode
5103 * to keep flip scheduling based on target vblank counts
5104 * working in a backwards compatible way, e.g., for
5105 * clients using the GLX_OML_sync_control extension or
5106 * DRI3/Present extension with defined target_msc.
5107 */
fdd1fe57 5108 last_flip_vblank = amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id);
634092b1
MK
5109 }
5110 else {
5111 /* For variable refresh rate mode only:
5112 * Get vblank of last completed flip to avoid > 1 vrr
5113 * flips per video frame by use of throttling, but allow
5114 * flip programming anywhere in the possibly large
5115 * variable vrr vblank interval for fine-grained flip
5116 * timing control and more opportunity to avoid stutter
5117 * on late submission of flips.
5118 */
5119 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
5120 last_flip_vblank = acrtc_attach->last_flip_vblank;
5121 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
5122 }
5123
fdd1fe57 5124 target_vblank = last_flip_vblank + wait_for_vblank;
8a48b44c
DF
5125
5126 /*
5127 * Wait until we're out of the vertical blank period before the one
5128 * targeted by the flip
5129 */
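		/* Poll in ~1 ms steps: keep waiting while the scanout
		 * position is valid, still inside vblank, and the vblank
		 * counter has not yet reached target_vblank. */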
5130 while ((acrtc_attach->enabled &&
5131 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
5132 0, &vpos, &hpos, NULL,
5133 NULL, &pcrtc->hwmode)
5134 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
5135 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
5136 (int)(target_vblank -
5137 amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id)) > 0)) {
5138 usleep_range(1000, 1100);
5139 }
5140
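		/* Arm the pageflip completion: take a vblank reference and
		 * mark the flip as pending under the event lock, so the
		 * pflip interrupt handler can deliver the event later. */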
5141 if (acrtc_attach->base.state->event) {
5142 drm_crtc_vblank_get(pcrtc);
5143
5144 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
5145
5146 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
5147 prepare_flip_isr(acrtc_attach);
5148
5149 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
5150 }
5151
5152 if (acrtc_state->stream) {
5153
5154 if (acrtc_state->freesync_timing_changed)
74aa7bd4 5155 bundle->stream_update.adjust =
8a48b44c
DF
5156 &acrtc_state->stream->adjust;
5157
5158 if (acrtc_state->freesync_vrr_info_changed)
74aa7bd4 5159 bundle->stream_update.vrr_infopacket =
8a48b44c 5160 &acrtc_state->stream->vrr_infopacket;
e7b07cee 5161 }
e7b07cee
HW
5162 }
5163
5164 if (planes_count) {
bc7f670e 5165 if (new_pcrtc_state->mode_changed) {
74aa7bd4
DF
5166 bundle->stream_update.src = acrtc_state->stream->src;
5167 bundle->stream_update.dst = acrtc_state->stream->dst;
e7b07cee
HW
5168 }
5169
bc7f670e 5170 if (new_pcrtc_state->color_mgmt_changed)
74aa7bd4 5171 bundle->stream_update.out_transfer_func = acrtc_state->stream->out_transfer_func;
bc7f670e 5172
8a48b44c 5173 acrtc_state->stream->abm_level = acrtc_state->abm_level;
bc7f670e 5174 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
74aa7bd4 5175 bundle->stream_update.abm_level = &acrtc_state->abm_level;
44d09c6a 5176
bc7f670e
DF
5177 mutex_lock(&dm->dc_lock);
5178 dc_commit_updates_for_stream(dm->dc,
74aa7bd4 5179 bundle->surface_updates,
bc7f670e
DF
5180 planes_count,
5181 acrtc_state->stream,
74aa7bd4 5182 &bundle->stream_update,
bc7f670e
DF
5183 dc_state);
5184 mutex_unlock(&dm->dc_lock);
e7b07cee 5185 }
4b510503 5186
80c218d5
NK
5187 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
5188 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5189 handle_cursor_update(plane, old_plane_state);
5190
4b510503 5191cleanup:
74aa7bd4 5192 kfree(bundle);
e7b07cee
HW
5193}
5194
1f6010a9 5195/*
27b3f4fc
LSL
5196 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
5197 * @crtc_state: the DRM CRTC state
5198 * @stream_state: the DC stream state.
5199 *
5200 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
5201 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
5202 */
5203static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
5204 struct dc_stream_state *stream_state)
5205{
b9952f93 5206 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
27b3f4fc 5207}
e7b07cee 5208
7578ecda
AD
5209static int amdgpu_dm_atomic_commit(struct drm_device *dev,
5210 struct drm_atomic_state *state,
5211 bool nonblock)
e7b07cee
HW
5212{
5213 struct drm_crtc *crtc;
c2cea706 5214 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
e7b07cee
HW
5215 struct amdgpu_device *adev = dev->dev_private;
5216 int i;
5217
5218 /*
 5219 * We evade vblank and pflip interrupts on CRTCs that are about to
 5220 * be changed. We do it here to flush & disable the interrupts before
 5221 * drm_swap_state is called in drm_atomic_helper_commit, since that
 5222 * updates the crtc->dm_crtc_state->stream pointer which is used in
5223 * the ISRs.
5224 */
c2cea706 5225 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
54d76575 5226 struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
428da2bd 5227 struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee
HW
5228 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5229
428da2bd
NK
5230 if (drm_atomic_crtc_needs_modeset(new_crtc_state)
5231 && dm_old_crtc_state->stream) {
5232 /*
43a6a02e
NK
5233 * If the stream is removed and CRC capture was
5234 * enabled on the CRTC the extra vblank reference
5235 * needs to be dropped since CRC capture will be
5236 * disabled.
428da2bd 5237 */
43a6a02e
NK
5238 if (!dm_new_crtc_state->stream
5239 && dm_new_crtc_state->crc_enabled) {
428da2bd
NK
5240 drm_crtc_vblank_put(crtc);
5241 dm_new_crtc_state->crc_enabled = false;
5242 }
5243
e7b07cee 5244 manage_dm_interrupts(adev, acrtc, false);
428da2bd 5245 }
e7b07cee 5246 }
1f6010a9
DF
5247 /*
5248 * Add check here for SoC's that support hardware cursor plane, to
5249 * unset legacy_cursor_update
5250 */
e7b07cee
HW
5251
5252 return drm_atomic_helper_commit(dev, state, nonblock);
5253
5254 /*TODO Handle EINTR, reenable IRQ*/
5255}
5256
b8592b48
LL
5257/**
5258 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
5259 * @state: The atomic state to commit
5260 *
5261 * This will tell DC to commit the constructed DC state from atomic_check,
 5262 * programming the hardware. Any failure here implies a hardware failure, since
 5263 * atomic check should have filtered out anything non-kosher.
5264 */
7578ecda 5265static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
e7b07cee
HW
5266{
5267 struct drm_device *dev = state->dev;
5268 struct amdgpu_device *adev = dev->dev_private;
5269 struct amdgpu_display_manager *dm = &adev->dm;
5270 struct dm_atomic_state *dm_state;
eb3dc897 5271 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
e7b07cee 5272 uint32_t i, j;
5cc6dcbd 5273 struct drm_crtc *crtc;
0bc9706d 5274 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
e7b07cee
HW
5275 unsigned long flags;
5276 bool wait_for_vblank = true;
5277 struct drm_connector *connector;
c2cea706 5278 struct drm_connector_state *old_con_state, *new_con_state;
54d76575 5279 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
fe2a1965 5280 int crtc_disable_count = 0;
e7b07cee
HW
5281
5282 drm_atomic_helper_update_legacy_modeset_state(dev, state);
5283
eb3dc897
NK
5284 dm_state = dm_atomic_get_new_state(state);
5285 if (dm_state && dm_state->context) {
5286 dc_state = dm_state->context;
5287 } else {
5288 /* No state changes, retain current state. */
813d20dc 5289 dc_state_temp = dc_create_state(dm->dc);
eb3dc897
NK
5290 ASSERT(dc_state_temp);
5291 dc_state = dc_state_temp;
5292 dc_resource_state_copy_construct_current(dm->dc, dc_state);
5293 }
e7b07cee
HW
5294
5295 /* update changed items */
0bc9706d 5296 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
e7b07cee 5297 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 5298
54d76575
LSL
5299 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5300 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
e7b07cee 5301
f1ad2f5e 5302 DRM_DEBUG_DRIVER(
e7b07cee
HW
5303 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
5304 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
5305 "connectors_changed:%d\n",
5306 acrtc->crtc_id,
0bc9706d
LSL
5307 new_crtc_state->enable,
5308 new_crtc_state->active,
5309 new_crtc_state->planes_changed,
5310 new_crtc_state->mode_changed,
5311 new_crtc_state->active_changed,
5312 new_crtc_state->connectors_changed);
e7b07cee 5313
27b3f4fc
LSL
5314 /* Copy all transient state flags into dc state */
5315 if (dm_new_crtc_state->stream) {
5316 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
5317 dm_new_crtc_state->stream);
5318 }
5319
e7b07cee
HW
 5320 /* Handle the headless hotplug case, updating new_state and
 5321 * the aconnector as needed.
5322 */
5323
54d76575 5324 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
e7b07cee 5325
f1ad2f5e 5326 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 5327
54d76575 5328 if (!dm_new_crtc_state->stream) {
e7b07cee 5329 /*
b830ebc9
HW
 5330 * This could happen because of issues with
 5331 * userspace notification delivery.
 5332 * In this case userspace tries to set a mode on
1f6010a9
DF
 5333 * a display which is in fact disconnected.
 5334 * dc_sink is NULL on the aconnector in this case.
b830ebc9
HW
 5335 * We expect a mode-reset to come soon.
 5336 *
 5337 * This can also happen when an unplug occurs
 5338 * during the resume sequence.
 5339 *
 5340 * In that case, we want to pretend we still
 5341 * have a sink to keep the pipe running so that
 5342 * hw state stays consistent with the sw state.
 5343 */
f1ad2f5e 5344 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
e7b07cee
HW
5345 __func__, acrtc->base.base.id);
5346 continue;
5347 }
5348
54d76575
LSL
5349 if (dm_old_crtc_state->stream)
5350 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
e7b07cee 5351
97028037
LP
5352 pm_runtime_get_noresume(dev->dev);
5353
e7b07cee 5354 acrtc->enabled = true;
0bc9706d
LSL
5355 acrtc->hw_mode = new_crtc_state->mode;
5356 crtc->hwmode = new_crtc_state->mode;
5357 } else if (modereset_required(new_crtc_state)) {
f1ad2f5e 5358 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
e7b07cee
HW
5359
5360 /* i.e. reset mode */
54d76575
LSL
5361 if (dm_old_crtc_state->stream)
5362 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
e7b07cee
HW
5363 }
5364 } /* for_each_crtc_in_state() */
5365
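	/* Hand the fully constructed DC state to the hardware in one shot;
	 * dc_lock serializes this against other DC updates. */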
eb3dc897
NK
5366 if (dc_state) {
5367 dm_enable_per_frame_crtc_master_sync(dc_state);
674e78ac 5368 mutex_lock(&dm->dc_lock);
eb3dc897 5369 WARN_ON(!dc_commit_state(dm->dc, dc_state));
674e78ac 5370 mutex_unlock(&dm->dc_lock);
fa2123db 5371 }
e7b07cee 5372
0bc9706d 5373 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 5374 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 5375
54d76575 5376 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 5377
54d76575 5378 if (dm_new_crtc_state->stream != NULL) {
e7b07cee 5379 const struct dc_stream_status *status =
54d76575 5380 dc_stream_get_status(dm_new_crtc_state->stream);
e7b07cee 5381
eb3dc897 5382 if (!status)
09f609c3
LL
5383 status = dc_stream_get_status_from_state(dc_state,
5384 dm_new_crtc_state->stream);
eb3dc897 5385
e7b07cee 5386 if (!status)
54d76575 5387 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
e7b07cee
HW
5388 else
5389 acrtc->otg_inst = status->primary_otg_inst;
5390 }
5391 }
5392
02d6a6fc 5393 /* Handle connector state changes */
c2cea706 5394 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
54d76575
LSL
5395 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
5396 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
5397 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
19afd799
NC
5398 struct dc_surface_update dummy_updates[MAX_SURFACES];
5399 struct dc_stream_update stream_update;
e7b07cee
HW
5400 struct dc_stream_status *status = NULL;
5401
19afd799
NC
5402 memset(&dummy_updates, 0, sizeof(dummy_updates));
5403 memset(&stream_update, 0, sizeof(stream_update));
5404
44d09c6a 5405 if (acrtc) {
0bc9706d 5406 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
44d09c6a
HW
5407 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
5408 }
0bc9706d 5409
e7b07cee 5410 /* Skip any modesets/resets */
0bc9706d 5411 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
e7b07cee
HW
5412 continue;
5413
54d76575 5414 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
c1ee92f9
DF
5415 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
5416
c1ee92f9
DF
5417 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state) &&
5418 (dm_new_crtc_state->abm_level == dm_old_crtc_state->abm_level))
5419 continue;
e7b07cee 5420
02d6a6fc
DF
5421 if (is_scaling_state_different(dm_new_con_state, dm_old_con_state)) {
5422 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
5423 dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);
e7b07cee 5424
02d6a6fc
DF
5425 stream_update.src = dm_new_crtc_state->stream->src;
5426 stream_update.dst = dm_new_crtc_state->stream->dst;
5427 }
5428
5429 if (dm_new_crtc_state->abm_level != dm_old_crtc_state->abm_level) {
5430 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
5431
5432 stream_update.abm_level = &dm_new_crtc_state->abm_level;
5433 }
70e8ffc5 5434
54d76575 5435 status = dc_stream_get_status(dm_new_crtc_state->stream);
e7b07cee 5436 WARN_ON(!status);
3be5262e 5437 WARN_ON(!status->plane_count);
e7b07cee 5438
02d6a6fc
DF
5439 /*
5440 * TODO: DC refuses to perform stream updates without a dc_surface_update.
5441 * Here we create an empty update on each plane.
5442 * To fix this, DC should permit updating only stream properties.
5443 */
5444 for (j = 0; j < status->plane_count; j++)
5445 dummy_updates[j].surface = status->plane_states[0];
5446
5447
5448 mutex_lock(&dm->dc_lock);
5449 dc_commit_updates_for_stream(dm->dc,
5450 dummy_updates,
5451 status->plane_count,
5452 dm_new_crtc_state->stream,
5453 &stream_update,
5454 dc_state);
5455 mutex_unlock(&dm->dc_lock);
e7b07cee
HW
5456 }
5457
e854194c
MK
5458 /* Update freesync state before amdgpu_dm_handle_vrr_transition(). */
5459 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
5460 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5461 pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
5462 }
5463
e1fc2dca
LSL
5464 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
5465 new_crtc_state, i) {
e7b07cee
HW
5466 /*
 5467 * Loop to enable interrupts on newly arrived CRTCs.
5468 */
e1fc2dca
LSL
5469 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5470 bool modeset_needed;
b830ebc9 5471
fe2a1965
LP
5472 if (old_crtc_state->active && !new_crtc_state->active)
5473 crtc_disable_count++;
5474
54d76575 5475 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e1fc2dca 5476 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
66b0c973
MK
5477
5478 /* Handle vrr on->off / off->on transitions */
5479 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
5480 dm_new_crtc_state);
5481
e1fc2dca
LSL
5482 modeset_needed = modeset_required(
5483 new_crtc_state,
5484 dm_new_crtc_state->stream,
5485 dm_old_crtc_state->stream);
5486
5487 if (dm_new_crtc_state->stream == NULL || !modeset_needed)
5488 continue;
e7b07cee 5489
e7b07cee 5490 manage_dm_interrupts(adev, acrtc, true);
43a6a02e 5491
cc7e422d 5492#ifdef CONFIG_DEBUG_FS
43a6a02e
NK
 5493 /* The stream has changed, so CRC capture needs to be re-enabled. */
5494 if (dm_new_crtc_state->crc_enabled)
5495 amdgpu_dm_crtc_set_crc_source(crtc, "auto");
cc7e422d 5496#endif
e7b07cee
HW
5497 }
5498
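	/* Async (tearing) flips must not be throttled to vblank, so skip
	 * the flip_done wait if any CRTC in the new state requested one. */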
420cd472
DF
5499 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
5500 if (new_crtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
5501 wait_for_vblank = false;
5502
e7b07cee 5503 /* update planes when needed per crtc*/
5cc6dcbd 5504 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
54d76575 5505 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 5506
54d76575 5507 if (dm_new_crtc_state->stream)
eb3dc897 5508 amdgpu_dm_commit_planes(state, dc_state, dev,
420cd472 5509 dm, crtc, wait_for_vblank);
e7b07cee
HW
5510 }
5511
5512
5513 /*
5514 * send vblank event on all events not handled in flip and
5515 * mark consumed event for drm_atomic_helper_commit_hw_done
5516 */
5517 spin_lock_irqsave(&adev->ddev->event_lock, flags);
0bc9706d 5518 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 5519
0bc9706d
LSL
5520 if (new_crtc_state->event)
5521 drm_send_event_locked(dev, &new_crtc_state->event->base);
e7b07cee 5522
0bc9706d 5523 new_crtc_state->event = NULL;
e7b07cee
HW
5524 }
5525 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
5526
29c8f234
LL
5527 /* Signal HW programming completion */
5528 drm_atomic_helper_commit_hw_done(state);
e7b07cee
HW
5529
5530 if (wait_for_vblank)
320a1274 5531 drm_atomic_helper_wait_for_flip_done(dev, state);
e7b07cee
HW
5532
5533 drm_atomic_helper_cleanup_planes(dev, state);
97028037 5534
1f6010a9
DF
5535 /*
5536 * Finally, drop a runtime PM reference for each newly disabled CRTC,
97028037
LP
5537 * so we can put the GPU into runtime suspend if we're not driving any
5538 * displays anymore
5539 */
fe2a1965
LP
5540 for (i = 0; i < crtc_disable_count; i++)
5541 pm_runtime_put_autosuspend(dev->dev);
97028037 5542 pm_runtime_mark_last_busy(dev->dev);
eb3dc897
NK
5543
5544 if (dc_state_temp)
5545 dc_release_state(dc_state_temp);
e7b07cee
HW
5546}
5547
5548
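/*
 * Build and commit a minimal atomic state that re-applies the CRTC, plane
 * and connector state currently attached to @connector, forcing a modeset
 * without involving userspace.
 */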
5549static int dm_force_atomic_commit(struct drm_connector *connector)
5550{
5551 int ret = 0;
5552 struct drm_device *ddev = connector->dev;
5553 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
5554 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
5555 struct drm_plane *plane = disconnected_acrtc->base.primary;
5556 struct drm_connector_state *conn_state;
5557 struct drm_crtc_state *crtc_state;
5558 struct drm_plane_state *plane_state;
5559
5560 if (!state)
5561 return -ENOMEM;
5562
5563 state->acquire_ctx = ddev->mode_config.acquire_ctx;
5564
5565 /* Construct an atomic state to restore previous display setting */
5566
5567 /*
5568 * Attach connectors to drm_atomic_state
5569 */
5570 conn_state = drm_atomic_get_connector_state(state, connector);
5571
5572 ret = PTR_ERR_OR_ZERO(conn_state);
5573 if (ret)
5574 goto err;
5575
5576 /* Attach crtc to drm_atomic_state*/
5577 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
5578
5579 ret = PTR_ERR_OR_ZERO(crtc_state);
5580 if (ret)
5581 goto err;
5582
5583 /* force a restore */
5584 crtc_state->mode_changed = true;
5585
5586 /* Attach plane to drm_atomic_state */
5587 plane_state = drm_atomic_get_plane_state(state, plane);
5588
5589 ret = PTR_ERR_OR_ZERO(plane_state);
5590 if (ret)
5591 goto err;
5592
5593
5594 /* Call commit internally with the state we just constructed */
5595 ret = drm_atomic_commit(state);
5596 if (!ret)
5597 return 0;
5598
5599err:
5600 DRM_ERROR("Restoring old state failed with %i\n", ret);
5601 drm_atomic_state_put(state);
5602
5603 return ret;
5604}
5605
5606/*
1f6010a9
DF
 5607 * This function handles all cases where a set mode does not come upon hotplug.
 5608 * This includes when a display is unplugged then plugged back into the
 5609 * same port, and when running without usermode desktop manager support.
e7b07cee 5610 */
3ee6b26b
AD
5611void dm_restore_drm_connector_state(struct drm_device *dev,
5612 struct drm_connector *connector)
e7b07cee 5613{
c84dec2f 5614 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
5615 struct amdgpu_crtc *disconnected_acrtc;
5616 struct dm_crtc_state *acrtc_state;
5617
5618 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
5619 return;
5620
5621 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
70e8ffc5
HW
5622 if (!disconnected_acrtc)
5623 return;
e7b07cee 5624
70e8ffc5
HW
5625 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
5626 if (!acrtc_state->stream)
e7b07cee
HW
5627 return;
5628
5629 /*
5630 * If the previous sink is not released and different from the current,
 5631 * we deduce we are in a state where we cannot rely on a usermode call
 5632 * to turn on the display, so we do it here.
5633 */
5634 if (acrtc_state->stream->sink != aconnector->dc_sink)
5635 dm_force_atomic_commit(&aconnector->base);
5636}
5637
1f6010a9 5638/*
e7b07cee
HW
 5639 * Grabs all modesetting locks to serialize against any blocking commits,
 5640 * and waits for completion of all non-blocking commits.
5641 */
3ee6b26b
AD
5642static int do_aquire_global_lock(struct drm_device *dev,
5643 struct drm_atomic_state *state)
e7b07cee
HW
5644{
5645 struct drm_crtc *crtc;
5646 struct drm_crtc_commit *commit;
5647 long ret;
5648
1f6010a9
DF
5649 /*
 5650 * Adding all modeset locks to acquire_ctx will
e7b07cee
HW
 5651 * ensure that when the framework releases it, the
 5652 * extra locks we are taking here will get released too.
5653 */
5654 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
5655 if (ret)
5656 return ret;
5657
5658 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
5659 spin_lock(&crtc->commit_lock);
5660 commit = list_first_entry_or_null(&crtc->commit_list,
5661 struct drm_crtc_commit, commit_entry);
5662 if (commit)
5663 drm_crtc_commit_get(commit);
5664 spin_unlock(&crtc->commit_lock);
5665
5666 if (!commit)
5667 continue;
5668
1f6010a9
DF
5669 /*
5670 * Make sure all pending HW programming completed and
e7b07cee
HW
5671 * page flips done
5672 */
5673 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
5674
5675 if (ret > 0)
5676 ret = wait_for_completion_interruptible_timeout(
5677 &commit->flip_done, 10*HZ);
5678
5679 if (ret == 0)
5680 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
b830ebc9 5681 "timed out\n", crtc->base.id, crtc->name);
e7b07cee
HW
5682
5683 drm_crtc_commit_put(commit);
5684 }
5685
5686 return ret < 0 ? ret : 0;
5687}
5688
bb47de73
NK
5689static void get_freesync_config_for_crtc(
5690 struct dm_crtc_state *new_crtc_state,
5691 struct dm_connector_state *new_con_state)
98e6436d
AK
5692{
5693 struct mod_freesync_config config = {0};
98e6436d
AK
5694 struct amdgpu_dm_connector *aconnector =
5695 to_amdgpu_dm_connector(new_con_state->base.connector);
a057ec46 5696 struct drm_display_mode *mode = &new_crtc_state->base.mode;
98e6436d 5697
a057ec46
IB
5698 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
5699 aconnector->min_vfreq <= drm_mode_vrefresh(mode);
bb47de73 5700
a057ec46
IB
5701 if (new_crtc_state->vrr_supported) {
5702 new_crtc_state->stream->ignore_msa_timing_param = true;
bb47de73 5703 config.state = new_crtc_state->base.vrr_enabled ?
98e6436d
AK
5704 VRR_STATE_ACTIVE_VARIABLE :
5705 VRR_STATE_INACTIVE;
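		/* DC expects refresh rates in micro-Hz (uhz); the EDID-derived
		 * vfreq limits are in Hz, hence the 10^6 scaling. */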
5706 config.min_refresh_in_uhz =
5707 aconnector->min_vfreq * 1000000;
5708 config.max_refresh_in_uhz =
5709 aconnector->max_vfreq * 1000000;
69ff8845 5710 config.vsif_supported = true;
180db303 5711 config.btr = true;
98e6436d
AK
5712 }
5713
bb47de73
NK
5714 new_crtc_state->freesync_config = config;
5715}
98e6436d 5716
bb47de73
NK
5717static void reset_freesync_config_for_crtc(
5718 struct dm_crtc_state *new_crtc_state)
5719{
5720 new_crtc_state->vrr_supported = false;
98e6436d 5721
180db303
NK
5722 memset(&new_crtc_state->vrr_params, 0,
5723 sizeof(new_crtc_state->vrr_params));
bb47de73
NK
5724 memset(&new_crtc_state->vrr_infopacket, 0,
5725 sizeof(new_crtc_state->vrr_infopacket));
98e6436d
AK
5726}
5727
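/*
 * Create or remove the dc_stream_state for a single CRTC so that the DC
 * context mirrors the new DRM state; sets *lock_and_validation_needed
 * whenever the change amounts to more than a fast update.
 */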
4b9674e5
LL
5728static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
5729 struct drm_atomic_state *state,
5730 struct drm_crtc *crtc,
5731 struct drm_crtc_state *old_crtc_state,
5732 struct drm_crtc_state *new_crtc_state,
5733 bool enable,
5734 bool *lock_and_validation_needed)
e7b07cee 5735{
eb3dc897 5736 struct dm_atomic_state *dm_state = NULL;
54d76575 5737 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9635b754 5738 struct dc_stream_state *new_stream;
62f55537 5739 int ret = 0;
d4d4a645 5740
1f6010a9
DF
5741 /*
 5742 * TODO: Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set,
 5743 * and update the changed items there.
5744 */
4b9674e5
LL
5745 struct amdgpu_crtc *acrtc = NULL;
5746 struct amdgpu_dm_connector *aconnector = NULL;
5747 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
5748 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
5749 struct drm_plane_state *new_plane_state = NULL;
e7b07cee 5750
4b9674e5 5751 new_stream = NULL;
9635b754 5752
4b9674e5
LL
5753 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
5754 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5755 acrtc = to_amdgpu_crtc(crtc);
e7b07cee 5756
4b9674e5 5757 new_plane_state = drm_atomic_get_new_plane_state(state, new_crtc_state->crtc->primary);
f2877656 5758
4b9674e5
LL
5759 if (new_crtc_state->enable && new_plane_state && !new_plane_state->fb) {
5760 ret = -EINVAL;
5761 goto fail;
5762 }
f2877656 5763
4b9674e5 5764 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
19f89e23 5765
4b9674e5
LL
5766 /* TODO This hack should go away */
5767 if (aconnector && enable) {
5768 /* Make sure fake sink is created in plug-in scenario */
5769 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
5770 &aconnector->base);
5771 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
5772 &aconnector->base);
19f89e23 5773
4b9674e5
LL
5774 if (IS_ERR(drm_new_conn_state)) {
5775 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
5776 goto fail;
5777 }
19f89e23 5778
4b9674e5
LL
5779 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
5780 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
19f89e23 5781
02d35a67
JFZ
5782 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
5783 goto skip_modeset;
5784
4b9674e5
LL
5785 new_stream = create_stream_for_sink(aconnector,
5786 &new_crtc_state->mode,
5787 dm_new_conn_state,
5788 dm_old_crtc_state->stream);
19f89e23 5789
4b9674e5
LL
5790 /*
 5791 * We can have no stream on ACTION_SET if a display
 5792 * was disconnected during S3; in this case it is not an
 5793 * error. The OS will be updated after detection, and
 5794 * will do the right thing on the next atomic commit.
5795 */
19f89e23 5796
4b9674e5
LL
5797 if (!new_stream) {
5798 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
5799 __func__, acrtc->base.base.id);
5800 ret = -ENOMEM;
5801 goto fail;
5802 }
e7b07cee 5803
4b9674e5 5804 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
98e6436d 5805
4b9674e5
LL
5806 if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
5807 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
5808 new_crtc_state->mode_changed = false;
5809 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
5810 new_crtc_state->mode_changed);
62f55537 5811 }
4b9674e5 5812 }
b830ebc9 5813
02d35a67 5814 /* mode_changed flag may get updated above, need to check again */
4b9674e5
LL
5815 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
5816 goto skip_modeset;
e7b07cee 5817
4b9674e5
LL
5818 DRM_DEBUG_DRIVER(
5819 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
5820 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
5821 "connectors_changed:%d\n",
5822 acrtc->crtc_id,
5823 new_crtc_state->enable,
5824 new_crtc_state->active,
5825 new_crtc_state->planes_changed,
5826 new_crtc_state->mode_changed,
5827 new_crtc_state->active_changed,
5828 new_crtc_state->connectors_changed);
62f55537 5829
4b9674e5
LL
5830 /* Remove stream for any changed/disabled CRTC */
5831 if (!enable) {
62f55537 5832
4b9674e5
LL
5833 if (!dm_old_crtc_state->stream)
5834 goto skip_modeset;
eb3dc897 5835
4b9674e5
LL
5836 ret = dm_atomic_get_state(state, &dm_state);
5837 if (ret)
5838 goto fail;
e7b07cee 5839
4b9674e5
LL
5840 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
5841 crtc->base.id);
62f55537 5842
4b9674e5
LL
5843 /* i.e. reset mode */
5844 if (dc_remove_stream_from_ctx(
5845 dm->dc,
5846 dm_state->context,
5847 dm_old_crtc_state->stream) != DC_OK) {
5848 ret = -EINVAL;
5849 goto fail;
5850 }
62f55537 5851
4b9674e5
LL
5852 dc_stream_release(dm_old_crtc_state->stream);
5853 dm_new_crtc_state->stream = NULL;
bb47de73 5854
4b9674e5 5855 reset_freesync_config_for_crtc(dm_new_crtc_state);
62f55537 5856
4b9674e5 5857 *lock_and_validation_needed = true;
62f55537 5858
4b9674e5
LL
 5859 } else { /* Add stream for any updated/enabled CRTC */
 5860 /*
 5861 * Quick fix to prevent a NULL pointer on new_stream when newly
 5862 * added MST connectors are not found in the existing crtc_state in chained mode.
 5863 * TODO: need to dig out the root cause of that.
5864 */
5865 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
5866 goto skip_modeset;
62f55537 5867
4b9674e5
LL
5868 if (modereset_required(new_crtc_state))
5869 goto skip_modeset;
62f55537 5870
4b9674e5
LL
5871 if (modeset_required(new_crtc_state, new_stream,
5872 dm_old_crtc_state->stream)) {
62f55537 5873
4b9674e5 5874 WARN_ON(dm_new_crtc_state->stream);
eb3dc897 5875
4b9674e5
LL
5876 ret = dm_atomic_get_state(state, &dm_state);
5877 if (ret)
5878 goto fail;
27b3f4fc 5879
4b9674e5 5880 dm_new_crtc_state->stream = new_stream;
62f55537 5881
4b9674e5 5882 dc_stream_retain(new_stream);
1dc90497 5883
4b9674e5
LL
5884 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
5885 crtc->base.id);
1dc90497 5886
4b9674e5
LL
5887 if (dc_add_stream_to_ctx(
5888 dm->dc,
5889 dm_state->context,
5890 dm_new_crtc_state->stream) != DC_OK) {
5891 ret = -EINVAL;
5892 goto fail;
9b690ef3
BL
5893 }
5894
4b9674e5
LL
5895 *lock_and_validation_needed = true;
5896 }
5897 }
e277adc5 5898
4b9674e5
LL
5899skip_modeset:
5900 /* Release extra reference */
5901 if (new_stream)
5902 dc_stream_release(new_stream);
e277adc5 5903
4b9674e5
LL
5904 /*
5905 * We want to do dc stream updates that do not require a
5906 * full modeset below.
5907 */
5908 if (!(enable && aconnector && new_crtc_state->enable &&
5909 new_crtc_state->active))
5910 return 0;
5911 /*
5912 * Given above conditions, the dc state cannot be NULL because:
5913 * 1. We're in the process of enabling CRTCs (just been added
5914 * to the dc context, or already is on the context)
5915 * 2. Has a valid connector attached, and
5916 * 3. Is currently active and enabled.
5917 * => The dc stream state currently exists.
5918 */
5919 BUG_ON(dm_new_crtc_state->stream == NULL);
a9e8d275 5920
4b9674e5
LL
5921 /* Scaling or underscan settings */
5922 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
5923 update_stream_scaling_settings(
5924 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
98e6436d 5925
b05e2c5e
DF
5926 /* ABM settings */
5927 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
5928
4b9674e5
LL
5929 /*
5930 * Color management settings. We also update color properties
5931 * when a modeset is needed, to ensure it gets reprogrammed.
5932 */
5933 if (dm_new_crtc_state->base.color_mgmt_changed ||
5934 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
5935 ret = amdgpu_dm_set_regamma_lut(dm_new_crtc_state);
5936 if (ret)
5937 goto fail;
5938 amdgpu_dm_set_ctm(dm_new_crtc_state);
62f55537 5939 }
e7b07cee 5940
4b9674e5
LL
5941 /* Update Freesync settings. */
5942 get_freesync_config_for_crtc(dm_new_crtc_state,
5943 dm_new_conn_state);
5944
62f55537 5945 return ret;
9635b754
DS
5946
5947fail:
5948 if (new_stream)
5949 dc_stream_release(new_stream);
5950 return ret;
62f55537 5951}
9b690ef3 5952
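/*
 * Add or remove a single plane's dc_plane_state in the DC context so it
 * mirrors the new DRM plane state; sets *lock_and_validation_needed
 * whenever the change requires full validation.
 */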
9e869063
LL
5953static int dm_update_plane_state(struct dc *dc,
5954 struct drm_atomic_state *state,
5955 struct drm_plane *plane,
5956 struct drm_plane_state *old_plane_state,
5957 struct drm_plane_state *new_plane_state,
5958 bool enable,
5959 bool *lock_and_validation_needed)
62f55537 5960{
eb3dc897
NK
5961
5962 struct dm_atomic_state *dm_state = NULL;
62f55537 5963 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
0bc9706d 5964 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
54d76575 5965 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
54d76575 5966 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
62f55537
AG
5967 /* TODO return page_flip_needed() function */
5968 bool pflip_needed = !state->allow_modeset;
5969 int ret = 0;
e7b07cee 5970
9b690ef3 5971
9e869063
LL
5972 new_plane_crtc = new_plane_state->crtc;
5973 old_plane_crtc = old_plane_state->crtc;
5974 dm_new_plane_state = to_dm_plane_state(new_plane_state);
5975 dm_old_plane_state = to_dm_plane_state(old_plane_state);
62f55537 5976
9e869063
LL
 5977 /* TODO: Implement atomic check for the cursor plane */
5978 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5979 return 0;
9b690ef3 5980
9e869063
LL
5981 /* Remove any changed/removed planes */
5982 if (!enable) {
5983 if (pflip_needed &&
5984 plane->type != DRM_PLANE_TYPE_OVERLAY)
5985 return 0;
a7b06724 5986
9e869063
LL
5987 if (!old_plane_crtc)
5988 return 0;
62f55537 5989
9e869063
LL
5990 old_crtc_state = drm_atomic_get_old_crtc_state(
5991 state, old_plane_crtc);
5992 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9b690ef3 5993
9e869063
LL
5994 if (!dm_old_crtc_state->stream)
5995 return 0;
62f55537 5996
9e869063
LL
5997 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
5998 plane->base.id, old_plane_crtc->base.id);
9b690ef3 5999
9e869063
LL
6000 ret = dm_atomic_get_state(state, &dm_state);
6001 if (ret)
6002 return ret;
eb3dc897 6003
9e869063
LL
6004 if (!dc_remove_plane_from_context(
6005 dc,
6006 dm_old_crtc_state->stream,
6007 dm_old_plane_state->dc_state,
6008 dm_state->context)) {
62f55537 6009
9e869063
LL
 6010 ret = -EINVAL;
6011 return ret;
6012 }
e7b07cee 6013
9b690ef3 6014
9e869063
LL
6015 dc_plane_state_release(dm_old_plane_state->dc_state);
6016 dm_new_plane_state->dc_state = NULL;
1dc90497 6017
9e869063 6018 *lock_and_validation_needed = true;
1dc90497 6019
9e869063
LL
6020 } else { /* Add new planes */
6021 struct dc_plane_state *dc_new_plane_state;
1dc90497 6022
9e869063
LL
6023 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
6024 return 0;
e7b07cee 6025
9e869063
LL
6026 if (!new_plane_crtc)
6027 return 0;
e7b07cee 6028
9e869063
LL
6029 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
6030 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1dc90497 6031
9e869063
LL
6032 if (!dm_new_crtc_state->stream)
6033 return 0;
62f55537 6034
9e869063
LL
6035 if (pflip_needed && plane->type != DRM_PLANE_TYPE_OVERLAY)
6036 return 0;
62f55537 6037
9e869063 6038 WARN_ON(dm_new_plane_state->dc_state);
9b690ef3 6039
9e869063
LL
6040 dc_new_plane_state = dc_create_plane_state(dc);
6041 if (!dc_new_plane_state)
6042 return -ENOMEM;
62f55537 6043
9e869063
LL
6044 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
6045 plane->base.id, new_plane_crtc->base.id);
8c45c5db 6046
9e869063
LL
6047 ret = fill_plane_attributes(
6048 new_plane_crtc->dev->dev_private,
6049 dc_new_plane_state,
6050 new_plane_state,
6051 new_crtc_state);
6052 if (ret) {
6053 dc_plane_state_release(dc_new_plane_state);
6054 return ret;
6055 }
62f55537 6056
9e869063
LL
6057 ret = dm_atomic_get_state(state, &dm_state);
6058 if (ret) {
6059 dc_plane_state_release(dc_new_plane_state);
6060 return ret;
6061 }
eb3dc897 6062
9e869063
LL
6063 /*
6064 * Any atomic check errors that occur after this will
6065 * not need a release. The plane state will be attached
6066 * to the stream, and therefore part of the atomic
6067 * state. It'll be released when the atomic state is
6068 * cleaned.
6069 */
6070 if (!dc_add_plane_to_context(
6071 dc,
6072 dm_new_crtc_state->stream,
6073 dc_new_plane_state,
6074 dm_state->context)) {
62f55537 6075
9e869063
LL
6076 dc_plane_state_release(dc_new_plane_state);
6077 return -EINVAL;
6078 }
8c45c5db 6079
9e869063 6080 dm_new_plane_state->dc_state = dc_new_plane_state;
000b59ea 6081
9e869063
LL
6082 /* Tell DC to do a full surface update every time there
6083 * is a plane change. Inefficient, but works for now.
6084 */
6085 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
6086
6087 *lock_and_validation_needed = true;
62f55537 6088 }
e7b07cee
HW
6089
6090
62f55537
AG
6091 return ret;
6092}
a87fa993 6093
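/*
 * Build dc_surface_update/dc_stream_update descriptors for the new atomic
 * state and ask DC how invasive programming it would be: FAST updates can
 * skip global validation in atomic check, MED/FULL updates cannot.
 */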
eb3dc897
NK
6094static int
6095dm_determine_update_type_for_commit(struct dc *dc,
6096 struct drm_atomic_state *state,
6097 enum surface_update_type *out_type)
6098{
6099 struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
6100 int i, j, num_plane, ret = 0;
a87fa993
BL
6101 struct drm_plane_state *old_plane_state, *new_plane_state;
6102 struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
6103 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
6104 struct drm_plane *plane;
6105
6106 struct drm_crtc *crtc;
6107 struct drm_crtc_state *new_crtc_state, *old_crtc_state;
6108 struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
6109 struct dc_stream_status *status = NULL;
6110
fe96b99d
GS
6111 struct dc_surface_update *updates;
6112 struct dc_plane_state *surface;
a87fa993
BL
6113 enum surface_update_type update_type = UPDATE_TYPE_FAST;
6114
fe96b99d
GS
6115 updates = kcalloc(MAX_SURFACES, sizeof(*updates), GFP_KERNEL);
6116 surface = kcalloc(MAX_SURFACES, sizeof(*surface), GFP_KERNEL);
6117
4f712911
BL
6118 if (!updates || !surface) {
6119 DRM_ERROR("Plane or surface update failed to allocate");
6120 /* Set type to FULL to avoid crashing in DC*/
6121 update_type = UPDATE_TYPE_FULL;
eb3dc897 6122 goto cleanup;
4f712911 6123 }
a87fa993
BL
6124
6125 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
c448a53a
NK
6126 struct dc_stream_update stream_update = { 0 };
6127
a87fa993
BL
6128 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
6129 old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
6130 num_plane = 0;
6131
6836d239
NK
6132 if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
6133 update_type = UPDATE_TYPE_FULL;
6134 goto cleanup;
6135 }
a87fa993 6136
6836d239 6137 if (!new_dm_crtc_state->stream)
c744e974 6138 continue;
eb3dc897 6139
c744e974
NK
6140 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
6141 new_plane_crtc = new_plane_state->crtc;
6142 old_plane_crtc = old_plane_state->crtc;
6143 new_dm_plane_state = to_dm_plane_state(new_plane_state);
6144 old_dm_plane_state = to_dm_plane_state(old_plane_state);
eb3dc897 6145
c744e974
NK
6146 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6147 continue;
eb3dc897 6148
6836d239
NK
6149 if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
6150 update_type = UPDATE_TYPE_FULL;
6151 goto cleanup;
6152 }
6153
c744e974
NK
6154 if (!state->allow_modeset)
6155 continue;
a87fa993 6156
c744e974
NK
6157 if (crtc != new_plane_crtc)
6158 continue;
6159
6160 updates[num_plane].surface = &surface[num_plane];
6161
6162 if (new_crtc_state->mode_changed) {
6163 updates[num_plane].surface->src_rect =
6164 new_dm_plane_state->dc_state->src_rect;
6165 updates[num_plane].surface->dst_rect =
6166 new_dm_plane_state->dc_state->dst_rect;
6167 updates[num_plane].surface->rotation =
6168 new_dm_plane_state->dc_state->rotation;
6169 updates[num_plane].surface->in_transfer_func =
6170 new_dm_plane_state->dc_state->in_transfer_func;
6171 stream_update.dst = new_dm_crtc_state->stream->dst;
6172 stream_update.src = new_dm_crtc_state->stream->src;
6173 }
6174
6175 if (new_crtc_state->color_mgmt_changed) {
6176 updates[num_plane].gamma =
6177 new_dm_plane_state->dc_state->gamma_correction;
6178 updates[num_plane].in_transfer_func =
6179 new_dm_plane_state->dc_state->in_transfer_func;
6180 stream_update.gamut_remap =
6181 &new_dm_crtc_state->stream->gamut_remap_matrix;
6182 stream_update.out_transfer_func =
6183 new_dm_crtc_state->stream->out_transfer_func;
a87fa993
BL
6184 }
6185
c744e974
NK
6186 num_plane++;
6187 }
6188
6189 if (num_plane == 0)
6190 continue;
6191
6192 ret = dm_atomic_get_state(state, &dm_state);
6193 if (ret)
6194 goto cleanup;
6195
6196 old_dm_state = dm_atomic_get_old_state(state);
6197 if (!old_dm_state) {
6198 ret = -EINVAL;
6199 goto cleanup;
6200 }
6201
6202 status = dc_stream_get_status_from_state(old_dm_state->context,
6203 new_dm_crtc_state->stream);
6204
6205 update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane,
6206 &stream_update, status);
6207
6208 if (update_type > UPDATE_TYPE_MED) {
a87fa993 6209 update_type = UPDATE_TYPE_FULL;
eb3dc897 6210 goto cleanup;
a87fa993
BL
6211 }
6212 }
6213
eb3dc897 6214cleanup:
a87fa993
BL
6215 kfree(updates);
6216 kfree(surface);
6217
eb3dc897
NK
6218 *out_type = update_type;
6219 return ret;
a87fa993 6220}
62f55537 6221
b8592b48
LL
6222/**
6223 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
6224 * @dev: The DRM device
6225 * @state: The atomic state to commit
6226 *
6227 * Validate that the given atomic state is programmable by DC into hardware.
6228 * This involves constructing a &struct dc_state reflecting the new hardware
6229 * state we wish to commit, then querying DC to see if it is programmable. It's
6230 * important not to modify the existing DC state. Otherwise, atomic_check
6231 * may unexpectedly commit hardware changes.
6232 *
6233 * When validating the DC state, it's important that the right locks are
6234 * acquired. For full updates case which removes/adds/updates streams on one
6235 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
6236 * that any such full update commit will wait for completion of any outstanding
6237 * flip using DRMs synchronization events. See
6238 * dm_determine_update_type_for_commit()
6239 *
6240 * Note that DM adds the affected connectors for all CRTCs in state, when that
6241 * might not seem necessary. This is because DC stream creation requires the
6242 * DC sink, which is tied to the DRM connector state. Cleaning this up should
6243 * be possible but non-trivial - a possible TODO item.
6244 *
 6245 * Return: 0 on success, or a negative error code if validation failed.
6246 */
7578ecda
AD
6247static int amdgpu_dm_atomic_check(struct drm_device *dev,
6248 struct drm_atomic_state *state)
62f55537 6249{
62f55537 6250 struct amdgpu_device *adev = dev->dev_private;
eb3dc897 6251 struct dm_atomic_state *dm_state = NULL;
62f55537 6252 struct dc *dc = adev->dm.dc;
62f55537 6253 struct drm_connector *connector;
c2cea706 6254 struct drm_connector_state *old_con_state, *new_con_state;
62f55537 6255 struct drm_crtc *crtc;
fc9e9920 6256 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9e869063
LL
6257 struct drm_plane *plane;
6258 struct drm_plane_state *old_plane_state, *new_plane_state;
a87fa993
BL
6259 enum surface_update_type update_type = UPDATE_TYPE_FAST;
6260 enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
6261
1e88ad0a 6262 int ret, i;
e7b07cee 6263
62f55537
AG
6264 /*
 6265 * This bool will be set to true for any modeset/reset
 6266 * or plane update which implies a non-fast surface update.
6267 */
6268 bool lock_and_validation_needed = false;
6269
6270 ret = drm_atomic_helper_check_modeset(dev, state);
01e28f9c
MD
6271 if (ret)
6272 goto fail;
62f55537 6273
1e88ad0a
S
6274 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
6275 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
98e6436d 6276 !new_crtc_state->color_mgmt_changed &&
a93587b3 6277 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
1e88ad0a 6278 continue;
7bef1af3 6279
1e88ad0a
S
6280 if (!new_crtc_state->enable)
6281 continue;
fc9e9920 6282
1e88ad0a
S
6283 ret = drm_atomic_add_affected_connectors(state, crtc);
6284 if (ret)
6285 return ret;
fc9e9920 6286
1e88ad0a
S
6287 ret = drm_atomic_add_affected_planes(state, crtc);
6288 if (ret)
6289 goto fail;
e7b07cee
HW
6290 }
6291
2d9e6431
NK
6292 /*
6293 * Add all primary and overlay planes on the CRTC to the state
6294 * whenever a plane is enabled to maintain correct z-ordering
6295 * and to enable fast surface updates.
6296 */
6297 drm_for_each_crtc(crtc, dev) {
6298 bool modified = false;
6299
6300 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
6301 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6302 continue;
6303
6304 if (new_plane_state->crtc == crtc ||
6305 old_plane_state->crtc == crtc) {
6306 modified = true;
6307 break;
6308 }
6309 }
6310
6311 if (!modified)
6312 continue;
6313
6314 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
6315 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6316 continue;
6317
6318 new_plane_state =
6319 drm_atomic_get_plane_state(state, plane);
6320
6321 if (IS_ERR(new_plane_state)) {
6322 ret = PTR_ERR(new_plane_state);
6323 goto fail;
6324 }
6325 }
6326 }
6327
62f55537 6328 /* Remove existing planes if they are modified */
9e869063
LL
6329 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
6330 ret = dm_update_plane_state(dc, state, plane,
6331 old_plane_state,
6332 new_plane_state,
6333 false,
6334 &lock_and_validation_needed);
6335 if (ret)
6336 goto fail;
62f55537
AG
6337 }
6338
 6339 /* Disable all CRTCs which require disabling */
4b9674e5
LL
6340 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
6341 ret = dm_update_crtc_state(&adev->dm, state, crtc,
6342 old_crtc_state,
6343 new_crtc_state,
6344 false,
6345 &lock_and_validation_needed);
6346 if (ret)
6347 goto fail;
62f55537
AG
6348 }
6349
6350 /* Enable all crtcs which require enable */
4b9674e5
LL
6351 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
6352 ret = dm_update_crtc_state(&adev->dm, state, crtc,
6353 old_crtc_state,
6354 new_crtc_state,
6355 true,
6356 &lock_and_validation_needed);
6357 if (ret)
6358 goto fail;
62f55537
AG
6359 }
6360
6361 /* Add new/modified planes */
9e869063
LL
6362 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
6363 ret = dm_update_plane_state(dc, state, plane,
6364 old_plane_state,
6365 new_plane_state,
6366 true,
6367 &lock_and_validation_needed);
6368 if (ret)
6369 goto fail;
62f55537
AG
6370 }
6371
b349f76e
ES
6372 /* Run this here since we want to validate the streams we created */
6373 ret = drm_atomic_helper_check_planes(dev, state);
6374 if (ret)
6375 goto fail;
62f55537 6376
ebdd27e1 6377 /* Check scaling and underscan changes */
1f6010a9 6378 /* TODO: Removed scaling changes validation due to the inability to commit
e7b07cee
HW
 6379 * a new stream into the context without causing a full reset. Need to
 6380 * decide how to handle this.
6381 */
c2cea706 6382 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
54d76575
LSL
6383 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
6384 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
6385 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
e7b07cee
HW
6386
6387 /* Skip any modesets/resets */
0bc9706d
LSL
6388 if (!acrtc || drm_atomic_crtc_needs_modeset(
6389 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
e7b07cee
HW
6390 continue;
6391
b830ebc9 6392 /* Skip any thing not scale or underscan changes */
54d76575 6393 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
e7b07cee
HW
6394 continue;
6395
a87fa993 6396 overall_update_type = UPDATE_TYPE_FULL;
e7b07cee
HW
6397 lock_and_validation_needed = true;
6398 }
6399
eb3dc897
NK
6400 ret = dm_determine_update_type_for_commit(dc, state, &update_type);
6401 if (ret)
6402 goto fail;
a87fa993
BL
6403
6404 if (overall_update_type < update_type)
6405 overall_update_type = update_type;
6406
6407 /*
6408 * lock_and_validation_needed was an old way to determine if we need to set
6409 * the global lock. Leaving it in to check if we broke any corner cases
6410 * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
6411 * lock_and_validation_needed false = UPDATE_TYPE_FAST
6412 */
6413 if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
6414 WARN(1, "Global lock should be Set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
6415 else if (!lock_and_validation_needed && overall_update_type > UPDATE_TYPE_FAST)
6416 WARN(1, "Global lock should NOT be set, overall_update_type should be UPDATE_TYPE_FAST");
e7b07cee 6417
e7b07cee 6418
a87fa993 6419 if (overall_update_type > UPDATE_TYPE_FAST) {
eb3dc897
NK
6420 ret = dm_atomic_get_state(state, &dm_state);
6421 if (ret)
6422 goto fail;
e7b07cee
HW
6423
6424 ret = do_aquire_global_lock(dev, state);
6425 if (ret)
6426 goto fail;
1dc90497 6427
e750d56d 6428 if (dc_validate_global_state(dc, dm_state->context) != DC_OK) {
e7b07cee
HW
6429 ret = -EINVAL;
6430 goto fail;
6431 }
674e78ac
NK
6432 } else if (state->legacy_cursor_update) {
6433 /*
6434 * This is a fast cursor update coming from the plane update
6435 * helper, check if it can be done asynchronously for better
6436 * performance.
6437 */
6438 state->async_update = !drm_atomic_helper_async_check(dev, state);
e7b07cee
HW
6439 }
6440
6441 /* Must be success */
6442 WARN_ON(ret);
6443 return ret;
6444
6445fail:
6446 if (ret == -EDEADLK)
01e28f9c 6447 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
e7b07cee 6448 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
01e28f9c 6449 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
e7b07cee 6450 else
01e28f9c 6451 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
e7b07cee
HW
6452
6453 return ret;
6454}
6455
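/*
 * Read the sink's DPCD to check whether it can ignore the MSA timing
 * parameters, i.e. tolerate timings deviating from the advertised mode,
 * which is a prerequisite for variable refresh rate support.
 */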
3ee6b26b
AD
6456static bool is_dp_capable_without_timing_msa(struct dc *dc,
6457 struct amdgpu_dm_connector *amdgpu_dm_connector)
e7b07cee
HW
6458{
6459 uint8_t dpcd_data;
6460 bool capable = false;
6461
c84dec2f 6462 if (amdgpu_dm_connector->dc_link &&
e7b07cee
HW
6463 dm_helpers_dp_read_dpcd(
6464 NULL,
c84dec2f 6465 amdgpu_dm_connector->dc_link,
e7b07cee
HW
6466 DP_DOWN_STREAM_PORT_COUNT,
6467 &dpcd_data,
6468 sizeof(dpcd_data))) {
 6469 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
6470 }
6471
6472 return capable;
6473}
98e6436d
AK
6474void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
6475 struct edid *edid)
e7b07cee
HW
6476{
6477 int i;
e7b07cee
HW
6478 bool edid_check_required;
6479 struct detailed_timing *timing;
6480 struct detailed_non_pixel *data;
6481 struct detailed_data_monitor_range *range;
c84dec2f
HW
6482 struct amdgpu_dm_connector *amdgpu_dm_connector =
6483 to_amdgpu_dm_connector(connector);
bb47de73 6484 struct dm_connector_state *dm_con_state = NULL;
e7b07cee
HW
6485
6486 struct drm_device *dev = connector->dev;
6487 struct amdgpu_device *adev = dev->dev_private;
bb47de73 6488 bool freesync_capable = false;
b830ebc9 6489
8218d7f1
HW
6490 if (!connector->state) {
6491 DRM_ERROR("%s - Connector has no state", __func__);
bb47de73 6492 goto update;
8218d7f1
HW
6493 }
6494
98e6436d
AK
6495 if (!edid) {
6496 dm_con_state = to_dm_connector_state(connector->state);
6497
6498 amdgpu_dm_connector->min_vfreq = 0;
6499 amdgpu_dm_connector->max_vfreq = 0;
6500 amdgpu_dm_connector->pixel_clock_mhz = 0;
6501
bb47de73 6502 goto update;
98e6436d
AK
6503 }
6504
8218d7f1
HW
6505 dm_con_state = to_dm_connector_state(connector->state);
6506
e7b07cee 6507 edid_check_required = false;
c84dec2f 6508 if (!amdgpu_dm_connector->dc_sink) {
e7b07cee 6509 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
bb47de73 6510 goto update;
e7b07cee
HW
6511 }
6512 if (!adev->dm.freesync_module)
bb47de73 6513 goto update;
e7b07cee
HW
6514 /*
 6515 * If the EDID is non-NULL, restrict FreeSync support to DP and eDP only.
6516 */
6517 if (edid) {
c84dec2f
HW
6518 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
6519 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
e7b07cee
HW
6520 edid_check_required = is_dp_capable_without_timing_msa(
6521 adev->dm.dc,
c84dec2f 6522 amdgpu_dm_connector);
e7b07cee
HW
6523 }
6524 }
e7b07cee
HW
6525 if (edid_check_required == true && (edid->version > 1 ||
6526 (edid->version == 1 && edid->revision > 1))) {
6527 for (i = 0; i < 4; i++) {
6528
6529 timing = &edid->detailed_timings[i];
6530 data = &timing->data.other_data;
6531 range = &data->data.range;
6532 /*
6533 * Check if monitor has continuous frequency mode
6534 */
6535 if (data->type != EDID_DETAIL_MONITOR_RANGE)
6536 continue;
6537 /*
6538 * Check for flag range limits only. If flag == 1 then
 6539 * no additional timing information is provided.
 6540 * Default GTF, GTF Secondary curve and CVT are not
 6541 * supported.
6542 */
6543 if (range->flags != 1)
6544 continue;
6545
c84dec2f
HW
6546 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
6547 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
6548 amdgpu_dm_connector->pixel_clock_mhz =
e7b07cee
HW
6549 range->pixel_clock_mhz * 10;
6550 break;
6551 }
6552
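		/* Only report FreeSync capability when the panel exposes a
		 * usable range, i.e. more than 10 Hz between min and max. */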
c84dec2f 6553 if (amdgpu_dm_connector->max_vfreq -
98e6436d
AK
6554 amdgpu_dm_connector->min_vfreq > 10) {
6555
bb47de73 6556 freesync_capable = true;
e7b07cee
HW
6557 }
6558 }
bb47de73
NK
6559
6560update:
6561 if (dm_con_state)
6562 dm_con_state->freesync_capable = freesync_capable;
6563
6564 if (connector->vrr_capable_property)
6565 drm_connector_set_vrr_capable_property(connector,
6566 freesync_capable);
e7b07cee
HW
6567}
6568