/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "atom.h"
#include "amdgpu_dm.h"
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>

#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_edid.h>

#include "modules/inc/mod_freesync.h"

#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
#include "ivsrcid/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "i2caux_interface.h"

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

/* initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static void
amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct amdgpu_plane *aplane,
				unsigned long possible_crtcs);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);


static const enum drm_plane_type dm_plane_type_default[AMDGPU_MAX_PLANES] = {
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
};

static const enum drm_plane_type dm_plane_type_carizzo[AMDGPU_MAX_PLANES] = {
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_OVERLAY, /* YUV Capable Underlay */
};

static const enum drm_plane_type dm_plane_type_stoney[AMDGPU_MAX_PLANES] = {
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_OVERLAY, /* YUV Capable Underlay */
};

/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc_state->stream);
	}
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc_state->stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	/*
	 * The following check is inherited from both callers of this
	 * function. Why otg_inst can be -1 here still needs to be
	 * investigated.
	 */
	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

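/*
 * dm_pflip_high_irq() - page-flip completion handler, called in interrupt
 * context for each pageflip (GRPH_PFLIP) interrupt. Under the DRM
 * event_lock it checks that a flip was actually submitted, sends the
 * pending vblank event to userspace and releases the CRTC's vblank
 * reference (drm_crtc_vblank_put) taken when the flip was programmed.
 */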
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}


	/* wake up userspace */
	if (amdgpu_crtc->event) {
		/* Update to correct count/ts if racing with vblank irq */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);

		/* page flip completed. clean up */
		amdgpu_crtc->event = NULL;

	} else
		WARN_ON(1);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
			 __func__, amdgpu_crtc->crtc_id, amdgpu_crtc);

	drm_crtc_vblank_put(&amdgpu_crtc->base);
}

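/*
 * dm_crtc_high_irq() - vblank interrupt handler. Forwards the interrupt
 * to the DRM core as a vblank event for the matching CRTC and, if a CRTC
 * was found, services any pending CRC capture on it.
 */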
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	uint8_t crtc_index = 0;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);

	if (acrtc)
		crtc_index = acrtc->crtc_id;

	drm_handle_vblank(adev->ddev, crtc_index);
	/* guard against a NULL acrtc before dereferencing it */
	if (acrtc)
		amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
}

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

static void hotplug_notify_work_func(struct work_struct *work)
{
	struct amdgpu_display_manager *dm = container_of(work, struct amdgpu_display_manager, mst_hotplug_work);
	struct drm_device *dev = dm->ddev;

	drm_kms_helper_hotplug_event(dev);
}

#if defined(CONFIG_DRM_AMD_DC_FBC)
/* Allocate memory for FBC compressed data, sized for the worst case:
 * 4 bytes per pixel of the largest mode listed on the eDP connector.
 */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_comressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;


	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
				AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
				&compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
		}
	}
}
#endif


/* Init display KMS
 *
 * Returns 0 on success
 */
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;

	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->rev_id;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	adev->dm.dal = NULL;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	/*
	 * TODO debug why this doesn't work on Raven
	 */
	if (adev->flags & AMD_IS_APU &&
	    adev->asic_type >= CHIP_CARRIZO &&
	    adev->asic_type < CHIP_RAVEN)
		init_data.flags.gpu_vm_support = true;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	INIT_WORK(&adev->dm.mst_hotplug_work, hotplug_notify_work_func);

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -1;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	amdgpu_dm_destroy_drm_device(&adev->dm);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}
	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	return;
}

static int dm_sw_init(void *handle)
{
	return 0;
}

static int dm_sw_fini(void *handle)
{
	return 0;
}

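/*
 * Walk all connectors and start the DP MST topology manager on every link
 * that detected an MST branch device, so downstream MST sinks get
 * enumerated through the DRM MST framework. On failure the link is
 * downgraded to a single-stream connection.
 */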
static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	int ret = 0;

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					aconnector, aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
				/* break, not return: connection_mutex must be unlocked below */
				break;
			}
		}
	}

	drm_modeset_unlock(&dev->mode_config.connection_mutex);
	return ret;
}

static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return detect_mst_link_for_all_connectors(adev->ddev);
}

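/*
 * Suspend or resume the DP MST topology managers around S3 transitions.
 * Only root MST connectors are handled; connectors created for downstream
 * MST ports (aconnector->mst_port != NULL) are skipped.
 */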
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    !aconnector->mst_port) {

			if (suspend)
				drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr);
			else
				drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr);
		}
	}

	drm_modeset_unlock(&dev->mode_config.connection_mutex);
}

static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}

static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}

static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	s3_handle_mst(adev->ddev, true);

	amdgpu_dm_irq_suspend(adev);

	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return ret;
}

static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc)
{
	uint32_t i;
	struct drm_connector_state *new_con_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		crtc_from_state = new_con_state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_dm_connector(connector);
	}

	return NULL;
}

static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	/* power on hardware */
	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

	ret = amdgpu_dm_display_resume(adev);
	return ret;
}

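/*
 * Second half of resume: re-detect all links, force a modeset on the
 * CRTCs in the cached atomic state, release the dc states that were
 * duplicated during suspend (atomic_check recreates them), and finally
 * replay the cached state.
 */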
int amdgpu_dm_display_resume(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct dm_plane_state *dm_new_plane_state;

	int ret = 0;
	int i;

	/* program HPD filter */
	dc_resume(dm->dc);

	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* Do detection */
	list_for_each_entry(connector,
			&ddev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_dm_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
			aconnector->fake_enable = false;

		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}

	/* Force mode set in atomic commit */
	for_each_new_crtc_in_state(adev->dm.cached_state, crtc, new_crtc_state, i)
		new_crtc_state->active_changed = true;

	/*
	 * atomic_check is expected to create the dc states. We need to release
	 * them here, since they were duplicated as part of the suspend
	 * procedure.
	 */
	for_each_new_crtc_in_state(adev->dm.cached_state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->stream) {
			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;
		}
	}

	for_each_new_plane_in_state(adev->dm.cached_state, plane, new_plane_state, i) {
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		if (dm_new_plane_state->dc_state) {
			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
			dc_plane_state_release(dm_new_plane_state->dc_state);
			dm_new_plane_state->dc_state = NULL;
		}
	}

	ret = drm_atomic_helper_resume(ddev, adev->dm.cached_state);

	adev->dm.cached_state = NULL;

	amdgpu_dm_irq_resume_late(adev);

	return ret;
}

static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};

const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};

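/*
 * DM subclasses drm_atomic_state to carry a dc validation context
 * (dm_atomic_state::context) next to the DRM state; the alloc/clear/free
 * hooks below keep that context's lifetime tied to the DRM state.
 */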
static struct drm_atomic_state *
dm_atomic_state_alloc(struct drm_device *dev)
{
	struct dm_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (!state)
		return NULL;

	if (drm_atomic_state_init(dev, &state->base) < 0)
		goto fail;

	return &state->base;

fail:
	kfree(state);
	return NULL;
}

static void
dm_atomic_state_clear(struct drm_atomic_state *state)
{
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);

	if (dm_state->context) {
		dc_release_state(dm_state->context);
		dm_state->context = NULL;
	}

	drm_atomic_state_default_clear(state);
}

static void
dm_atomic_state_alloc_free(struct drm_atomic_state *state)
{
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);

	drm_atomic_state_default_release(state);
	kfree(dm_state);
}

static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = amdgpu_dm_atomic_commit,
	.atomic_state_alloc = dm_atomic_state_alloc,
	.atomic_state_clear = dm_atomic_state_clear,
	.atomic_state_free = dm_atomic_state_alloc_free
};

static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};

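/*
 * Synchronize the connector's dc_sink, EDID property and freesync state
 * with the outcome of the last link detection. The extra retain/release
 * handling keeps the emulated sink (dc_em_sink) usable across
 * disconnects, e.g. for EDID-managed and headless configurations.
 */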
static void
amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;


	sink = aconnector->dc_link->local_sink;

	/* Edid mgmt connector gets first update only in mode_valid hook and then
	 * the connector sink is set to either fake or physical sink depending on
	 * link status. Don't do it here during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/* For S3 resume with headless use em_sink to fake the stream
		 * because on resume connector->sink is set to NULL
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_remove_sink_from_freesync_module(
							connector);
				/* retain and release below are used to
				 * bump up refcount for sink because the link doesn't point
				 * to it anymore after disconnect, so on next crtc to connector
				 * reshuffle by UMD we will get into unwanted dc_sink release
				 */
				if (aconnector->dc_sink != aconnector->dc_em_sink)
					dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			amdgpu_dm_add_sink_to_freesync_module(
						connector, aconnector->edid);
		} else {
			amdgpu_dm_remove_sink_from_freesync_module(connector);
			if (!aconnector->dc_sink)
				aconnector->dc_sink = aconnector->dc_em_sink;
			else if (aconnector->dc_sink != aconnector->dc_em_sink)
				dc_sink_retain(aconnector->dc_sink);
		}

		mutex_unlock(&dev->mode_config.mutex);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
		return;

	if (aconnector->dc_sink == sink) {
		/* We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!! */
		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				aconnector->connector_id);
		return;
	}

	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
		aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/* 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do */
	if (sink) {
		/* TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here. */
		if (aconnector->dc_sink)
			amdgpu_dm_remove_sink_from_freesync_module(
							connector);

		aconnector->dc_sink = sink;
		if (sink->dc_edid.length == 0) {
			aconnector->edid = NULL;
		} else {
			aconnector->edid =
				(struct edid *) sink->dc_edid.raw_edid;


			drm_mode_connector_update_edid_property(connector,
					aconnector->edid);
		}
		amdgpu_dm_add_sink_to_freesync_module(connector, aconnector->edid);

	} else {
		amdgpu_dm_remove_sink_from_freesync_module(connector);
		drm_mode_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		aconnector->dc_sink = NULL;
	}

	mutex_unlock(&dev->mode_config.mutex);
}

static void handle_hpd_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;

	/* In case of failure or MST no need to update connector status or notify
	 * the OS since (for the MST case) MST does this in its own context.
	 */
	mutex_lock(&aconnector->hpd_lock);

	if (aconnector->fake_enable)
		aconnector->fake_enable = false;

	if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
		amdgpu_dm_update_connector_after_detect(aconnector);


		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);

}

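/*
 * Service a DP short pulse on an MST link: read the sink's IRQ vector
 * (the ESI range or the legacy DPCD 0x200 range, depending on DPCD
 * revision), pass it to the DRM MST helper, ACK what was handled, and
 * repeat while the sink keeps raising new IRQs, bounded by
 * max_process_count iterations.
 */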
static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;

	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

	while (dret == dpcd_bytes_to_read &&
			process_count < max_process_count) {
		uint8_t retry;
		dret = 0;

		process_count++;

		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify down stream */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is a new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else {
			break;
		}
	}

	if (process_count == max_process_count)
		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}

static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;

	/* TODO: Temporarily take a mutex so the hpd interrupt does not have a
	 * gpio conflict; once the i2c helper is implemented, this mutex should
	 * be retired.
	 */
	if (dc_link->type != dc_connection_mst_branch)
		mutex_lock(&aconnector->hpd_lock);

	if (dc_link_handle_hpd_rx_irq(dc_link, NULL) &&
	    !is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);


			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		}
	}
	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
	    (dc_link->type == dc_connection_mst_branch))
		dm_handle_hpd_rx_irq(aconnector);

	if (dc_link->type != dc_connection_mst_branch)
		mutex_unlock(&aconnector->hpd_lock);
}

static void register_hpd_handlers(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	struct amdgpu_dm_connector *aconnector;
	const struct dc_link *dc_link;
	struct dc_interrupt_params int_params = {0};

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {

		aconnector = to_amdgpu_dm_connector(connector);
		dc_link = aconnector->dc_link;

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_irq,
					(void *) aconnector);
		}

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {

			/* Also register for DP short pulse (hpd_rx). */
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_rx_irq,
					(void *) aconnector);
		}
	}
}

/* Register IRQ sources and initialize IRQ callbacks */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned client_id = AMDGPU_IH_CLIENTID_LEGACY;

	if (adev->asic_type == CHIP_VEGA10 ||
	    adev->asic_type == CHIP_VEGA12 ||
	    adev->asic_type == CHIP_RAVEN)
		client_id = SOC15_IH_CLIENTID_DCE;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/* Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling. */

	/* Use VBLANK interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
/* Register IRQ sources and initialize IRQ callbacks */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/* Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VSTARTUP interrupt */
	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);

		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
			&adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
#endif

static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
{
	int r;

	adev->mode_info.mode_config_initialized = true;

	adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
	adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;

	adev->ddev->mode_config.max_width = 16384;
	adev->ddev->mode_config.max_height = 16384;

	adev->ddev->mode_config.preferred_depth = 24;
	adev->ddev->mode_config.prefer_shadow = 1;
	/* indicate support of immediate flip */
	adev->ddev->mode_config.async_page_flip = true;

	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;

	r = amdgpu_display_modeset_create_props(adev);
	if (r)
		return r;

	return 0;
}

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);

	if (dc_link_set_backlight_level(dm->backlight_link,
			bd->props.brightness, 0, 0))
		return 0;
	else
		return 1;
}

static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
	return bd->props.brightness;
}

static const struct backlight_ops amdgpu_dm_backlight_ops = {
	.get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status = amdgpu_dm_backlight_update_status,
};

static void
amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
{
	char bl_name[16];
	struct backlight_properties props = { 0 };

	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
	props.type = BACKLIGHT_RAW;

	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
			dm->adev->ddev->primary->index);

	dm->backlight_dev = backlight_device_register(bl_name,
			dm->adev->ddev->dev,
			dm,
			&amdgpu_dm_backlight_ops,
			&props);

	if (IS_ERR(dm->backlight_dev))
		DRM_ERROR("DM: Backlight registration failed!\n");
	else
		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
}

#endif

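/*
 * Allocate one amdgpu_plane and register it with DRM. possible_crtcs is
 * a bitmask of CRTC indexes: 1 << plane_id ties a non-underlay plane to
 * exactly one CRTC, while 0xff exposes an underlay plane to all CRTCs.
 */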
static int initialize_plane(struct amdgpu_display_manager *dm,
			    struct amdgpu_mode_info *mode_info,
			    int plane_id)
{
	struct amdgpu_plane *plane;
	unsigned long possible_crtcs;
	int ret = 0;

	plane = kzalloc(sizeof(struct amdgpu_plane), GFP_KERNEL);
	mode_info->planes[plane_id] = plane;

	if (!plane) {
		DRM_ERROR("KMS: Failed to allocate plane\n");
		return -ENOMEM;
	}
	plane->base.type = mode_info->plane_type[plane_id];

	/*
	 * HACK: IGT tests expect that each plane can only have
	 * one possible CRTC. For now, set one CRTC for each
	 * plane that is not an underlay, but still allow multiple
	 * CRTCs for underlay planes.
	 */
	possible_crtcs = 1 << plane_id;
	if (plane_id >= dm->dc->caps.max_streams)
		possible_crtcs = 0xff;

	ret = amdgpu_dm_plane_init(dm, mode_info->planes[plane_id], possible_crtcs);

	if (ret) {
		DRM_ERROR("KMS: Failed to initialize plane\n");
		return ret;
	}

	return ret;
}

static void register_backlight_device(struct amdgpu_display_manager *dm,
				      struct dc_link *link)
{
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none) {
		/* Even if registration fails, we should continue with
		 * DM initialization because not having a backlight control
		 * is better than a black screen.
		 */
		amdgpu_dm_register_backlight_device(dm);

		if (dm->backlight_dev)
			dm->backlight_link = link;
	}
#endif
}

/* In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with DAL component
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	int32_t i;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct amdgpu_encoder *aencoder = NULL;
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	uint32_t link_cnt;
	int32_t total_overlay_planes, total_primary_planes;

	link_cnt = dm->dc->caps.max_links;
	if (amdgpu_dm_mode_config_init(dm->adev)) {
		DRM_ERROR("DM: Failed to initialize mode config\n");
		return -1;
	}

	/* Identify the number of planes to be initialized */
	total_overlay_planes = dm->dc->caps.max_slave_planes;
	total_primary_planes = dm->dc->caps.max_planes - dm->dc->caps.max_slave_planes;

	/* First initialize overlay planes, index starting after primary planes */
	for (i = (total_overlay_planes - 1); i >= 0; i--) {
		if (initialize_plane(dm, mode_info, (total_primary_planes + i))) {
			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
			goto fail;
		}
	}

	/* Initialize primary planes */
	for (i = (total_primary_planes - 1); i >= 0; i--) {
		if (initialize_plane(dm, mode_info, i)) {
			DRM_ERROR("KMS: Failed to initialize primary plane\n");
			goto fail;
		}
	}

	for (i = 0; i < dm->dc->caps.max_streams; i++)
		if (amdgpu_dm_crtc_init(dm, &mode_info->planes[i]->base, i)) {
			DRM_ERROR("KMS: Failed to initialize crtc\n");
			goto fail;
		}

	dm->display_indexes_num = dm->dc->caps.max_streams;

	/* loops over all connectors on the board */
	for (i = 0; i < link_cnt; i++) {
		struct dc_link *link = NULL;

		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
			DRM_ERROR(
				"KMS: Cannot support more than %d display indexes\n",
					AMDGPU_DM_MAX_DISPLAY_INDEX);
			continue;
		}

		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
		if (!aconnector)
			goto fail;

		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
		if (!aencoder)
			goto fail;

		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
			DRM_ERROR("KMS: Failed to initialize encoder\n");
			goto fail;
		}

		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
			DRM_ERROR("KMS: Failed to initialize connector\n");
			goto fail;
		}

		link = dc_get_link_at_index(dm->dc, i);

		if (dc_link_detect(link, DETECT_REASON_BOOT)) {
			amdgpu_dm_update_connector_after_detect(aconnector);
			register_backlight_device(dm, link);
		}


	}

	/* Software is initialized. Now we can register interrupt handlers. */
	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
#if defined(CONFIG_DRM_AMD_DC_VEGAM)
	case CHIP_VEGAM:
#endif
	case CHIP_VEGA10:
	case CHIP_VEGA12:
		if (dce110_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
	case CHIP_RAVEN:
		if (dcn10_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		/*
		 * Temporary disable until pplib/smu interaction is implemented
		 */
		dm->dc->debug.disable_stutter = true;
		break;
#endif
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		goto fail;
	}

	return 0;
fail:
	kfree(aencoder);
	kfree(aconnector);
	for (i = 0; i < dm->dc->caps.max_planes; i++)
		kfree(mode_info->planes[i]);
	return -1;
}

static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
	drm_mode_config_cleanup(dm->ddev);
	return;
}

/******************************************************************************
 * amdgpu_display_funcs functions
 *****************************************************************************/

/**
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
}

static void dm_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
				   u8 level)
{
	/* TODO: translate amdgpu_encoder to display_index and call DAL */
}

static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
{
	/* TODO: translate amdgpu_encoder to display_index and call DAL */
	return 0;
}

static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
				  struct drm_file *filp)
{
	struct mod_freesync_params freesync_params;
	uint8_t num_streams;
	uint8_t i;

	struct amdgpu_device *adev = dev->dev_private;
	int r = 0;

	/* Get freesync enable flag from DRM */

	num_streams = dc_get_current_stream_count(adev->dm.dc);

	for (i = 0; i < num_streams; i++) {
		struct dc_stream_state *stream;
		stream = dc_get_stream_at_index(adev->dm.dc, i);

		mod_freesync_update_state(adev->dm.freesync_module,
					  &stream, 1, &freesync_params);
	}

	return r;
}

static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
	.backlight_set_level =
		dm_set_backlight_level,/* called unconditionally */
	.backlight_get_level =
		dm_get_backlight_level,/* called unconditionally */
	.hpd_sense = NULL,/* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos,/* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
	.notify_freesync = amdgpu_notify_freesync,

};

#if defined(CONFIG_DEBUG_KERNEL_DC)

static ssize_t s3_debug_store(struct device *device,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t count)
{
	int ret;
	int s3_state;
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_dev->dev_private;

	ret = kstrtoint(buf, 0, &s3_state);

	if (ret == 0) {
		if (s3_state) {
			dm_resume(adev);
			drm_kms_helper_hotplug_event(adev->ddev);
		} else
			dm_suspend(adev);
	}

	return ret == 0 ? count : 0;
}

DEVICE_ATTR_WO(s3_debug);

#endif

static int dm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		adev->mode_info.plane_type = dm_plane_type_default;
		break;
	case CHIP_KAVERI:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		adev->mode_info.plane_type = dm_plane_type_default;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		adev->mode_info.plane_type = dm_plane_type_default;
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		adev->mode_info.plane_type = dm_plane_type_default;
		break;
	case CHIP_CARRIZO:
		adev->mode_info.num_crtc = 3;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		adev->mode_info.plane_type = dm_plane_type_carizzo;
		break;
	case CHIP_STONEY:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		adev->mode_info.plane_type = dm_plane_type_stoney;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		adev->mode_info.plane_type = dm_plane_type_default;
		break;
	case CHIP_POLARIS10:
#if defined(CONFIG_DRM_AMD_DC_VEGAM)
	case CHIP_VEGAM:
#endif
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		adev->mode_info.plane_type = dm_plane_type_default;
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		adev->mode_info.plane_type = dm_plane_type_default;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
	case CHIP_RAVEN:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 4;
		adev->mode_info.num_dig = 4;
		adev->mode_info.plane_type = dm_plane_type_default;
		break;
#endif
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	amdgpu_dm_set_irq_funcs(adev);

	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dm_display_funcs;

	/* Note: Do NOT change adev->audio_endpt_rreg and
	 * adev->audio_endpt_wreg because they are initialised in
	 * amdgpu_device_init() */
#if defined(CONFIG_DEBUG_KERNEL_DC)
	device_create_file(
		adev->ddev->dev,
		&dev_attr_s3_debug);
#endif

	return 0;
}

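/*
 * A full modeset is only required when the CRTC is flagged for a modeset by
 * the atomic helpers and ends up enabled and active; modereset covers the
 * opposite transition, where the CRTC is being disabled.
 */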
static bool modeset_required(struct drm_crtc_state *crtc_state,
			     struct dc_stream_state *new_stream,
			     struct dc_stream_state *old_stream)
{
	if (!drm_atomic_crtc_needs_modeset(crtc_state))
		return false;

	if (!crtc_state->enable)
		return false;

	return crtc_state->active;
}

static bool modereset_required(struct drm_crtc_state *crtc_state)
{
	if (!drm_atomic_crtc_needs_modeset(crtc_state))
		return false;

	return !crtc_state->enable || !crtc_state->active;
}

static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}

static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};

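/*
 * Translate the DRM plane state into DC source/destination/clip rects.
 * DRM source coordinates are 16.16 fixed point, hence the >> 16; a zero
 * width or height in either rect is rejected as invalid.
 */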
static bool fill_rects_from_plane_state(const struct drm_plane_state *state,
					struct dc_plane_state *plane_state)
{
	plane_state->src_rect.x = state->src_x >> 16;
	plane_state->src_rect.y = state->src_y >> 16;
	/* We ignore the mantissa for now and do not deal with floating pixels :( */
	plane_state->src_rect.width = state->src_w >> 16;

	if (plane_state->src_rect.width == 0)
		return false;

	plane_state->src_rect.height = state->src_h >> 16;
	if (plane_state->src_rect.height == 0)
		return false;

	plane_state->dst_rect.x = state->crtc_x;
	plane_state->dst_rect.y = state->crtc_y;

	if (state->crtc_w == 0)
		return false;

	plane_state->dst_rect.width = state->crtc_w;

	if (state->crtc_h == 0)
		return false;

	plane_state->dst_rect.height = state->crtc_h;

	plane_state->clip_rect = plane_state->dst_rect;

	switch (state->rotation & DRM_MODE_ROTATE_MASK) {
	case DRM_MODE_ROTATE_0:
		plane_state->rotation = ROTATION_ANGLE_0;
		break;
	case DRM_MODE_ROTATE_90:
		plane_state->rotation = ROTATION_ANGLE_90;
		break;
	case DRM_MODE_ROTATE_180:
		plane_state->rotation = ROTATION_ANGLE_180;
		break;
	case DRM_MODE_ROTATE_270:
		plane_state->rotation = ROTATION_ANGLE_270;
		break;
	default:
		plane_state->rotation = ROTATION_ANGLE_0;
		break;
	}

	return true;
}

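/*
 * Reading tiling flags requires the underlying BO to be reserved, so take
 * and drop the reservation around amdgpu_bo_get_tiling_flags().
 */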
static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
		       uint64_t *tiling_flags)
{
	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
	int r = amdgpu_bo_reserve(rbo, false);

	if (unlikely(r)) {
		/* Don't show an error message when returning -ERESTARTSYS */
		if (r != -ERESTARTSYS)
			DRM_ERROR("Unable to reserve buffer: %d\n", r);
		return r;
	}

	if (tiling_flags)
		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);

	amdgpu_bo_unreserve(rbo);

	return r;
}

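/*
 * Derive the DC surface pixel format, plane sizes/pitches and GFX8/GFX9
 * tiling parameters from the amdgpu framebuffer. Video (NV12/NV21)
 * surfaces get separate luma/chroma sizes with the width aligned to 64.
 */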
static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
					 struct dc_plane_state *plane_state,
					 const struct amdgpu_framebuffer *amdgpu_fb)
{
	uint64_t tiling_flags;
	unsigned int awidth;
	const struct drm_framebuffer *fb = &amdgpu_fb->base;
	int ret = 0;
	struct drm_format_name_buf format_name;

	ret = get_fb_info(
		amdgpu_fb,
		&tiling_flags);

	if (ret)
		return ret;

	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
		break;
	case DRM_FORMAT_RGB565:
		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
		break;
	case DRM_FORMAT_NV21:
		plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
		break;
	case DRM_FORMAT_NV12:
		plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
		break;
	default:
		DRM_ERROR("Unsupported screen format %s\n",
			  drm_get_format_name(fb->format->format, &format_name));
		return -EINVAL;
	}

	if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
		plane_state->address.type = PLN_ADDR_TYPE_GRAPHICS;
		plane_state->plane_size.grph.surface_size.x = 0;
		plane_state->plane_size.grph.surface_size.y = 0;
		plane_state->plane_size.grph.surface_size.width = fb->width;
		plane_state->plane_size.grph.surface_size.height = fb->height;
		plane_state->plane_size.grph.surface_pitch =
				fb->pitches[0] / fb->format->cpp[0];
		/* TODO: unhardcode */
		plane_state->color_space = COLOR_SPACE_SRGB;

	} else {
		awidth = ALIGN(fb->width, 64);
		plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
		plane_state->plane_size.video.luma_size.x = 0;
		plane_state->plane_size.video.luma_size.y = 0;
		plane_state->plane_size.video.luma_size.width = awidth;
		plane_state->plane_size.video.luma_size.height = fb->height;
		/* TODO: unhardcode */
		plane_state->plane_size.video.luma_pitch = awidth;

		plane_state->plane_size.video.chroma_size.x = 0;
		plane_state->plane_size.video.chroma_size.y = 0;
		plane_state->plane_size.video.chroma_size.width = awidth;
		plane_state->plane_size.video.chroma_size.height = fb->height;
		plane_state->plane_size.video.chroma_pitch = awidth / 2;

		/* TODO: unhardcode */
		plane_state->color_space = COLOR_SPACE_YCBCR709;
	}

	memset(&plane_state->tiling_info, 0, sizeof(plane_state->tiling_info));

	/* Fill GFX8 params */
	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		/* XXX fix me for VI */
		plane_state->tiling_info.gfx8.num_banks = num_banks;
		plane_state->tiling_info.gfx8.array_mode =
				DC_ARRAY_2D_TILED_THIN1;
		plane_state->tiling_info.gfx8.tile_split = tile_split;
		plane_state->tiling_info.gfx8.bank_width = bankw;
		plane_state->tiling_info.gfx8.bank_height = bankh;
		plane_state->tiling_info.gfx8.tile_aspect = mtaspect;
		plane_state->tiling_info.gfx8.tile_mode =
				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
			== DC_ARRAY_1D_TILED_THIN1) {
		plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
	}

	plane_state->tiling_info.gfx8.pipe_config =
			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);

	if (adev->asic_type == CHIP_VEGA10 ||
	    adev->asic_type == CHIP_VEGA12 ||
	    adev->asic_type == CHIP_RAVEN) {
		/* Fill GFX9 params */
		plane_state->tiling_info.gfx9.num_pipes =
			adev->gfx.config.gb_addr_config_fields.num_pipes;
		plane_state->tiling_info.gfx9.num_banks =
			adev->gfx.config.gb_addr_config_fields.num_banks;
		plane_state->tiling_info.gfx9.pipe_interleave =
			adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
		plane_state->tiling_info.gfx9.num_shader_engines =
			adev->gfx.config.gb_addr_config_fields.num_se;
		plane_state->tiling_info.gfx9.max_compressed_frags =
			adev->gfx.config.gb_addr_config_fields.max_compress_frags;
		plane_state->tiling_info.gfx9.num_rb_per_se =
			adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
		plane_state->tiling_info.gfx9.swizzle =
			AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
		plane_state->tiling_info.gfx9.shaderEnable = 1;
	}

	plane_state->visible = true;
	plane_state->scaling_quality.h_taps_c = 0;
	plane_state->scaling_quality.v_taps_c = 0;

	/* is this needed? is plane_state zeroed at allocation? */
	plane_state->scaling_quality.h_taps = 0;
	plane_state->scaling_quality.v_taps = 0;
	plane_state->stereo_format = PLANE_STEREO_FORMAT_NONE;

	return ret;

}

static int fill_plane_attributes(struct amdgpu_device *adev,
				 struct dc_plane_state *dc_plane_state,
				 struct drm_plane_state *plane_state,
				 struct drm_crtc_state *crtc_state)
{
	const struct amdgpu_framebuffer *amdgpu_fb =
		to_amdgpu_framebuffer(plane_state->fb);
	const struct drm_crtc *crtc = plane_state->crtc;
	int ret = 0;

	if (!fill_rects_from_plane_state(plane_state, dc_plane_state))
		return -EINVAL;

	ret = fill_plane_attributes_from_fb(
		crtc->dev->dev_private,
		dc_plane_state,
		amdgpu_fb);

	if (ret)
		return ret;

	/*
	 * Always set input transfer function, since plane state is refreshed
	 * every time.
	 */
	ret = amdgpu_dm_set_degamma_lut(crtc_state, dc_plane_state);
	if (ret) {
		dc_transfer_func_release(dc_plane_state->in_transfer_func);
		dc_plane_state->in_transfer_func = NULL;
	}

	return ret;
}

/*****************************************************************************/

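/*
 * Compute the stream src (viewport) and dst (addressable area) rects from
 * the requested mode and the connector scaling property: RMX_ASPECT/RMX_OFF
 * preserve the aspect ratio, RMX_CENTER centers the viewport, and underscan
 * borders shrink the destination further.
 */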
static void update_stream_scaling_settings(const struct drm_display_mode *mode,
					   const struct dm_connector_state *dm_state,
					   struct dc_stream_state *stream)
{
	enum amdgpu_rmx_type rmx_type;

	struct rect src = { 0 }; /* viewport in composition space */
	struct rect dst = { 0 }; /* stream addressable area */

	/* no mode. nothing to be done */
	if (!mode)
		return;

	/* Full screen scaling by default */
	src.width = mode->hdisplay;
	src.height = mode->vdisplay;
	dst.width = stream->timing.h_addressable;
	dst.height = stream->timing.v_addressable;

	if (dm_state) {
		rmx_type = dm_state->scaling;
		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
			if (src.width * dst.height <
					src.height * dst.width) {
				/* height needs less upscaling/more downscaling */
				dst.width = src.width *
						dst.height / src.height;
			} else {
				/* width needs less upscaling/more downscaling */
				dst.height = src.height *
						dst.width / src.width;
			}
		} else if (rmx_type == RMX_CENTER) {
			dst = src;
		}

		dst.x = (stream->timing.h_addressable - dst.width) / 2;
		dst.y = (stream->timing.v_addressable - dst.height) / 2;

		if (dm_state->underscan_enable) {
			dst.x += dm_state->underscan_hborder / 2;
			dst.y += dm_state->underscan_vborder / 2;
			dst.width -= dm_state->underscan_hborder;
			dst.height -= dm_state->underscan_vborder;
		}
	}

	stream->src = src;
	stream->dst = dst;

	DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
			 dst.x, dst.y, dst.width, dst.height);

}

static enum dc_color_depth
convert_color_depth_from_display_info(const struct drm_connector *connector)
{
	uint32_t bpc = connector->display_info.bpc;

	/* Limit color depth to 8 bpc
	 * TODO: Still need to handle deep color
	 */
	if (bpc > 8)
		bpc = 8;

	switch (bpc) {
	case 0:
		/* Temporary work around: DRM doesn't parse color depth for
		 * EDID revisions before 1.4
		 * TODO: Fix edid parsing
		 */
		return COLOR_DEPTH_888;
	case 6:
		return COLOR_DEPTH_666;
	case 8:
		return COLOR_DEPTH_888;
	case 10:
		return COLOR_DEPTH_101010;
	case 12:
		return COLOR_DEPTH_121212;
	case 14:
		return COLOR_DEPTH_141414;
	case 16:
		return COLOR_DEPTH_161616;
	default:
		return COLOR_DEPTH_UNDEFINED;
	}
}

static enum dc_aspect_ratio
get_aspect_ratio(const struct drm_display_mode *mode_in)
{
	int32_t width = mode_in->crtc_hdisplay * 9;
	int32_t height = mode_in->crtc_vdisplay * 16;

	if ((width - height) < 10 && (width - height) > -10)
		return ASPECT_RATIO_16_9;
	else
		return ASPECT_RATIO_4_3;
}

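/*
 * Pick the output color space from the pixel encoding: YCbCr streams are
 * classified as BT.709 or BT.601 based on the pixel clock (HD vs SD),
 * while RGB maps to sRGB.
 */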
static enum dc_color_space
get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
{
	enum dc_color_space color_space = COLOR_SPACE_SRGB;

	switch (dc_crtc_timing->pixel_encoding) {
	case PIXEL_ENCODING_YCBCR422:
	case PIXEL_ENCODING_YCBCR444:
	case PIXEL_ENCODING_YCBCR420:
	{
		/*
		 * 27030khz is the separation point between HDTV and SDTV
		 * according to the HDMI spec; we use YCbCr709 and YCbCr601
		 * respectively.
		 */
		if (dc_crtc_timing->pix_clk_khz > 27030) {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR709_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR709;
		} else {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR601_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR601;
		}

	}
	break;
	case PIXEL_ENCODING_RGB:
		color_space = COLOR_SPACE_SRGB;
		break;

	default:
		WARN_ON(1);
		break;
	}

	return color_space;
}

/*****************************************************************************/

static void
fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
					     const struct drm_display_mode *mode_in,
					     const struct drm_connector *connector)
{
	struct dc_crtc_timing *timing_out = &stream->timing;

	memset(timing_out, 0, sizeof(struct dc_crtc_timing));

	timing_out->h_border_left = 0;
	timing_out->h_border_right = 0;
	timing_out->v_border_top = 0;
	timing_out->v_border_bottom = 0;
	/* TODO: un-hardcode */

	if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
			&& stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
	else
		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;

	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
	timing_out->display_color_depth = convert_color_depth_from_display_info(
			connector);
	timing_out->scan_type = SCANNING_TYPE_NODATA;
	timing_out->hdmi_vic = 0;
	timing_out->vic = drm_match_cea_mode(mode_in);

	timing_out->h_addressable = mode_in->crtc_hdisplay;
	timing_out->h_total = mode_in->crtc_htotal;
	timing_out->h_sync_width =
		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
	timing_out->h_front_porch =
		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
	timing_out->v_total = mode_in->crtc_vtotal;
	timing_out->v_addressable = mode_in->crtc_vdisplay;
	timing_out->v_front_porch =
		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
	timing_out->v_sync_width =
		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
	timing_out->pix_clk_khz = mode_in->crtc_clock;
	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
	if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
		timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
	if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
		timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;

	stream->output_color_space = get_output_color_space(timing_out);

	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
}

static void fill_audio_info(struct audio_info *audio_info,
			    const struct drm_connector *drm_connector,
			    const struct dc_sink *dc_sink)
{
	int i = 0;
	int cea_revision = 0;
	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;

	audio_info->manufacture_id = edid_caps->manufacturer_id;
	audio_info->product_id = edid_caps->product_id;

	cea_revision = drm_connector->display_info.cea_rev;

	strncpy(audio_info->display_name,
		edid_caps->display_name,
		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS - 1);

	if (cea_revision >= 3) {
		audio_info->mode_count = edid_caps->audio_mode_count;

		for (i = 0; i < audio_info->mode_count; ++i) {
			audio_info->modes[i].format_code =
					(enum audio_format_code)
					(edid_caps->audio_modes[i].format_code);
			audio_info->modes[i].channel_count =
					edid_caps->audio_modes[i].channel_count;
			audio_info->modes[i].sample_rates.all =
					edid_caps->audio_modes[i].sample_rate;
			audio_info->modes[i].sample_size =
					edid_caps->audio_modes[i].sample_size;
		}
	}

	audio_info->flags.all = edid_caps->speaker_flags;

	/* TODO: We only check for the progressive mode, check for interlace mode too */
	if (drm_connector->latency_present[0]) {
		audio_info->video_latency = drm_connector->video_latency[0];
		audio_info->audio_latency = drm_connector->audio_latency[0];
	}

	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */

}

static void
copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
				      struct drm_display_mode *dst_mode)
{
	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
	dst_mode->crtc_clock = src_mode->crtc_clock;
	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
	dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
	dst_mode->crtc_htotal = src_mode->crtc_htotal;
	dst_mode->crtc_hskew = src_mode->crtc_hskew;
	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
}

static void
decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
					const struct drm_display_mode *native_mode,
					bool scale_enabled)
{
	if (scale_enabled) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else if (native_mode->clock == drm_mode->clock &&
			native_mode->htotal == drm_mode->htotal &&
			native_mode->vtotal == drm_mode->vtotal) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else {
		/* no scaling nor amdgpu inserted, no need to patch */
	}
}

static int create_fake_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink *sink = NULL;
	struct dc_sink_init_data sink_init_data = { 0 };

	sink_init_data.link = aconnector->dc_link;
	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DRM_ERROR("Failed to create sink!\n");
		return -ENOMEM;
	}

	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
	aconnector->fake_enable = true;

	aconnector->dc_sink = sink;
	aconnector->dc_link->local_sink = sink;

	return 0;
}

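/*
 * Multi-display synchronization: streams that take part in a triggered CRTC
 * reset are lined up behind the stream with the highest refresh rate, which
 * acts as the master event source for the others.
 */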
static void set_multisync_trigger_params(
		struct dc_stream_state *stream)
{
	if (stream->triggered_crtc_reset.enabled) {
		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
	}
}

static void set_master_stream(struct dc_stream_state *stream_set[],
			      int stream_count)
{
	int j, highest_rfr = 0, master_stream = 0;

	for (j = 0; j < stream_count; j++) {
		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
			int refresh_rate = 0;

			refresh_rate = (stream_set[j]->timing.pix_clk_khz*1000)/
				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
			if (refresh_rate > highest_rfr) {
				highest_rfr = refresh_rate;
				master_stream = j;
			}
		}
	}
	for (j = 0; j < stream_count; j++) {
		if (stream_set[j])
			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
	}
}

static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
{
	int i = 0;

	if (context->stream_count < 2)
		return;
	for (i = 0; i < context->stream_count ; i++) {
		if (!context->streams[i])
			continue;
		/* TODO: add a function to read AMD VSDB bits and set the
		 * crtc_sync_master.multi_sync_enabled flag.
		 * For now it's set to false.
		 */
		set_multisync_trigger_params(context->streams[i]);
	}
	set_master_stream(context->streams, context->stream_count);
}

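/*
 * Build a dc_stream_state for the connector's sink from the requested DRM
 * mode. For MST the dc_sink is created through the MST path instead, and
 * for sinks that are not present yet a fake (virtual) sink is used so a
 * stream can still be constructed.
 */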
static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
		       const struct drm_display_mode *drm_mode,
		       const struct dm_connector_state *dm_state)
{
	struct drm_display_mode *preferred_mode = NULL;
	struct drm_connector *drm_connector;
	struct dc_stream_state *stream = NULL;
	struct drm_display_mode mode = *drm_mode;
	bool native_mode_found = false;

	if (aconnector == NULL) {
		DRM_ERROR("aconnector is NULL!\n");
		return stream;
	}

	drm_connector = &aconnector->base;

	if (!aconnector->dc_sink) {
		/*
		 * Create dc_sink when necessary to MST
		 * Don't apply fake_sink to MST
		 */
		if (aconnector->mst_port) {
			dm_dp_mst_dc_sink_create(drm_connector);
			return stream;
		}

		if (create_fake_sink(aconnector))
			return stream;
	}

	stream = dc_create_stream_for_sink(aconnector->dc_sink);

	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		return stream;
	}

	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
		/* Search for preferred mode */
		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
			native_mode_found = true;
			break;
		}
	}
	if (!native_mode_found)
		preferred_mode = list_first_entry_or_null(
				&aconnector->base.modes,
				struct drm_display_mode,
				head);

	if (preferred_mode == NULL) {
		/* This may not be an error, the use case is when we have no
		 * usermode calls to reset and set mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous mode
		 * and the modelist may not be filled in in time.
		 */
		DRM_DEBUG_DRIVER("No preferred mode found\n");
	} else {
		decide_crtc_timing_for_drm_display_mode(
				&mode, preferred_mode,
				dm_state ? (dm_state->scaling != RMX_OFF) : false);
	}

	if (!dm_state)
		drm_mode_set_crtcinfo(&mode, 0);

	fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base);
	update_stream_scaling_settings(&mode, dm_state, stream);

	fill_audio_info(
		&stream->audio_info,
		drm_connector,
		aconnector->dc_sink);

	update_stream_signal(stream);

	if (dm_state && dm_state->freesync_capable)
		stream->ignore_msa_timing_param = true;

	return stream;
}

static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}

static void dm_crtc_destroy_state(struct drm_crtc *crtc,
				  struct drm_crtc_state *state)
{
	struct dm_crtc_state *cur = to_dm_crtc_state(state);

	/* TODO: Destroy dc_stream objects once the stream object is flattened */
	if (cur->stream)
		dc_stream_release(cur->stream);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(state);
}

static void dm_crtc_reset_state(struct drm_crtc *crtc)
{
	struct dm_crtc_state *state;

	if (crtc->state)
		dm_crtc_destroy_state(crtc, crtc->state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (WARN_ON(!state))
		return;

	crtc->state = &state->base;
	crtc->state->crtc = crtc;

}

static struct drm_crtc_state *
dm_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct dm_crtc_state *state, *cur;

	if (WARN_ON(!crtc->state))
		return NULL;

	cur = to_dm_crtc_state(crtc->state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);

	if (cur->stream) {
		state->stream = cur->stream;
		dc_stream_retain(state->stream);
	}

	/* TODO: Duplicate dc_stream once the stream object is flattened */

	return &state->base;
}

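/*
 * Map the CRTC to its OTG vblank interrupt source and ask DC to
 * enable/disable it; used by the drm vblank enable/disable hooks below.
 */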
static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;

	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
}

static int dm_enable_vblank(struct drm_crtc *crtc)
{
	return dm_set_vblank(crtc, true);
}

static void dm_disable_vblank(struct drm_crtc *crtc)
{
	dm_set_vblank(crtc, false);
}

/* Implements only the options currently available for the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
	.reset = dm_crtc_reset_state,
	.destroy = amdgpu_dm_crtc_destroy,
	.gamma_set = drm_atomic_helper_legacy_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = dm_crtc_duplicate_state,
	.atomic_destroy_state = dm_crtc_destroy_state,
	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
	.enable_vblank = dm_enable_vblank,
	.disable_vblank = dm_disable_vblank,
};

static enum drm_connector_status
amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
{
	bool connected;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	/* Notes:
	 * 1. This interface is NOT called in context of HPD irq.
	 * 2. This interface *is called* in context of user-mode ioctl. Which
	 * makes it a bad place for *any* MST-related activity. */

	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
	    !aconnector->fake_enable)
		connected = (aconnector->dc_sink != NULL);
	else
		connected = (aconnector->base.force == DRM_FORCE_ON);

	return (connected ? connector_status_connected :
			connector_status_disconnected);
}

int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
					    struct drm_connector_state *connector_state,
					    struct drm_property *property,
					    uint64_t val)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_connector_state *dm_old_state =
		to_dm_connector_state(connector->state);
	struct dm_connector_state *dm_new_state =
		to_dm_connector_state(connector_state);

	int ret = -EINVAL;

	if (property == dev->mode_config.scaling_mode_property) {
		enum amdgpu_rmx_type rmx_type;

		switch (val) {
		case DRM_MODE_SCALE_CENTER:
			rmx_type = RMX_CENTER;
			break;
		case DRM_MODE_SCALE_ASPECT:
			rmx_type = RMX_ASPECT;
			break;
		case DRM_MODE_SCALE_FULLSCREEN:
			rmx_type = RMX_FULL;
			break;
		case DRM_MODE_SCALE_NONE:
		default:
			rmx_type = RMX_OFF;
			break;
		}

		if (dm_old_state->scaling == rmx_type)
			return 0;

		dm_new_state->scaling = rmx_type;
		ret = 0;
	} else if (property == adev->mode_info.underscan_hborder_property) {
		dm_new_state->underscan_hborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_vborder_property) {
		dm_new_state->underscan_vborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_property) {
		dm_new_state->underscan_enable = val;
		ret = 0;
	}

	return ret;
}

2658
3ee6b26b
AD
2659int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
2660 const struct drm_connector_state *state,
2661 struct drm_property *property,
2662 uint64_t *val)
e7b07cee
HW
2663{
2664 struct drm_device *dev = connector->dev;
2665 struct amdgpu_device *adev = dev->dev_private;
2666 struct dm_connector_state *dm_state =
2667 to_dm_connector_state(state);
2668 int ret = -EINVAL;
2669
2670 if (property == dev->mode_config.scaling_mode_property) {
2671 switch (dm_state->scaling) {
2672 case RMX_CENTER:
2673 *val = DRM_MODE_SCALE_CENTER;
2674 break;
2675 case RMX_ASPECT:
2676 *val = DRM_MODE_SCALE_ASPECT;
2677 break;
2678 case RMX_FULL:
2679 *val = DRM_MODE_SCALE_FULLSCREEN;
2680 break;
2681 case RMX_OFF:
2682 default:
2683 *val = DRM_MODE_SCALE_NONE;
2684 break;
2685 }
2686 ret = 0;
2687 } else if (property == adev->mode_info.underscan_hborder_property) {
2688 *val = dm_state->underscan_hborder;
2689 ret = 0;
2690 } else if (property == adev->mode_info.underscan_vborder_property) {
2691 *val = dm_state->underscan_vborder;
2692 ret = 0;
2693 } else if (property == adev->mode_info.underscan_property) {
2694 *val = dm_state->underscan_enable;
2695 ret = 0;
2696 }
2697 return ret;
2698}
2699
static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	const struct dc_link *link = aconnector->dc_link;
	struct amdgpu_device *adev = connector->dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none &&
	    dm->backlight_dev) {
		backlight_device_unregister(dm->backlight_dev);
		dm->backlight_dev = NULL;
	}
#endif
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	kfree(connector);
}

void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	kfree(state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (state) {
		state->scaling = RMX_OFF;
		state->underscan_enable = false;
		state->underscan_hborder = 0;
		state->underscan_vborder = 0;

		connector->state = &state->base;
		connector->state->connector = connector;
	}
}

struct drm_connector_state *
amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	struct dm_connector_state *new_state =
			kmemdup(state, sizeof(*state), GFP_KERNEL);

	if (new_state) {
		__drm_atomic_helper_connector_duplicate_state(connector,
							      &new_state->base);
		return &new_state->base;
	}

	return NULL;
}

static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
	.reset = amdgpu_dm_connector_funcs_reset,
	.detect = amdgpu_dm_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = amdgpu_dm_connector_destroy,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
	.atomic_get_property = amdgpu_dm_connector_atomic_get_property
};

static struct drm_encoder *best_encoder(struct drm_connector *connector)
{
	int enc_id = connector->encoder_ids[0];
	struct drm_mode_object *obj;
	struct drm_encoder *encoder;

	DRM_DEBUG_DRIVER("Finding the best encoder\n");

	/* pick the first encoder id */
	if (enc_id) {
		obj = drm_mode_object_find(connector->dev, NULL, enc_id, DRM_MODE_OBJECT_ENCODER);
		if (!obj) {
			DRM_ERROR("Couldn't find a matching encoder for our connector\n");
			return NULL;
		}
		encoder = obj_to_encoder(obj);
		return encoder;
	}
	DRM_ERROR("No encoder id\n");
	return NULL;
}

static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}

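/*
 * For forced connectors without a physical sink, build an emulated sink
 * from the connector's EDID property blob so that modes can still be
 * validated against something.
 */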
static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data init_params = {
			.link = aconnector->dc_link,
			.sink_signal = SIGNAL_TYPE_VIRTUAL
	};
	struct edid *edid;

	if (!aconnector->base.edid_blob_ptr) {
		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
			  aconnector->base.name);

		aconnector->base.force = DRM_FORCE_OFF;
		aconnector->base.override_edid = false;
		return;
	}

	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;

	aconnector->edid = edid;

	aconnector->dc_em_sink = dc_link_add_remote_sink(
		aconnector->dc_link,
		(uint8_t *)edid,
		(edid->extensions + 1) * EDID_LENGTH,
		&init_params);

	if (aconnector->base.force == DRM_FORCE_ON)
		aconnector->dc_sink = aconnector->dc_link->local_sink ?
			aconnector->dc_link->local_sink :
			aconnector->dc_em_sink;
}

static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = (struct dc_link *)aconnector->dc_link;

	/* In case of a headless boot with force on for a DP managed connector,
	 * those settings have to be != 0 to get an initial modeset
	 */
	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
	}


	aconnector->base.override_edid = true;
	create_eml_sink(aconnector);
}

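/*
 * Validate a mode by constructing a temporary stream for the sink and
 * letting DC check it; interlaced and doublescan modes are rejected
 * outright.
 */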
enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
						    struct drm_display_mode *mode)
{
	int result = MODE_ERROR;
	struct dc_sink *dc_sink;
	struct amdgpu_device *adev = connector->dev->dev_private;
	/* TODO: Unhardcode stream count */
	struct dc_stream_state *stream;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	enum dc_status dc_result = DC_OK;

	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
		return result;

	/* Only run this the first time mode_valid is called to initialize
	 * EDID mgmt
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
	    !aconnector->dc_em_sink)
		handle_edid_mgmt(aconnector);

	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;

	if (dc_sink == NULL) {
		DRM_ERROR("dc_sink is NULL!\n");
		goto fail;
	}

	stream = create_stream_for_sink(aconnector, mode, NULL);
	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto fail;
	}

	dc_result = dc_validate_stream(adev->dm.dc, stream);

	if (dc_result == DC_OK)
		result = MODE_OK;
	else
		DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
			      mode->hdisplay,
			      mode->vdisplay,
			      mode->clock,
			      dc_result);

	dc_stream_release(stream);

fail:
	/* TODO: error handling */
	return result;
}

static const struct drm_connector_helper_funcs
amdgpu_dm_connector_helper_funcs = {
	/*
	 * If a second, bigger display is hotplugged in FB console mode, its
	 * bigger resolution modes will be filtered out by
	 * drm_mode_validate_size() and would be missing once the user starts
	 * lightdm. So we need to renew the modes list in the get_modes
	 * callback, not just return the modes count.
	 */
	.get_modes = get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
	.best_encoder = best_encoder
};

static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}

static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
				       struct drm_crtc_state *state)
{
	struct amdgpu_device *adev = crtc->dev->dev_private;
	struct dc *dc = adev->dm.dc;
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
	int ret = -EINVAL;

	if (unlikely(!dm_crtc_state->stream &&
		     modeset_required(state, NULL, dm_crtc_state->stream))) {
		WARN_ON(1);
		return ret;
	}

	/* In some use cases, like reset, no stream is attached */
	if (!dm_crtc_state->stream)
		return 0;

	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
		return 0;

	return ret;
}

static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
				      const struct drm_display_mode *mode,
				      struct drm_display_mode *adjusted_mode)
{
	return true;
}

static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
	.disable = dm_crtc_helper_disable,
	.atomic_check = dm_crtc_helper_atomic_check,
	.mode_fixup = dm_crtc_helper_mode_fixup
};

static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{

}

static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
					  struct drm_crtc_state *crtc_state,
					  struct drm_connector_state *conn_state)
{
	return 0;
}

const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
	.disable = dm_encoder_helper_disable,
	.atomic_check = dm_encoder_helper_atomic_check
};

static void dm_drm_plane_reset(struct drm_plane *plane)
{
	struct dm_plane_state *amdgpu_state = NULL;

	if (plane->state)
		plane->funcs->atomic_destroy_state(plane, plane->state);

	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
	WARN_ON(amdgpu_state == NULL);

	if (amdgpu_state) {
		plane->state = &amdgpu_state->base;
		plane->state->plane = plane;
		plane->state->rotation = DRM_MODE_ROTATE_0;
	}
}

static struct drm_plane_state *
dm_drm_plane_duplicate_state(struct drm_plane *plane)
{
	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;

	old_dm_plane_state = to_dm_plane_state(plane->state);
	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
	if (!dm_plane_state)
		return NULL;

	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);

	if (old_dm_plane_state->dc_state) {
		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
		dc_plane_state_retain(dm_plane_state->dc_state);
	}

	return &dm_plane_state->base;
}

void dm_drm_plane_destroy_state(struct drm_plane *plane,
				struct drm_plane_state *state)
{
	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);

	if (dm_plane_state->dc_state)
		dc_plane_state_release(dm_plane_state->dc_state);

	drm_atomic_helper_plane_destroy_state(plane, state);
}

static const struct drm_plane_funcs dm_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = drm_plane_cleanup,
	.reset = dm_drm_plane_reset,
	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
	.atomic_destroy_state = dm_drm_plane_destroy_state,
};

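/*
 * Pin the framebuffer BO into a displayable domain (VRAM for the cursor)
 * and program the resulting address into the DC plane state; video
 * surfaces additionally get a chroma address placed after the luma plane.
 */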
static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
				      struct drm_plane_state *new_state)
{
	struct amdgpu_framebuffer *afb;
	struct drm_gem_object *obj;
	struct amdgpu_device *adev;
	struct amdgpu_bo *rbo;
	uint64_t chroma_addr = 0;
	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
	unsigned int awidth;
	uint32_t domain;
	int r;

	dm_plane_state_old = to_dm_plane_state(plane->state);
	dm_plane_state_new = to_dm_plane_state(new_state);

	if (!new_state->fb) {
		DRM_DEBUG_DRIVER("No FB bound\n");
		return 0;
	}

	afb = to_amdgpu_framebuffer(new_state->fb);
	obj = new_state->fb->obj[0];
	rbo = gem_to_amdgpu_bo(obj);
	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
	r = amdgpu_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		return r;

	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		domain = amdgpu_display_supported_domains(adev);
	else
		domain = AMDGPU_GEM_DOMAIN_VRAM;

	r = amdgpu_bo_pin(rbo, domain, &afb->address);
	amdgpu_bo_unreserve(rbo);

	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
		return r;
	}

	amdgpu_bo_ref(rbo);

	if (dm_plane_state_new->dc_state &&
			dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
		struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;

		if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
			plane_state->address.grph.addr.low_part = lower_32_bits(afb->address);
			plane_state->address.grph.addr.high_part = upper_32_bits(afb->address);
		} else {
			awidth = ALIGN(new_state->fb->width, 64);
			plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
			plane_state->address.video_progressive.luma_addr.low_part
							= lower_32_bits(afb->address);
			plane_state->address.video_progressive.luma_addr.high_part
							= upper_32_bits(afb->address);
			chroma_addr = afb->address + (u64)awidth * new_state->fb->height;
			plane_state->address.video_progressive.chroma_addr.low_part
							= lower_32_bits(chroma_addr);
			plane_state->address.video_progressive.chroma_addr.high_part
							= upper_32_bits(chroma_addr);
		}
	}

	/* This is a hack for S3: the 4.9 kernel filters out the cursor buffer
	 * prepare and cleanup in drm_atomic_helper_prepare_planes and
	 * drm_atomic_helper_cleanup_planes because the fb doesn't exist in S3.
	 * In the 4.10 kernel this code should be removed, and
	 * amdgpu_device_suspend code touching frame buffers should be avoided
	 * for DC.
	 */
	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_state->crtc);

		acrtc->cursor_bo = obj;
	}
	return 0;
}

static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
				       struct drm_plane_state *old_state)
{
	struct amdgpu_bo *rbo;
	int r;

	if (!old_state->fb)
		return;

	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
	r = amdgpu_bo_reserve(rbo, false);
	if (unlikely(r)) {
		DRM_ERROR("failed to reserve rbo before unpin\n");
		return;
	}

	amdgpu_bo_unpin(rbo);
	amdgpu_bo_unreserve(rbo);
	amdgpu_bo_unref(&rbo);
}

static int dm_plane_atomic_check(struct drm_plane *plane,
				 struct drm_plane_state *state)
{
	struct amdgpu_device *adev = plane->dev->dev_private;
	struct dc *dc = adev->dm.dc;
	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);

	if (!dm_plane_state->dc_state)
		return 0;

	if (!fill_rects_from_plane_state(state, dm_plane_state->dc_state))
		return -EINVAL;

	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
		return 0;

	return -EINVAL;
}

static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
	.prepare_fb = dm_plane_helper_prepare_fb,
	.cleanup_fb = dm_plane_helper_cleanup_fb,
	.atomic_check = dm_plane_atomic_check,
};

/*
 * TODO: these are currently initialized to rgb formats only.
 * For future use cases we should either initialize them dynamically based on
 * plane capabilities, or initialize this array to all formats, so the
 * internal drm check will succeed, and let DC implement the proper checking.
 */
static const uint32_t rgb_formats[] = {
	DRM_FORMAT_RGB888,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_ABGR2101010,
};

static const uint32_t yuv_formats[] = {
	DRM_FORMAT_NV12,
	DRM_FORMAT_NV21,
};

static const u32 cursor_formats[] = {
	DRM_FORMAT_ARGB8888
};

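/*
 * Register a universal plane with the format list that matches its type:
 * RGB formats for primary, YUV for overlay and ARGB8888 for the cursor.
 */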
static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct amdgpu_plane *aplane,
				unsigned long possible_crtcs)
{
	int res = -EPERM;

	switch (aplane->base.type) {
	case DRM_PLANE_TYPE_PRIMARY:
		res = drm_universal_plane_init(
				dm->adev->ddev,
				&aplane->base,
				possible_crtcs,
				&dm_plane_funcs,
				rgb_formats,
				ARRAY_SIZE(rgb_formats),
				NULL, aplane->base.type, NULL);
		break;
	case DRM_PLANE_TYPE_OVERLAY:
		res = drm_universal_plane_init(
				dm->adev->ddev,
				&aplane->base,
				possible_crtcs,
				&dm_plane_funcs,
				yuv_formats,
				ARRAY_SIZE(yuv_formats),
				NULL, aplane->base.type, NULL);
		break;
	case DRM_PLANE_TYPE_CURSOR:
		res = drm_universal_plane_init(
				dm->adev->ddev,
				&aplane->base,
				possible_crtcs,
				&dm_plane_funcs,
				cursor_formats,
				ARRAY_SIZE(cursor_formats),
				NULL, aplane->base.type, NULL);
		break;
	}

	drm_plane_helper_add(&aplane->base, &dm_plane_helper_funcs);

	/* Create (reset) the plane state */
	if (aplane->base.funcs->reset)
		aplane->base.funcs->reset(&aplane->base);


	return res;
}

static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t crtc_index)
{
	struct amdgpu_crtc *acrtc = NULL;
	struct amdgpu_plane *cursor_plane;

	int res = -ENOMEM;

	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
	if (!cursor_plane)
		goto fail;

	cursor_plane->base.type = DRM_PLANE_TYPE_CURSOR;
	res = amdgpu_dm_plane_init(dm, cursor_plane, 0);

	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
	if (!acrtc)
		goto fail;

	res = drm_crtc_init_with_planes(
			dm->ddev,
			&acrtc->base,
			plane,
			&cursor_plane->base,
			&amdgpu_dm_crtc_funcs, NULL);

	if (res)
		goto fail;

	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);

	/* Create (reset) the plane state */
	if (acrtc->base.funcs->reset)
		acrtc->base.funcs->reset(&acrtc->base);

	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;

	acrtc->crtc_id = crtc_index;
	acrtc->base.enabled = false;

	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
				   true, MAX_COLOR_LUT_ENTRIES);
	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);

	return 0;

fail:
	kfree(acrtc);
	kfree(cursor_plane);
	return res;
}


static int to_drm_connector_type(enum signal_type st)
{
	switch (st) {
	case SIGNAL_TYPE_HDMI_TYPE_A:
		return DRM_MODE_CONNECTOR_HDMIA;
	case SIGNAL_TYPE_EDP:
		return DRM_MODE_CONNECTOR_eDP;
	case SIGNAL_TYPE_RGB:
		return DRM_MODE_CONNECTOR_VGA;
	case SIGNAL_TYPE_DISPLAY_PORT:
	case SIGNAL_TYPE_DISPLAY_PORT_MST:
		return DRM_MODE_CONNECTOR_DisplayPort;
	case SIGNAL_TYPE_DVI_DUAL_LINK:
	case SIGNAL_TYPE_DVI_SINGLE_LINK:
		return DRM_MODE_CONNECTOR_DVID;
	case SIGNAL_TYPE_VIRTUAL:
		return DRM_MODE_CONNECTOR_VIRTUAL;

	default:
		return DRM_MODE_CONNECTOR_Unknown;
	}
}

static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
{
	const struct drm_connector_helper_funcs *helper =
		connector->helper_private;
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	encoder = helper->best_encoder(connector);

	if (encoder == NULL)
		return;

	amdgpu_encoder = to_amdgpu_encoder(encoder);

	amdgpu_encoder->native_mode.clock = 0;

	if (!list_empty(&connector->probed_modes)) {
		struct drm_display_mode *preferred_mode = NULL;

		list_for_each_entry(preferred_mode,
				    &connector->probed_modes,
				    head) {
			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
				amdgpu_encoder->native_mode = *preferred_mode;

			break;
		}

	}
}

static struct drm_display_mode *
amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
			     char *name,
			     int hdisplay, int vdisplay)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;

	mode = drm_mode_duplicate(dev, native_mode);

	if (mode == NULL)
		return NULL;

	mode->hdisplay = hdisplay;
	mode->vdisplay = vdisplay;
	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
	strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN);

	return mode;

}

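/*
 * Add a set of common modes (derived from the native mode's timing) that
 * fit within the native resolution and are not already in the probed list.
 */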
static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
						 struct drm_connector *connector)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	int i;
	int n;
	struct mode_size {
		char name[DRM_DISPLAY_MODE_LEN];
		int w;
		int h;
	} common_modes[] = {
		{  "640x480",  640,  480},
		{  "800x600",  800,  600},
		{ "1024x768", 1024,  768},
		{ "1280x720", 1280,  720},
		{ "1280x800", 1280,  800},
		{"1280x1024", 1280, 1024},
		{ "1440x900", 1440,  900},
		{"1680x1050", 1680, 1050},
		{"1600x1200", 1600, 1200},
		{"1920x1080", 1920, 1080},
		{"1920x1200", 1920, 1200}
	};

	n = ARRAY_SIZE(common_modes);

	for (i = 0; i < n; i++) {
		struct drm_display_mode *curmode = NULL;
		bool mode_existed = false;

		if (common_modes[i].w > native_mode->hdisplay ||
		    common_modes[i].h > native_mode->vdisplay ||
		    (common_modes[i].w == native_mode->hdisplay &&
		     common_modes[i].h == native_mode->vdisplay))
			continue;

		list_for_each_entry(curmode, &connector->probed_modes, head) {
			if (common_modes[i].w == curmode->hdisplay &&
			    common_modes[i].h == curmode->vdisplay) {
				mode_existed = true;
				break;
			}
		}

		if (mode_existed)
			continue;

		mode = amdgpu_dm_create_common_mode(encoder,
				common_modes[i].name, common_modes[i].w,
				common_modes[i].h);
		drm_mode_probed_add(connector, mode);
		amdgpu_dm_connector->num_modes++;
	}
}

static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
					      struct edid *edid)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);

	if (edid) {
		/* empty probed_modes */
		INIT_LIST_HEAD(&connector->probed_modes);
		amdgpu_dm_connector->num_modes =
				drm_add_edid_modes(connector, edid);

		amdgpu_dm_get_native_mode(connector);
	} else {
		amdgpu_dm_connector->num_modes = 0;
	}
}

7578ecda 3441static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
e7b07cee
HW
3442{
3443 const struct drm_connector_helper_funcs *helper =
3444 connector->helper_private;
c84dec2f
HW
3445 struct amdgpu_dm_connector *amdgpu_dm_connector =
3446 to_amdgpu_dm_connector(connector);
e7b07cee 3447 struct drm_encoder *encoder;
c84dec2f 3448 struct edid *edid = amdgpu_dm_connector->edid;
e7b07cee
HW
3449
3450 encoder = helper->best_encoder(connector);
e7b07cee
HW
3451 amdgpu_dm_connector_ddc_get_modes(connector, edid);
3452 amdgpu_dm_connector_add_common_modes(encoder, connector);
3453
3454#if defined(CONFIG_DRM_AMD_DC_FBC)
3455 amdgpu_dm_fbc_init(connector);
3456#endif
3457	return amdgpu_dm_connector->num_modes;
3458}
3459
3460void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
3461 struct amdgpu_dm_connector *aconnector,
3462 int connector_type,
3463 struct dc_link *link,
3464 int link_index)
3465{
3466 struct amdgpu_device *adev = dm->ddev->dev_private;
3467
3468 aconnector->connector_id = link_index;
3469 aconnector->dc_link = link;
3470 aconnector->base.interlace_allowed = false;
3471 aconnector->base.doublescan_allowed = false;
3472 aconnector->base.stereo_allowed = false;
3473 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
3474 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
3475
3476 mutex_init(&aconnector->hpd_lock);
3477
3478	/* Configure supported HPD hot plug: connector->polled default value is 0,
3479	 * which means HPD hot plug is not supported
3480	 */
3481 switch (connector_type) {
3482 case DRM_MODE_CONNECTOR_HDMIA:
3483 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
3484 break;
3485 case DRM_MODE_CONNECTOR_DisplayPort:
3486 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
3487 break;
3488 case DRM_MODE_CONNECTOR_DVID:
3489 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
3490 break;
3491 default:
3492 break;
3493 }
3494
3495 drm_object_attach_property(&aconnector->base.base,
3496 dm->ddev->mode_config.scaling_mode_property,
3497 DRM_MODE_SCALE_NONE);
3498
3499 drm_object_attach_property(&aconnector->base.base,
3500 adev->mode_info.underscan_property,
3501 UNDERSCAN_OFF);
3502 drm_object_attach_property(&aconnector->base.base,
3503 adev->mode_info.underscan_hborder_property,
3504 0);
3505 drm_object_attach_property(&aconnector->base.base,
3506 adev->mode_info.underscan_vborder_property,
3507 0);
3508
3509}
3510
3511static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
3512			  struct i2c_msg *msgs, int num)
3513{
3514 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
3515 struct ddc_service *ddc_service = i2c->ddc_service;
3516 struct i2c_command cmd;
3517 int i;
3518 int result = -EIO;
3519
3520	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
3521
3522 if (!cmd.payloads)
3523 return result;
3524
3525 cmd.number_of_payloads = num;
3526 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
3527 cmd.speed = 100;
3528
3529 for (i = 0; i < num; i++) {
3530 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
3531 cmd.payloads[i].address = msgs[i].addr;
3532 cmd.payloads[i].length = msgs[i].len;
3533 cmd.payloads[i].data = msgs[i].buf;
3534 }
3535
3536 if (dal_i2caux_submit_i2c_command(
3537 ddc_service->ctx->i2caux,
3538 ddc_service->ddc_pin,
3539 &cmd))
3540 result = num;
3541
3542 kfree(cmd.payloads);
3543 return result;
3544}
3545
3546static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
3547{
3548 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
3549}
3550
3551static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
3552 .master_xfer = amdgpu_dm_i2c_xfer,
3553 .functionality = amdgpu_dm_i2c_func,
3554};
3555
3556static struct amdgpu_i2c_adapter *
3557create_i2c(struct ddc_service *ddc_service,
3558	   int link_index,
3559	   int *res)
3560{
3561 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
3562 struct amdgpu_i2c_adapter *i2c;
3563
3564	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
3565	if (!i2c)
3566		return NULL;
3567 i2c->base.owner = THIS_MODULE;
3568 i2c->base.class = I2C_CLASS_DDC;
3569 i2c->base.dev.parent = &adev->pdev->dev;
3570 i2c->base.algo = &amdgpu_dm_i2c_algo;
3571	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
3572 i2c_set_adapdata(&i2c->base, i2c);
3573 i2c->ddc_service = ddc_service;
3574
3575 return i2c;
3576}
3577
3578
3579/* Note: this function assumes that dc_link_detect() was called for the
3580 * dc_link which will be represented by this aconnector.
3581 */
3582static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
3583 struct amdgpu_dm_connector *aconnector,
3584 uint32_t link_index,
3585 struct amdgpu_encoder *aencoder)
3586{
3587 int res = 0;
3588 int connector_type;
3589 struct dc *dc = dm->dc;
3590 struct dc_link *link = dc_get_link_at_index(dc, link_index);
3591 struct amdgpu_i2c_adapter *i2c;
3592
3593	link->priv = aconnector;
3594
3595	DRM_DEBUG_DRIVER("%s()\n", __func__);
3596
3597 i2c = create_i2c(link->ddc, link->link_index, &res);
3598 if (!i2c) {
3599 DRM_ERROR("Failed to create i2c adapter data\n");
3600 return -ENOMEM;
3601 }
3602
3603 aconnector->i2c = i2c;
3604 res = i2c_add_adapter(&i2c->base);
3605
3606 if (res) {
3607 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
3608 goto out_free;
3609 }
3610
3611 connector_type = to_drm_connector_type(link->connector_signal);
3612
3613 res = drm_connector_init(
3614 dm->ddev,
3615 &aconnector->base,
3616 &amdgpu_dm_connector_funcs,
3617 connector_type);
3618
3619 if (res) {
3620 DRM_ERROR("connector_init failed\n");
3621 aconnector->connector_id = -1;
3622 goto out_free;
3623 }
3624
3625 drm_connector_helper_add(
3626 &aconnector->base,
3627 &amdgpu_dm_connector_helper_funcs);
3628
3629 if (aconnector->base.funcs->reset)
3630 aconnector->base.funcs->reset(&aconnector->base);
3631
3632 amdgpu_dm_connector_init_helper(
3633 dm,
3634 aconnector,
3635 connector_type,
3636 link,
3637 link_index);
3638
3639 drm_mode_connector_attach_encoder(
3640 &aconnector->base, &aencoder->base);
3641
3642 drm_connector_register(&aconnector->base);
3643
3644 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
3645 || connector_type == DRM_MODE_CONNECTOR_eDP)
3646 amdgpu_dm_initialize_dp_connector(dm, aconnector);
3647
3648out_free:
3649 if (res) {
3650 kfree(i2c);
3651 aconnector->i2c = NULL;
3652 }
3653 return res;
3654}
3655
3656int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
3657{
3658 switch (adev->mode_info.num_crtc) {
3659 case 1:
3660 return 0x1;
3661 case 2:
3662 return 0x3;
3663 case 3:
3664 return 0x7;
3665 case 4:
3666 return 0xf;
3667 case 5:
3668 return 0x1f;
3669 case 6:
3670 default:
3671 return 0x3f;
3672 }
3673}
3674
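/*
 * Editor's note: the switch above is the saturating bitmask
 * (1 << num_crtc) - 1, capped at six CRTCs. A minimal equivalent sketch,
 * assuming num_crtc >= 1 as the default case implies:
 */
static int __maybe_unused encoder_crtc_mask_sketch(int num_crtc)
{
	int n = (num_crtc < 1 || num_crtc > 6) ? 6 : num_crtc;

	return (1 << n) - 1; /* e.g. n = 4 gives 0xf */
}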
3675static int amdgpu_dm_encoder_init(struct drm_device *dev,
3676				  struct amdgpu_encoder *aencoder,
3677				  uint32_t link_index)
3678{
3679 struct amdgpu_device *adev = dev->dev_private;
3680
3681 int res = drm_encoder_init(dev,
3682 &aencoder->base,
3683 &amdgpu_dm_encoder_funcs,
3684 DRM_MODE_ENCODER_TMDS,
3685 NULL);
3686
3687 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
3688
3689 if (!res)
3690 aencoder->encoder_id = link_index;
3691 else
3692 aencoder->encoder_id = -1;
3693
3694 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
3695
3696 return res;
3697}
3698
3699static void manage_dm_interrupts(struct amdgpu_device *adev,
3700				 struct amdgpu_crtc *acrtc,
3701				 bool enable)
3702{
3703 /*
3704	 * this is not a correct translation, but it works as long as the
3705	 * VBLANK constant is the same as PFLIP
3706 */
3707 int irq_type =
3708		amdgpu_display_crtc_idx_to_irq_type(
3709 adev,
3710 acrtc->crtc_id);
3711
3712 if (enable) {
3713 drm_crtc_vblank_on(&acrtc->base);
3714 amdgpu_irq_get(
3715 adev,
3716 &adev->pageflip_irq,
3717 irq_type);
3718 } else {
3719
3720 amdgpu_irq_put(
3721 adev,
3722 &adev->pageflip_irq,
3723 irq_type);
3724 drm_crtc_vblank_off(&acrtc->base);
3725 }
3726}
3727
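/*
 * Editor's note: enable and disable above are deliberately symmetric --
 * vblank is turned on before the pageflip interrupt reference is taken,
 * and turned off only after that reference is dropped.
 */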
3728static bool
3729is_scaling_state_different(const struct dm_connector_state *dm_state,
3730			   const struct dm_connector_state *old_dm_state)
3731{
3732 if (dm_state->scaling != old_dm_state->scaling)
3733 return true;
3734 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
3735 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
3736 return true;
3737 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
3738 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
3739 return true;
3740 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
3741 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
3742 return true;
3743 return false;
3744}
3745
3746static void remove_stream(struct amdgpu_device *adev,
3747			  struct amdgpu_crtc *acrtc,
3748			  struct dc_stream_state *stream)
3749{
3750 /* this is the update mode case */
3751 if (adev->dm.freesync_module)
3752 mod_freesync_remove_stream(adev->dm.freesync_module, stream);
3753
3754 acrtc->otg_inst = -1;
3755 acrtc->enabled = false;
3756}
3757
3758static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
3759			       struct dc_cursor_position *position)
3760{
3761	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
3762 int x, y;
3763 int xorigin = 0, yorigin = 0;
3764
3765 if (!crtc || !plane->state->fb) {
3766 position->enable = false;
3767 position->x = 0;
3768 position->y = 0;
3769 return 0;
3770 }
3771
3772 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
3773 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
3774 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
3775 __func__,
3776 plane->state->crtc_w,
3777 plane->state->crtc_h);
3778 return -EINVAL;
3779 }
3780
3781 x = plane->state->crtc_x;
3782 y = plane->state->crtc_y;
3783 /* avivo cursor are offset into the total surface */
3784 x += crtc->primary->state->src_x >> 16;
3785 y += crtc->primary->state->src_y >> 16;
3786 if (x < 0) {
3787 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
3788 x = 0;
3789 }
3790 if (y < 0) {
3791 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
3792 y = 0;
3793 }
3794 position->enable = true;
3795 position->x = x;
3796 position->y = y;
3797 position->x_hotspot = xorigin;
3798 position->y_hotspot = yorigin;
3799
3800 return 0;
3801}
3802
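/*
 * Editor's worked example for the clamping above: with crtc_x = -10 and
 * max_cursor_width = 128, the cursor hangs off the left edge, so
 * position.x becomes 0 and x_hotspot becomes 10; the hardware then shifts
 * the cursor image 10 pixels within its own buffer, which looks identical
 * to drawing it at x = -10.
 */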
3803static void handle_cursor_update(struct drm_plane *plane,
3804				 struct drm_plane_state *old_plane_state)
3805{
3806 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
3807 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
3808 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
3809 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
3810 uint64_t address = afb ? afb->address : 0;
3811 struct dc_cursor_position position;
3812 struct dc_cursor_attributes attributes;
3813 int ret;
3814
3815 if (!plane->state->fb && !old_plane_state->fb)
3816 return;
3817
3818	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
3819 __func__,
3820 amdgpu_crtc->crtc_id,
3821 plane->state->crtc_w,
3822 plane->state->crtc_h);
3823
3824 ret = get_cursor_position(plane, crtc, &position);
3825 if (ret)
3826 return;
3827
3828 if (!position.enable) {
3829 /* turn off cursor */
3830 if (crtc_state && crtc_state->stream)
3831 dc_stream_set_cursor_position(crtc_state->stream,
3832 &position);
3833 return;
3834	}
3835
3836 amdgpu_crtc->cursor_width = plane->state->crtc_w;
3837 amdgpu_crtc->cursor_height = plane->state->crtc_h;
3838
3839 attributes.address.high_part = upper_32_bits(address);
3840 attributes.address.low_part = lower_32_bits(address);
3841 attributes.width = plane->state->crtc_w;
3842 attributes.height = plane->state->crtc_h;
3843 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
3844 attributes.rotation_angle = 0;
3845 attributes.attribute_flags.value = 0;
3846
3847 attributes.pitch = attributes.width;
3848
3849 if (crtc_state->stream) {
3850 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
3851 &attributes))
3852 DRM_ERROR("DC failed to set cursor attributes\n");
3853
3854 if (!dc_stream_set_cursor_position(crtc_state->stream,
3855 &position))
3856 DRM_ERROR("DC failed to set cursor position\n");
3857	}
3858}
3859
3860static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
3861{
3862
3863 assert_spin_locked(&acrtc->base.dev->event_lock);
3864 WARN_ON(acrtc->event);
3865
3866 acrtc->event = acrtc->base.state->event;
3867
3868 /* Set the flip status */
3869 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
3870
3871 /* Mark this event as consumed */
3872 acrtc->base.state->event = NULL;
3873
3874 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
3875 acrtc->crtc_id);
3876}
3877
3878/*
3879 * Executes flip
3880 *
3881 * Waits on all BO's fences and for proper vblank count
3882 */
3883static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
3884			      struct drm_framebuffer *fb,
3885			      uint32_t target,
3886			      struct dc_state *state)
3887{
3888 unsigned long flags;
3889 uint32_t target_vblank;
3890 int r, vpos, hpos;
3891 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
3892 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
3893	struct amdgpu_bo *abo = gem_to_amdgpu_bo(fb->obj[0]);
3894	struct amdgpu_device *adev = crtc->dev->dev_private;
3895	bool async_flip = (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
3896	struct dc_flip_addrs addr = { {0} };
3897	/* TODO eliminate or rename surface_update */
3898 struct dc_surface_update surface_updates[1] = { {0} };
3899 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
3900
3901
3902 /* Prepare wait for target vblank early - before the fence-waits */
3903	target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
3904 amdgpu_get_vblank_counter_kms(crtc->dev, acrtc->crtc_id);
3905
3906	/* TODO This might fail, so it is better not to rely on it; wait
3907	 * explicitly on the fences instead,
3908	 * and in general this should be called
3909	 * from a blocking commit, as per the framework helpers
3910	 */
3911 r = amdgpu_bo_reserve(abo, true);
3912 if (unlikely(r != 0)) {
3913 DRM_ERROR("failed to reserve buffer before flip\n");
3914 WARN_ON(1);
3915 }
3916
3917 /* Wait for all fences on this FB */
3918 WARN_ON(reservation_object_wait_timeout_rcu(abo->tbo.resv, true, false,
3919 MAX_SCHEDULE_TIMEOUT) < 0);
3920
3921 amdgpu_bo_unreserve(abo);
3922
3923 /* Wait until we're out of the vertical blank period before the one
3924 * targeted by the flip
3925 */
3926 while ((acrtc->enabled &&
3927 (amdgpu_display_get_crtc_scanoutpos(adev->ddev, acrtc->crtc_id,
3928 0, &vpos, &hpos, NULL,
3929 NULL, &crtc->hwmode)
3930 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
3931 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
3932 (int)(target_vblank -
3933 amdgpu_get_vblank_counter_kms(adev->ddev, acrtc->crtc_id)) > 0)) {
3934 usleep_range(1000, 1100);
3935 }
3936
3937 /* Flip */
3938 spin_lock_irqsave(&crtc->dev->event_lock, flags);
3939 /* update crtc fb */
3940 crtc->primary->fb = fb;
3941
3942 WARN_ON(acrtc->pflip_status != AMDGPU_FLIP_NONE);
3943 WARN_ON(!acrtc_state->stream);
3944
3945 addr.address.grph.addr.low_part = lower_32_bits(afb->address);
3946 addr.address.grph.addr.high_part = upper_32_bits(afb->address);
3947 addr.flip_immediate = async_flip;
3948
3949
3950 if (acrtc->base.state->event)
3951 prepare_flip_isr(acrtc);
3952
3953	surface_updates->surface = dc_stream_get_status(acrtc_state->stream)->plane_states[0];
3954 surface_updates->flip_addr = &addr;
3955
3956
3957 dc_commit_updates_for_stream(adev->dm.dc,
3958 surface_updates,
3959 1,
3960 acrtc_state->stream,
3961 NULL,
3962 &surface_updates->surface,
3963 state);
3964
3965 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x \n",
3966 __func__,
3967 addr.address.grph.addr.high_part,
3968 addr.address.grph.addr.low_part);
3969
3970
3971 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
3972}
3973
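/*
 * Editor's sketch: the vblank counters above wrap, so the wait loop compares
 * them with signed subtraction. The idiom in isolation, assuming 32-bit
 * counters:
 */
static bool __maybe_unused vblank_target_passed_sketch(uint32_t target,
						       uint32_t current_count)
{
	/* true once current_count has caught up with (or passed) target */
	return (int)(target - current_count) <= 0;
}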
3974/*
3975 * TODO this whole function needs to go
3976 *
3977 * dc_surface_update is needlessly complex. See if we can just replace this
3978 * with a dc_plane_state and follow the atomic model a bit more closely here.
3979 */
3980static bool commit_planes_to_stream(
3981 struct dc *dc,
3982 struct dc_plane_state **plane_states,
3983 uint8_t new_plane_count,
3984 struct dm_crtc_state *dm_new_crtc_state,
3985 struct dm_crtc_state *dm_old_crtc_state,
3986 struct dc_state *state)
3987{
3988 /* no need to dynamically allocate this. it's pretty small */
3989 struct dc_surface_update updates[MAX_SURFACES];
3990 struct dc_flip_addrs *flip_addr;
3991 struct dc_plane_info *plane_info;
3992 struct dc_scaling_info *scaling_info;
3993 int i;
3994 struct dc_stream_state *dc_stream = dm_new_crtc_state->stream;
3995 struct dc_stream_update *stream_update =
3996 kzalloc(sizeof(struct dc_stream_update), GFP_KERNEL);
3997
3998 if (!stream_update) {
3999 BREAK_TO_DEBUGGER();
4000 return false;
4001 }
4002
4003 flip_addr = kcalloc(MAX_SURFACES, sizeof(struct dc_flip_addrs),
4004 GFP_KERNEL);
4005 plane_info = kcalloc(MAX_SURFACES, sizeof(struct dc_plane_info),
4006 GFP_KERNEL);
4007 scaling_info = kcalloc(MAX_SURFACES, sizeof(struct dc_scaling_info),
4008 GFP_KERNEL);
4009
4010 if (!flip_addr || !plane_info || !scaling_info) {
4011 kfree(flip_addr);
4012 kfree(plane_info);
4013 kfree(scaling_info);
4014 kfree(stream_update);
4015 return false;
4016 }
4017
4018 memset(updates, 0, sizeof(updates));
4019
4020 stream_update->src = dc_stream->src;
4021 stream_update->dst = dc_stream->dst;
4022 stream_update->out_transfer_func = dc_stream->out_transfer_func;
4023
4024 for (i = 0; i < new_plane_count; i++) {
4025 updates[i].surface = plane_states[i];
4026 updates[i].gamma =
4027 (struct dc_gamma *)plane_states[i]->gamma_correction;
4028 updates[i].in_transfer_func = plane_states[i]->in_transfer_func;
4029 flip_addr[i].address = plane_states[i]->address;
4030 flip_addr[i].flip_immediate = plane_states[i]->flip_immediate;
4031 plane_info[i].color_space = plane_states[i]->color_space;
4032 plane_info[i].format = plane_states[i]->format;
4033 plane_info[i].plane_size = plane_states[i]->plane_size;
4034 plane_info[i].rotation = plane_states[i]->rotation;
4035 plane_info[i].horizontal_mirror = plane_states[i]->horizontal_mirror;
4036 plane_info[i].stereo_format = plane_states[i]->stereo_format;
4037 plane_info[i].tiling_info = plane_states[i]->tiling_info;
4038 plane_info[i].visible = plane_states[i]->visible;
4039 plane_info[i].per_pixel_alpha = plane_states[i]->per_pixel_alpha;
4040 plane_info[i].dcc = plane_states[i]->dcc;
4041 scaling_info[i].scaling_quality = plane_states[i]->scaling_quality;
4042 scaling_info[i].src_rect = plane_states[i]->src_rect;
4043 scaling_info[i].dst_rect = plane_states[i]->dst_rect;
4044 scaling_info[i].clip_rect = plane_states[i]->clip_rect;
4045
4046 updates[i].flip_addr = &flip_addr[i];
4047 updates[i].plane_info = &plane_info[i];
4048 updates[i].scaling_info = &scaling_info[i];
4049 }
4050
4051 dc_commit_updates_for_stream(
4052 dc,
4053 updates,
4054 new_plane_count,
4055 dc_stream, stream_update, plane_states, state);
4056
4057 kfree(flip_addr);
4058 kfree(plane_info);
4059 kfree(scaling_info);
4060 kfree(stream_update);
4061 return true;
4062}
4063
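/*
 * Editor's note: in commit_planes_to_stream() above, `updates` stays on the
 * stack while the flip/plane/scaling arrays are kcalloc'd, presumably to
 * keep the kernel stack frame small since each array holds MAX_SURFACES
 * entries.
 */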
4064static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
4065				    struct drm_device *dev,
4066				    struct amdgpu_display_manager *dm,
4067				    struct drm_crtc *pcrtc,
4068				    bool *wait_for_vblank)
4069{
4070 uint32_t i;
4071 struct drm_plane *plane;
4072	struct drm_plane_state *old_plane_state, *new_plane_state;
4073	struct dc_stream_state *dc_stream_attach;
4074	struct dc_plane_state *plane_states_constructed[MAX_SURFACES];
4075	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
4076 struct drm_crtc_state *new_pcrtc_state =
4077 drm_atomic_get_new_crtc_state(state, pcrtc);
4078 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
4079	struct dm_crtc_state *dm_old_crtc_state =
4080			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
4081	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
4082 int planes_count = 0;
4083 unsigned long flags;
4084
4085 /* update planes when needed */
4086 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
4087 struct drm_crtc *crtc = new_plane_state->crtc;
4088		struct drm_crtc_state *new_crtc_state;
4089		struct drm_framebuffer *fb = new_plane_state->fb;
4090		bool pflip_needed;
4091		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
4092
4093 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
4094 handle_cursor_update(plane, old_plane_state);
4095 continue;
4096 }
4097
4098 if (!fb || !crtc || pcrtc != crtc)
4099 continue;
4100
4101 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
4102 if (!new_crtc_state->active)
4103 continue;
4104
4105 pflip_needed = !state->allow_modeset;
4106
4107 spin_lock_irqsave(&crtc->dev->event_lock, flags);
4108 if (acrtc_attach->pflip_status != AMDGPU_FLIP_NONE) {
4109 DRM_ERROR("%s: acrtc %d, already busy\n",
4110 __func__,
4111 acrtc_attach->crtc_id);
4112			/* In commit tail framework this cannot happen */
4113 WARN_ON(1);
4114 }
4115 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
4116
4117 if (!pflip_needed) {
4118			WARN_ON(!dm_new_plane_state->dc_state);
4119
4120			plane_states_constructed[planes_count] = dm_new_plane_state->dc_state;
4121
4122 dc_stream_attach = acrtc_state->stream;
4123 planes_count++;
4124
4125		} else if (new_crtc_state->planes_changed) {
4126			/* Assume that even ONE crtc with an immediate flip
4127			 * means the entire commit can't wait for VBLANK
4128			 * TODO Check if this is correct
4129			 */
4130			*wait_for_vblank =
4131					new_pcrtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC ?
4132 false : true;
4133
4134 /* TODO: Needs rework for multiplane flip */
4135 if (plane->type == DRM_PLANE_TYPE_PRIMARY)
4136 drm_crtc_vblank_get(crtc);
4137
4138 amdgpu_dm_do_flip(
4139 crtc,
4140 fb,
4141					(uint32_t)drm_crtc_vblank_count(crtc) + *wait_for_vblank,
4142					dm_state->context);
4143 }
4144
4145 }
4146
4147 if (planes_count) {
4148 unsigned long flags;
4149
4150		if (new_pcrtc_state->event) {
4151
4152 drm_crtc_vblank_get(pcrtc);
4153
4154 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
4155 prepare_flip_isr(acrtc_attach);
4156 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
4157 }
4158
4159
4160		if (!commit_planes_to_stream(dm->dc,
4161 plane_states_constructed,
4162 planes_count,
4163 acrtc_state,
4164 dm_old_crtc_state,
4165					dm_state->context))
4166			dm_error("%s: Failed to attach plane!\n", __func__);
4167 } else {
4168 /*TODO BUG Here should go disable planes on CRTC. */
4169 }
4170}
4171
4172/**
4173 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
4174 * @crtc_state: the DRM CRTC state
4175 * @stream_state: the DC stream state.
4176 *
4177 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
4178 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
4179 */
4180static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
4181 struct dc_stream_state *stream_state)
4182{
4183 stream_state->mode_changed = crtc_state->mode_changed;
4184}
4185
4186static int amdgpu_dm_atomic_commit(struct drm_device *dev,
4187 struct drm_atomic_state *state,
4188 bool nonblock)
4189{
4190 struct drm_crtc *crtc;
4191	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
4192 struct amdgpu_device *adev = dev->dev_private;
4193 int i;
4194
4195 /*
4196	 * We evade vblanks and pflips on any crtc that
4197	 * should be changed. We do it here to flush & disable
4198	 * interrupts before drm_swap_state is called in drm_atomic_helper_commit,
4199	 * since it will update the crtc->dm_crtc_state->stream pointer which is
4200	 * used in the ISRs.
4201 */
4202	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
4203		struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
4204 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4205
4206		if (drm_atomic_crtc_needs_modeset(new_crtc_state) && dm_old_crtc_state->stream)
4207 manage_dm_interrupts(adev, acrtc, false);
4208 }
4209	/* Add a check here for SoCs that support a hardware cursor plane, to
4210	 * unset legacy_cursor_update */
4211
4212 return drm_atomic_helper_commit(dev, state, nonblock);
4213
4214 /*TODO Handle EINTR, reenable IRQ*/
4215}
4216
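/*
 * Editor's note: the interrupts disabled in amdgpu_dm_atomic_commit() are
 * re-enabled in amdgpu_dm_atomic_commit_tail() below, once the new stream
 * pointers are in place, so the ISRs never observe a half-updated
 * dm_crtc_state.
 */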
4217static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
4218{
4219 struct drm_device *dev = state->dev;
4220 struct amdgpu_device *adev = dev->dev_private;
4221 struct amdgpu_display_manager *dm = &adev->dm;
4222 struct dm_atomic_state *dm_state;
4223 uint32_t i, j;
4224	struct drm_crtc *crtc;
4225	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
4226 unsigned long flags;
4227 bool wait_for_vblank = true;
4228 struct drm_connector *connector;
4229	struct drm_connector_state *old_con_state, *new_con_state;
4230	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
4231
4232 drm_atomic_helper_update_legacy_modeset_state(dev, state);
4233
4234 dm_state = to_dm_atomic_state(state);
4235
4236 /* update changed items */
4237	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
4238		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4239
4240		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4241		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
4242
4243		DRM_DEBUG_DRIVER(
4244 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
4245 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
4246 "connectors_changed:%d\n",
4247 acrtc->crtc_id,
4248 new_crtc_state->enable,
4249 new_crtc_state->active,
4250 new_crtc_state->planes_changed,
4251 new_crtc_state->mode_changed,
4252 new_crtc_state->active_changed,
4253 new_crtc_state->connectors_changed);
4254
4255 /* Copy all transient state flags into dc state */
4256 if (dm_new_crtc_state->stream) {
4257 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
4258 dm_new_crtc_state->stream);
4259 }
4260
4261 /* handles headless hotplug case, updating new_state and
4262 * aconnector as needed
4263 */
4264
4265		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
4266
4267			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
4268
4269			if (!dm_new_crtc_state->stream) {
4270				/*
4271				 * this could happen because of issues with
4272				 * userspace notification delivery.
4273				 * In this case userspace tries to set a mode on
4274				 * a display which is in fact disconnected.
4275				 * dc_sink is NULL on the aconnector in this case.
4276				 * We expect a reset mode to come soon.
4277				 *
4278				 * This can also happen when an unplug is done
4279				 * during the resume sequence
4280				 *
4281				 * In this case, we want to pretend we still
4282				 * have a sink to keep the pipe running so that
4283				 * hw state is consistent with the sw state
4284 */
4285				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
4286 __func__, acrtc->base.base.id);
4287 continue;
4288 }
4289
4290			if (dm_old_crtc_state->stream)
4291				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
4292
4293			acrtc->enabled = true;
4294 acrtc->hw_mode = new_crtc_state->mode;
4295 crtc->hwmode = new_crtc_state->mode;
4296 } else if (modereset_required(new_crtc_state)) {
4297			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
4298
4299 /* i.e. reset mode */
4300			if (dm_old_crtc_state->stream)
4301				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
4302 }
4303 } /* for_each_crtc_in_state() */
4304
4305 /*
4306 * Add streams after required streams from new and replaced streams
4307 * are removed from freesync module
4308 */
4309 if (adev->dm.freesync_module) {
4310		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
4311					      new_crtc_state, i) {
4312			struct amdgpu_dm_connector *aconnector = NULL;
4313 struct dm_connector_state *dm_new_con_state = NULL;
4314 struct amdgpu_crtc *acrtc = NULL;
4315			bool modeset_needed;
4316
4317			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4318 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
4319 modeset_needed = modeset_required(
4320 new_crtc_state,
4321 dm_new_crtc_state->stream,
4322 dm_old_crtc_state->stream);
4323 /* We add stream to freesync if:
4324 * 1. Said stream is not null, and
4325 * 2. A modeset is requested. This means that the
4326 * stream was removed previously, and needs to be
4327 * replaced.
4328 */
4329 if (dm_new_crtc_state->stream == NULL ||
4330 !modeset_needed)
4331 continue;
4332
4333			acrtc = to_amdgpu_crtc(crtc);
4334
4335 aconnector =
4336 amdgpu_dm_find_first_crtc_matching_connector(
4337 state, crtc);
4338			if (!aconnector) {
4339 DRM_DEBUG_DRIVER("Atomic commit: Failed to "
4340 "find connector for acrtc "
4341 "id:%d skipping freesync "
4342 "init\n",
4343 acrtc->crtc_id);
4344 continue;
4345 }
4346
4347 mod_freesync_add_stream(adev->dm.freesync_module,
4348 dm_new_crtc_state->stream,
4349 &aconnector->caps);
4350 new_con_state = drm_atomic_get_new_connector_state(
4351 state, &aconnector->base);
4352 dm_new_con_state = to_dm_connector_state(new_con_state);
4353
4354 mod_freesync_set_user_enable(adev->dm.freesync_module,
4355 &dm_new_crtc_state->stream,
4356 1,
4357 &dm_new_con_state->user_enable);
4358		}
4359 }
4360
4361	if (dm_state->context) {
4362		dm_enable_per_frame_crtc_master_sync(dm_state->context);
4363		WARN_ON(!dc_commit_state(dm->dc, dm_state->context));
4364	}
4365
4366	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
4367		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4368
4369		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4370
4371		if (dm_new_crtc_state->stream != NULL) {
4372			const struct dc_stream_status *status =
4373					dc_stream_get_status(dm_new_crtc_state->stream);
4374
4375 if (!status)
4376				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
4377 else
4378 acrtc->otg_inst = status->primary_otg_inst;
4379 }
4380 }
4381
4382	/* Handle scaling and underscan changes */
4383	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
4384 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
4385 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
4386 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
4387 struct dc_stream_status *status = NULL;
4388
4389		if (acrtc) {
4390			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
4391 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
4392 }
4393
4394		/* Skip any modesets/resets */
4395		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
4396 continue;
4397
4398		/* Skip anything that is not a scaling or underscan change */
4399		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
4400 continue;
4401
4402		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4403
4404 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
4405 dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);
4406
4407 if (!dm_new_crtc_state->stream)
4408 continue;
4409
4410		status = dc_stream_get_status(dm_new_crtc_state->stream);
4411		WARN_ON(!status);
4412		WARN_ON(!status->plane_count);
4413
4414		/* TODO How does this work with MPO? */
4415		if (!commit_planes_to_stream(
4416				dm->dc,
4417 status->plane_states,
4418 status->plane_count,
4419 dm_new_crtc_state,
4420 to_dm_crtc_state(old_crtc_state),
4421				dm_state->context))
4422 dm_error("%s: Failed to update stream scaling!\n", __func__);
4423 }
4424
4425 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
4426 new_crtc_state, i) {
4427 /*
4428 * loop to enable interrupts on newly arrived crtc
4429 */
4430 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4431 bool modeset_needed;
4432
4433		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4434 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
4435 modeset_needed = modeset_required(
4436 new_crtc_state,
4437 dm_new_crtc_state->stream,
4438 dm_old_crtc_state->stream);
4439
4440 if (dm_new_crtc_state->stream == NULL || !modeset_needed)
4441 continue;
4442
4443 if (adev->dm.freesync_module)
4444 mod_freesync_notify_mode_change(
4445 adev->dm.freesync_module,
4446 &dm_new_crtc_state->stream, 1);
4447
4448 manage_dm_interrupts(adev, acrtc, true);
4449 }
4450
4451 /* update planes when needed per crtc*/
4452	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
4453		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4454
4455		if (dm_new_crtc_state->stream)
4456			amdgpu_dm_commit_planes(state, dev, dm, crtc, &wait_for_vblank);
4457 }
4458
4459
4460 /*
4461 * send vblank event on all events not handled in flip and
4462 * mark consumed event for drm_atomic_helper_commit_hw_done
4463 */
4464 spin_lock_irqsave(&adev->ddev->event_lock, flags);
4465	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
4466
4467 if (new_crtc_state->event)
4468 drm_send_event_locked(dev, &new_crtc_state->event->base);
4469
4470		new_crtc_state->event = NULL;
4471 }
4472 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
4473
4474 /* Signal HW programming completion */
4475 drm_atomic_helper_commit_hw_done(state);
4476
4477 if (wait_for_vblank)
4478		drm_atomic_helper_wait_for_flip_done(dev, state);
4479
4480 drm_atomic_helper_cleanup_planes(dev, state);
4481}
4482
4483
4484static int dm_force_atomic_commit(struct drm_connector *connector)
4485{
4486 int ret = 0;
4487 struct drm_device *ddev = connector->dev;
4488 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
4489 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
4490 struct drm_plane *plane = disconnected_acrtc->base.primary;
4491 struct drm_connector_state *conn_state;
4492 struct drm_crtc_state *crtc_state;
4493 struct drm_plane_state *plane_state;
4494
4495 if (!state)
4496 return -ENOMEM;
4497
4498 state->acquire_ctx = ddev->mode_config.acquire_ctx;
4499
4500 /* Construct an atomic state to restore previous display setting */
4501
4502 /*
4503 * Attach connectors to drm_atomic_state
4504 */
4505 conn_state = drm_atomic_get_connector_state(state, connector);
4506
4507 ret = PTR_ERR_OR_ZERO(conn_state);
4508 if (ret)
4509 goto err;
4510
4511 /* Attach crtc to drm_atomic_state*/
4512 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
4513
4514 ret = PTR_ERR_OR_ZERO(crtc_state);
4515 if (ret)
4516 goto err;
4517
4518 /* force a restore */
4519 crtc_state->mode_changed = true;
4520
4521 /* Attach plane to drm_atomic_state */
4522 plane_state = drm_atomic_get_plane_state(state, plane);
4523
4524 ret = PTR_ERR_OR_ZERO(plane_state);
4525 if (ret)
4526 goto err;
4527
4528
4529 /* Call commit internally with the state we just constructed */
4530 ret = drm_atomic_commit(state);
4531 if (!ret)
4532 return 0;
4533
4534err:
4535 DRM_ERROR("Restoring old state failed with %i\n", ret);
4536 drm_atomic_state_put(state);
4537
4538 return ret;
4539}
4540
4541/*
4542 * This function handles all cases when a set mode does not come upon hotplug.
4543 * This includes when the same display is unplugged and then plugged back into the
4544 * same port, and when we are running without usermode desktop manager support
4545 */
4546void dm_restore_drm_connector_state(struct drm_device *dev,
4547				    struct drm_connector *connector)
4548{
4549	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4550 struct amdgpu_crtc *disconnected_acrtc;
4551 struct dm_crtc_state *acrtc_state;
4552
4553 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
4554 return;
4555
4556 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
4557	if (!disconnected_acrtc)
4558		return;
4559
4560 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
4561 if (!acrtc_state->stream)
4562 return;
4563
4564 /*
4565 * If the previous sink is not released and different from the current,
4566 * we deduce we are in a state where we can not rely on usermode call
4567 * to turn on the display, so we do it here
4568 */
4569 if (acrtc_state->stream->sink != aconnector->dc_sink)
4570 dm_force_atomic_commit(&aconnector->base);
4571}
4572
4573/*
4574 * Grabs all modesetting locks to serialize against any blocking commits,
4575 * Waits for completion of all non blocking commits.
4576 */
4577static int do_aquire_global_lock(struct drm_device *dev,
4578				 struct drm_atomic_state *state)
4579{
4580 struct drm_crtc *crtc;
4581 struct drm_crtc_commit *commit;
4582 long ret;
4583
4584	/* Adding all modeset locks to acquire_ctx will
4585	 * ensure that when the framework releases it, the
4586	 * extra locks we are locking here will get released too
4587 */
4588 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
4589 if (ret)
4590 return ret;
4591
4592 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
4593 spin_lock(&crtc->commit_lock);
4594 commit = list_first_entry_or_null(&crtc->commit_list,
4595 struct drm_crtc_commit, commit_entry);
4596 if (commit)
4597 drm_crtc_commit_get(commit);
4598 spin_unlock(&crtc->commit_lock);
4599
4600 if (!commit)
4601 continue;
4602
4603 /* Make sure all pending HW programming completed and
4604 * page flips done
4605 */
4606 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
4607
4608 if (ret > 0)
4609 ret = wait_for_completion_interruptible_timeout(
4610 &commit->flip_done, 10*HZ);
4611
4612 if (ret == 0)
4613 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
4614					"timed out\n", crtc->base.id, crtc->name);
4615
4616 drm_crtc_commit_put(commit);
4617 }
4618
4619 return ret < 0 ? ret : 0;
4620}
4621
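/*
 * Editor's note: wait_for_completion_interruptible_timeout() returns a
 * negative value on signal, 0 on timeout, and the remaining jiffies (> 0)
 * on success -- hence the "ret > 0" chaining and the "ret == 0" timeout
 * message above.
 */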
4622static int dm_update_crtcs_state(struct dc *dc,
4623				 struct drm_atomic_state *state,
4624				 bool enable,
4625				 bool *lock_and_validation_needed)
4626{
4627	struct drm_crtc *crtc;
4628	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
4629	int i;
4630	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
4631	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
4632	struct dc_stream_state *new_stream;
4633	int ret = 0;
4634
4635 /*TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set */
4636 /* update changed items */
4637	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
4638		struct amdgpu_crtc *acrtc = NULL;
4639		struct amdgpu_dm_connector *aconnector = NULL;
4640		struct drm_connector_state *new_con_state = NULL;
4641		struct dm_connector_state *dm_conn_state = NULL;
4642
4643 new_stream = NULL;
4644
4645		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
4646		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4647		acrtc = to_amdgpu_crtc(crtc);
4648
4649		aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
4650
4651		/* TODO This hack should go away */
4652 if (aconnector && enable) {
4653			/* Make sure a fake sink is created in the plug-in scenario */
4654 new_con_state = drm_atomic_get_connector_state(state,
4655 &aconnector->base);
4656
4657 if (IS_ERR(new_con_state)) {
4658 ret = PTR_ERR_OR_ZERO(new_con_state);
4659 break;
4660 }
4661
4662			dm_conn_state = to_dm_connector_state(new_con_state);
4663
4664			new_stream = create_stream_for_sink(aconnector,
4665							    &new_crtc_state->mode,
4666							    dm_conn_state);
4667
4668 /*
4669			 * we can have no stream on ACTION_SET if a display
4670			 * was disconnected during S3; in this case it is not an
4671			 * error, the OS will be updated after detection, and
4672			 * will do the right thing on the next atomic commit
4673			 */
4674
4675			if (!new_stream) {
4676				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
4677 __func__, acrtc->base.base.id);
4678 break;
4679			}
4680
4681 if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
4682 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
4683 new_crtc_state->mode_changed = false;
4684 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
4685 new_crtc_state->mode_changed);
4686 }
4687		}
4688
4689		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
4690			goto next_crtc;
4691
4692		DRM_DEBUG_DRIVER(
4693 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
4694 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
4695 "connectors_changed:%d\n",
4696 acrtc->crtc_id,
4697 new_crtc_state->enable,
4698 new_crtc_state->active,
4699 new_crtc_state->planes_changed,
4700 new_crtc_state->mode_changed,
4701 new_crtc_state->active_changed,
4702 new_crtc_state->connectors_changed);
4703
4704 /* Remove stream for any changed/disabled CRTC */
4705 if (!enable) {
4706
4707			if (!dm_old_crtc_state->stream)
4708				goto next_crtc;
4709
4710			DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
4711					crtc->base.id);
4712
4713			/* i.e. reset mode */
4714			if (dc_remove_stream_from_ctx(
4715 dc,
4716 dm_state->context,
4717					dm_old_crtc_state->stream) != DC_OK) {
4718				ret = -EINVAL;
4719				goto fail;
4720 }
4721
4722			dc_stream_release(dm_old_crtc_state->stream);
4723			dm_new_crtc_state->stream = NULL;
4724
4725 *lock_and_validation_needed = true;
4726
4727 } else {/* Add stream for any updated/enabled CRTC */
4728 /*
4729 * Quick fix to prevent NULL pointer on new_stream when
4730 * added MST connectors not found in existing crtc_state in the chained mode
4731 * TODO: need to dig out the root cause of that
4732 */
4733 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
4734				goto next_crtc;
4735
4736			if (modereset_required(new_crtc_state))
4737				goto next_crtc;
4738
4739			if (modeset_required(new_crtc_state, new_stream,
4740					     dm_old_crtc_state->stream)) {
4741
4742				WARN_ON(dm_new_crtc_state->stream);
4743
4744				dm_new_crtc_state->stream = new_stream;
4745
4746 dc_stream_retain(new_stream);
4747
4748				DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
4749						crtc->base.id);
4750
4751				if (dc_add_stream_to_ctx(
4752 dc,
4753 dm_state->context,
4754						dm_new_crtc_state->stream) != DC_OK) {
4755					ret = -EINVAL;
4756					goto fail;
4757 }
4758
4759				*lock_and_validation_needed = true;
4760			}
4761		}
4762
4763next_crtc:
4764 /* Release extra reference */
4765 if (new_stream)
4766 dc_stream_release(new_stream);
4767
4768 /*
4769 * We want to do dc stream updates that do not require a
4770 * full modeset below.
4771 */
4772 if (!enable || !aconnector || modereset_required(new_crtc_state))
4773 continue;
4774 /*
4775 * Given above conditions, the dc state cannot be NULL because:
4776 * 1. We're attempting to enable a CRTC. Which has a...
4777 * 2. Valid connector attached, and
4778 * 3. User does not want to reset it (disable or mark inactive,
4779 * which can happen on a CRTC that's already disabled).
4780 * => It currently exists.
4781 */
4782 BUG_ON(dm_new_crtc_state->stream == NULL);
4783
4784		/* Color management settings */
4785 if (dm_new_crtc_state->base.color_mgmt_changed) {
4786 ret = amdgpu_dm_set_regamma_lut(dm_new_crtc_state);
4787 if (ret)
4788 goto fail;
4789 amdgpu_dm_set_ctm(dm_new_crtc_state);
4790 }
4791	}
4792
4793	return ret;
4794
4795fail:
4796 if (new_stream)
4797 dc_stream_release(new_stream);
4798 return ret;
4799}
4800
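/*
 * Editor's note: amdgpu_dm_atomic_check() calls dm_update_crtcs_state()
 * above twice -- first with enable == false to strip every stream that needs
 * a reset, then with enable == true to build the new ones -- so the DC
 * context never holds the old and the new stream for the same CRTC at once.
 */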
4801static int dm_update_planes_state(struct dc *dc,
4802 struct drm_atomic_state *state,
4803 bool enable,
4804 bool *lock_and_validation_needed)
4805{
4806 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
4807	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
4808 struct drm_plane *plane;
4809 struct drm_plane_state *old_plane_state, *new_plane_state;
4810	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
4811	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
4812	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
4813	int i;
4814 /* TODO return page_flip_needed() function */
4815 bool pflip_needed = !state->allow_modeset;
4816 int ret = 0;
4817
4818
4819 /* Add new planes, in reverse order as DC expectation */
4820 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
4821 new_plane_crtc = new_plane_state->crtc;
4822 old_plane_crtc = old_plane_state->crtc;
4823 dm_new_plane_state = to_dm_plane_state(new_plane_state);
4824 dm_old_plane_state = to_dm_plane_state(old_plane_state);
4825
4826 /*TODO Implement atomic check for cursor plane */
4827 if (plane->type == DRM_PLANE_TYPE_CURSOR)
4828 continue;
4829
4830 /* Remove any changed/removed planes */
4831 if (!enable) {
4832 if (pflip_needed)
4833 continue;
4834
4835 if (!old_plane_crtc)
4836 continue;
4837
4838 old_crtc_state = drm_atomic_get_old_crtc_state(
4839 state, old_plane_crtc);
4840			dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
4841
4842			if (!dm_old_crtc_state->stream)
4843 continue;
4844
4845			DRM_DEBUG_DRIVER("Disabling DRM plane: %d on DRM crtc %d\n",
4846					plane->base.id, old_plane_crtc->base.id);
4847
4848 if (!dc_remove_plane_from_context(
4849 dc,
4850 dm_old_crtc_state->stream,
4851 dm_old_plane_state->dc_state,
4852 dm_state->context)) {
4853
4854				ret = -EINVAL;
4855 return ret;
4856 }
4857
4858
4859 dc_plane_state_release(dm_old_plane_state->dc_state);
4860 dm_new_plane_state->dc_state = NULL;
4861
4862			*lock_and_validation_needed = true;
4863
4864		} else { /* Add new planes */
4865			struct dc_plane_state *dc_new_plane_state;
4866
4867 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
4868 continue;
4869
4870 if (!new_plane_crtc)
4871 continue;
4872
4873			new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
4874			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4875
4876			if (!dm_new_crtc_state->stream)
4877 continue;
4878
4879 if (pflip_needed)
4880 continue;
4881
4882			WARN_ON(dm_new_plane_state->dc_state);
4883
4884			dc_new_plane_state = dc_create_plane_state(dc);
4885 if (!dc_new_plane_state)
4886 return -ENOMEM;
4887
4888 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
4889 plane->base.id, new_plane_crtc->base.id);
4890
4891 ret = fill_plane_attributes(
4892 new_plane_crtc->dev->dev_private,
4893				dc_new_plane_state,
4894				new_plane_state,
4895				new_crtc_state);
4896 if (ret) {
4897 dc_plane_state_release(dc_new_plane_state);
4898				return ret;
4899			}
4900
4901 /*
4902 * Any atomic check errors that occur after this will
4903 * not need a release. The plane state will be attached
4904 * to the stream, and therefore part of the atomic
4905 * state. It'll be released when the atomic state is
4906 * cleaned.
4907 */
4908 if (!dc_add_plane_to_context(
4909 dc,
4910					dm_new_crtc_state->stream,
4911					dc_new_plane_state,
4912 dm_state->context)) {
4913
4914				dc_plane_state_release(dc_new_plane_state);
4915				return -EINVAL;
4916			}
4917
4918 dm_new_plane_state->dc_state = dc_new_plane_state;
4919
4920 /* Tell DC to do a full surface update every time there
4921 * is a plane change. Inefficient, but works for now.
4922 */
4923 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
4924
4925			*lock_and_validation_needed = true;
4926		}
4927	}
4928
4929
4930 return ret;
4931}
4932
4933static int amdgpu_dm_atomic_check(struct drm_device *dev,
4934 struct drm_atomic_state *state)
4935{
4936 struct amdgpu_device *adev = dev->dev_private;
4937 struct dc *dc = adev->dm.dc;
4938 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
4939 struct drm_connector *connector;
4940	struct drm_connector_state *old_con_state, *new_con_state;
4941	struct drm_crtc *crtc;
4942	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
4943	int ret, i;
4944
4945 /*
4946	 * This bool will be set to true for any modeset/reset
4947	 * or plane update which implies a non-fast surface update.
4948 */
4949 bool lock_and_validation_needed = false;
4950
4951 ret = drm_atomic_helper_check_modeset(dev, state);
4952	if (ret)
4953		goto fail;
4954
4955 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
4956 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
4957 !new_crtc_state->color_mgmt_changed)
4958 continue;
4959
4960 if (!new_crtc_state->enable)
4961 continue;
4962
4963 ret = drm_atomic_add_affected_connectors(state, crtc);
4964 if (ret)
4965 return ret;
4966
4967 ret = drm_atomic_add_affected_planes(state, crtc);
4968 if (ret)
4969 goto fail;
4970 }
4971
62f55537
AG
4972 dm_state->context = dc_create_state();
4973 ASSERT(dm_state->context);
4974	dc_resource_state_copy_construct_current(dc, dm_state->context);
4975
4976	/* Remove existing planes if they are modified */
4977 ret = dm_update_planes_state(dc, state, false, &lock_and_validation_needed);
4978 if (ret) {
4979 goto fail;
4980 }
4981
4982 /* Disable all crtcs which require disable */
4983 ret = dm_update_crtcs_state(dc, state, false, &lock_and_validation_needed);
4984 if (ret) {
4985 goto fail;
4986 }
4987
4988 /* Enable all crtcs which require enable */
4989 ret = dm_update_crtcs_state(dc, state, true, &lock_and_validation_needed);
4990 if (ret) {
4991 goto fail;
4992 }
4993
4994 /* Add new/modified planes */
4995 ret = dm_update_planes_state(dc, state, true, &lock_and_validation_needed);
4996 if (ret) {
4997 goto fail;
4998 }
4999
5000 /* Run this here since we want to validate the streams we created */
5001 ret = drm_atomic_helper_check_planes(dev, state);
5002 if (ret)
5003 goto fail;
5004
5005	/* Check scaling and underscan changes */
5006	/* TODO Removed scaling changes validation due to inability to commit
5007	 * a new stream into the context w/o causing a full reset. Need to
5008 * decide how to handle.
5009 */
5010	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5011 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
5012 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
5013 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
5014
5015 /* Skip any modesets/resets */
5016 if (!acrtc || drm_atomic_crtc_needs_modeset(
5017 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
5018 continue;
5019
5020		/* Skip anything that is not a scaling or underscan change */
5021		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
5022 continue;
5023
5024 lock_and_validation_needed = true;
5025 }
5026
5027 /*
5028	 * For the full-update case, when
5029	 * removing/adding/updating streams on one CRTC while flipping
5030	 * on another CRTC,
5031	 * acquiring the global lock will guarantee that any such full
5032	 * update commit
5033	 * will wait for completion of any outstanding flip using DRM's
5034	 * synchronization events.
5035 */
5036
5037 if (lock_and_validation_needed) {
5038
5039 ret = do_aquire_global_lock(dev, state);
5040 if (ret)
5041 goto fail;
5042
5043		if (dc_validate_global_state(dc, dm_state->context) != DC_OK) {
5044 ret = -EINVAL;
5045 goto fail;
5046 }
5047 }
5048
5049 /* Must be success */
5050 WARN_ON(ret);
5051 return ret;
5052
5053fail:
5054 if (ret == -EDEADLK)
5055		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
5056	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
5057		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
5058	else
5059		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
5060
5061 return ret;
5062}
5063
5064static bool is_dp_capable_without_timing_msa(struct dc *dc,
5065					     struct amdgpu_dm_connector *amdgpu_dm_connector)
5066{
5067 uint8_t dpcd_data;
5068 bool capable = false;
5069
5070	if (amdgpu_dm_connector->dc_link &&
5071 dm_helpers_dp_read_dpcd(
5072 NULL,
5073			amdgpu_dm_connector->dc_link,
5074 DP_DOWN_STREAM_PORT_COUNT,
5075 &dpcd_data,
5076 sizeof(dpcd_data))) {
5077		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
5078 }
5079
5080 return capable;
5081}
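/*
 * Editor's note: DP_DOWN_STREAM_PORT_COUNT is DPCD register 0x007, and
 * DP_MSA_TIMING_PAR_IGNORED is bit 6 of that byte, so the test above
 * reduces to a single bit check:
 *
 *	capable = dpcd_data & DP_MSA_TIMING_PAR_IGNORED;
 */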
5082void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector *connector,
5083					   struct edid *edid)
5084{
5085 int i;
5086 bool edid_check_required;
5087 struct detailed_timing *timing;
5088 struct detailed_non_pixel *data;
5089 struct detailed_data_monitor_range *range;
5090 struct amdgpu_dm_connector *amdgpu_dm_connector =
5091 to_amdgpu_dm_connector(connector);
5092	struct dm_connector_state *dm_con_state;
5093
5094 struct drm_device *dev = connector->dev;
5095 struct amdgpu_device *adev = dev->dev_private;
5096
5097 if (!connector->state) {
5098 DRM_ERROR("%s - Connector has no state", __func__);
5099 return;
5100 }
5101
5102 dm_con_state = to_dm_connector_state(connector->state);
5103
5104	edid_check_required = false;
5105	if (!amdgpu_dm_connector->dc_sink) {
5106 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
5107 return;
5108 }
5109 if (!adev->dm.freesync_module)
5110 return;
5111 /*
5112 * if edid non zero restrict freesync only for dp and edp
5113 */
5114 if (edid) {
5115		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
5116			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
5117 edid_check_required = is_dp_capable_without_timing_msa(
5118 adev->dm.dc,
5119						amdgpu_dm_connector);
5120 }
5121 }
5122	dm_con_state->freesync_capable = false;
5123 if (edid_check_required == true && (edid->version > 1 ||
5124 (edid->version == 1 && edid->revision > 1))) {
5125 for (i = 0; i < 4; i++) {
5126
5127 timing = &edid->detailed_timings[i];
5128 data = &timing->data.other_data;
5129 range = &data->data.range;
5130 /*
5131 * Check if monitor has continuous frequency mode
5132 */
5133 if (data->type != EDID_DETAIL_MONITOR_RANGE)
5134 continue;
5135 /*
5136 * Check for flag range limits only. If flag == 1 then
5137 * no additional timing information provided.
5138 * Default GTF, GTF Secondary curve and CVT are not
5139 * supported
5140 */
5141 if (range->flags != 1)
5142 continue;
5143
5144			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
5145			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
5146			amdgpu_dm_connector->pixel_clock_mhz =
5147				range->pixel_clock_mhz * 10;
5147 range->pixel_clock_mhz * 10;
5148 break;
5149 }
5150
5151 if (amdgpu_dm_connector->max_vfreq -
5152 amdgpu_dm_connector->min_vfreq > 10) {
5153 amdgpu_dm_connector->caps.supported = true;
5154 amdgpu_dm_connector->caps.min_refresh_in_micro_hz =
5155 amdgpu_dm_connector->min_vfreq * 1000000;
5156 amdgpu_dm_connector->caps.max_refresh_in_micro_hz =
5157 amdgpu_dm_connector->max_vfreq * 1000000;
5158			dm_con_state->freesync_capable = true;
5159 }
5160 }
5161
5162 /*
5163 * TODO figure out how to notify user-mode or DRM of freesync caps
5164 * once we figure out how to deal with freesync in an upstreamable
5165 * fashion
5166 */
5167
5168}
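/*
 * Editor's worked example for the range check above: an EDID advertising
 * min_vfreq = 40 and max_vfreq = 60 spans a 20 Hz window (> 10), so the
 * connector is marked freesync capable with a refresh range of
 * 40000000..60000000 uHz.
 */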
5169
5170void amdgpu_dm_remove_sink_from_freesync_module(struct drm_connector *connector)
5171{
5172 /*
5173 * TODO fill in once we figure out how to deal with freesync in
5174 * an upstreamable fashion
5175 */
5176}