drm/amd/display: Pass log_mask from DM
[linux-2.6-block.git] / drivers / gpu / drm / amd / display / amdgpu_dm / amdgpu_dm.c
CommitLineData
4562236b
HW
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "dm_services_types.h"
27#include "dc.h"
1dc90497 28#include "dc/inc/core_types.h"
4562236b
HW
29
30#include "vid.h"
31#include "amdgpu.h"
a49dcb88 32#include "amdgpu_display.h"
4562236b
HW
33#include "atom.h"
34#include "amdgpu_dm.h"
e7b07cee 35#include "amdgpu_pm.h"
4562236b
HW
36
37#include "amd_shared.h"
38#include "amdgpu_dm_irq.h"
39#include "dm_helpers.h"
e7b07cee
HW
40#include "dm_services_types.h"
41#include "amdgpu_dm_mst_types.h"
4562236b
HW
42
43#include "ivsrcid/ivsrcid_vislands30.h"
44
45#include <linux/module.h>
46#include <linux/moduleparam.h>
47#include <linux/version.h>
e7b07cee 48#include <linux/types.h>
4562236b 49
e7b07cee 50#include <drm/drmP.h>
4562236b
HW
51#include <drm/drm_atomic.h>
52#include <drm/drm_atomic_helper.h>
53#include <drm/drm_dp_mst_helper.h>
e7b07cee
HW
54#include <drm/drm_fb_helper.h>
55#include <drm/drm_edid.h>
4562236b
HW
56
57#include "modules/inc/mod_freesync.h"
58
ff5ef992
AD
59#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
60#include "ivsrcid/irqsrcs_dcn_1_0.h"
61
62#include "raven1/DCN/dcn_1_0_offset.h"
63#include "raven1/DCN/dcn_1_0_sh_mask.h"
64#include "vega10/soc15ip.h"
65
66#include "soc15_common.h"
67#endif
68
e7b07cee
HW
69#include "modules/inc/mod_freesync.h"
70
71#include "i2caux_interface.h"
72
73
/* Default per-pipe plane types: six pipes, all driving primary planes
 * (no dedicated underlay pipe on these ASICs). */
static enum drm_plane_type dm_plane_type_default[AMDGPU_MAX_PLANES] = {
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
};
82
/* Carrizo plane types: three primary pipes plus one YUV-capable underlay
 * exposed as an overlay plane.
 * NOTE(review): identifier spells "carizzo"; kept as-is since it may be
 * referenced elsewhere in the file. */
static enum drm_plane_type dm_plane_type_carizzo[AMDGPU_MAX_PLANES] = {
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_OVERLAY,/* YUV Capable Underlay */
};
89
/* Stoney plane types: two primary pipes plus one YUV-capable underlay
 * exposed as an overlay plane. */
static enum drm_plane_type dm_plane_type_stoney[AMDGPU_MAX_PLANES] = {
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_OVERLAY, /* YUV Capable Underlay */
};
95
4562236b
HW
96/*
97 * dm_vblank_get_counter
98 *
99 * @brief
100 * Get counter for number of vertical blanks
101 *
102 * @param
103 * struct amdgpu_device *adev - [in] desired amdgpu device
104 * int disp_idx - [in] which CRTC to get the counter from
105 *
106 * @return
107 * Counter for vertical blanks
108 */
109static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
110{
111 if (crtc >= adev->mode_info.num_crtc)
112 return 0;
113 else {
114 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
da5c47f6
AG
115 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
116 acrtc->base.state);
4562236b 117
da5c47f6
AG
118
119 if (acrtc_state->stream == NULL) {
0971c40e
HW
120 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
121 crtc);
4562236b
HW
122 return 0;
123 }
124
da5c47f6 125 return dc_stream_get_vblank_counter(acrtc_state->stream);
4562236b
HW
126 }
127}
128
129static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
130 u32 *vbl, u32 *position)
131{
81c50963
ST
132 uint32_t v_blank_start, v_blank_end, h_position, v_position;
133
4562236b
HW
134 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
135 return -EINVAL;
136 else {
137 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
da5c47f6
AG
138 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
139 acrtc->base.state);
4562236b 140
da5c47f6 141 if (acrtc_state->stream == NULL) {
0971c40e
HW
142 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
143 crtc);
4562236b
HW
144 return 0;
145 }
146
81c50963
ST
147 /*
148 * TODO rework base driver to use values directly.
149 * for now parse it back into reg-format
150 */
da5c47f6 151 dc_stream_get_scanoutpos(acrtc_state->stream,
81c50963
ST
152 &v_blank_start,
153 &v_blank_end,
154 &h_position,
155 &v_position);
156
e806208d
AG
157 *position = v_position | (h_position << 16);
158 *vbl = v_blank_start | (v_blank_end << 16);
4562236b
HW
159 }
160
161 return 0;
162}
163
/* dm_is_idle - IP-block idle query; stubbed to always report idle. */
static bool dm_is_idle(void *handle)
{
	/* XXX todo: query real hardware idle state */
	return true;
}
169
/* dm_wait_for_idle - IP-block wait-for-idle; stubbed to succeed. */
static int dm_wait_for_idle(void *handle)
{
	/* XXX todo: block until hardware is actually idle */
	return 0;
}
175
/* dm_check_soft_reset - IP-block hang check; stubbed: never needs reset. */
static bool dm_check_soft_reset(void *handle)
{
	return false;
}
180
/* dm_soft_reset - IP-block soft reset; stubbed to succeed. */
static int dm_soft_reset(void *handle)
{
	/* XXX todo: perform real soft reset */
	return 0;
}
186
/*
 * get_crtc_by_otg_inst - map an OTG (output timing generator) instance
 * number back to the amdgpu_crtc driving it.
 *
 * Returns the matching CRTC, CRTC 0 (with a WARN) for the historical
 * otg_inst == -1 case, or NULL when no CRTC uses that instance.
 */
static struct amdgpu_crtc *get_crtc_by_otg_inst(
	struct amdgpu_device *adev,
	int otg_inst)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	/*
	 * following if is check inherited from both functions where this one is
	 * used now. Need to be checked why it could happen.
	 */
	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}
213
/*
 * dm_pflip_high_irq - page-flip-done interrupt handler.
 *
 * Resolves the CRTC from the IRQ source, and if a flip was actually
 * submitted on it, updates the vblank count and delivers the pending
 * vblank event to userspace under the DRM event lock.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	/* event_lock protects pflip_status and the pending event. */
	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}

	/* wake up userspace */
	if (amdgpu_crtc->event) {
		/* Update to correct count/ts if racing with vblank irq */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);

		/* page flip completed. clean up */
		amdgpu_crtc->event = NULL;

	} else
		WARN_ON(1);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
			 __func__, amdgpu_crtc->crtc_id, amdgpu_crtc);

	/* Drop the vblank reference taken when the flip was submitted. */
	drm_crtc_vblank_put(&amdgpu_crtc->base);
}
264
265static void dm_crtc_high_irq(void *interrupt_params)
266{
267 struct common_irq_params *irq_params = interrupt_params;
268 struct amdgpu_device *adev = irq_params->adev;
269 uint8_t crtc_index = 0;
270 struct amdgpu_crtc *acrtc;
271
b57de80a 272 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
4562236b
HW
273
274 if (acrtc)
275 crtc_index = acrtc->crtc_id;
276
277 drm_handle_vblank(adev->ddev, crtc_index);
278}
279
280static int dm_set_clockgating_state(void *handle,
281 enum amd_clockgating_state state)
282{
283 return 0;
284}
285
286static int dm_set_powergating_state(void *handle,
287 enum amd_powergating_state state)
288{
289 return 0;
290}
291
292/* Prototypes of private functions */
293static int dm_early_init(void* handle);
294
/*
 * hotplug_notify_work_func - deferred-work handler that forwards an MST
 * hotplug to userspace via the KMS hotplug uevent.
 */
static void hotplug_notify_work_func(struct work_struct *work)
{
	struct amdgpu_display_manager *dm = container_of(work, struct amdgpu_display_manager, mst_hotplug_work);
	struct drm_device *dev = dm->ddev;

	drm_kms_helper_hotplug_event(dev);
}
302
a32e24b4
RL
#ifdef ENABLE_FBC
#include "dal_asic_id.h"
/* Allocate memory for FBC compressed data */
/* TODO: Dynamic allocation */
/* Sized for one 4K (3840x2160) 32bpp frame. */
#define AMDGPU_FBC_SIZE (3840 * 2160 * 4)

/*
 * amdgpu_dm_initialize_fbc - allocate the VRAM buffer that will hold
 * frame-buffer-compression data. Idempotent: only allocates when no
 * buffer object exists yet. Failure is logged but not fatal.
 */
void amdgpu_dm_initialize_fbc(struct amdgpu_device *adev)
{
	int r;
	/* NOTE(review): "comressor" typo comes from the struct declaration
	 * elsewhere; cannot be fixed here. */
	struct dm_comressor_info *compressor = &adev->dm.compressor;

	if (!compressor->bo_ptr) {
		r = amdgpu_bo_create_kernel(adev, AMDGPU_FBC_SIZE, PAGE_SIZE,
				AMDGPU_GEM_DOMAIN_VRAM, &compressor->bo_ptr,
				&compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize fbc\n");
	}

}
#endif
325
326
4562236b
HW
/* Init display KMS
 *
 * Creates the Display Core (DC) instance, the freesync module, and the
 * DRM-facing display state (connectors, CRTCs, vblank support).
 *
 * Returns 0 on success, -1 on failure (after amdgpu_dm_fini cleanup).
 */
int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;

	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	DRM_INFO("DAL is enabled\n");
	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));

	/* initialize DAL's lock (for SYNC context use) */
	spin_lock_init(&adev->dm.dal_lock);

	/* initialize DAL's mutex */
	mutex_init(&adev->dm.dal_mutex);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	/* Describe the ASIC to DC so it can pick the right resource pool. */
	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->rev_id;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->mc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	adev->dm.dal = NULL;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	/* DM passes the log mask down so it controls DC's logging. */
	init_data.log_mask = DC_DEFAULT_LOG_MASK;

#ifdef ENABLE_FBC
	if (adev->family == FAMILY_CZ)
		amdgpu_dm_initialize_fbc(adev);
	init_data.fbc_gpu_addr = adev->dm.compressor.gpu_addr;
#endif
	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	/* NOTE(review): init deliberately continues when dc_create() fails;
	 * confirm downstream users tolerate a NULL dc before tightening. */
	if (!adev->dm.dc)
		DRM_INFO("Display Core failed to initialize!\n");

	INIT_WORK(&adev->dm.mst_hotplug_work, hotplug_notify_work_func);

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_INFO("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_INFO("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -1;
}
429
430void amdgpu_dm_fini(struct amdgpu_device *adev)
431{
432 amdgpu_dm_destroy_drm_device(&adev->dm);
433 /*
434 * TODO: pageflip, vlank interrupt
435 *
436 * amdgpu_dm_irq_fini(adev);
437 */
438
439 if (adev->dm.cgs_device) {
440 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
441 adev->dm.cgs_device = NULL;
442 }
443 if (adev->dm.freesync_module) {
444 mod_freesync_destroy(adev->dm.freesync_module);
445 adev->dm.freesync_module = NULL;
446 }
447 /* DC Destroy TODO: Replace destroy DAL */
21de3396 448 if (adev->dm.dc)
4562236b 449 dc_destroy(&adev->dm.dc);
4562236b
HW
450 return;
451}
452
/* moved from amdgpu_dm_kms.c */
/* Legacy teardown hook; intentionally empty. Declared (void) rather than
 * () so the prototype specifies "no arguments" instead of "unspecified
 * arguments" (pre-C23 semantics of an empty parameter list). */
void amdgpu_dm_destroy(void)
{
}
457
/* dm_sw_init - software init hook; nothing to set up for DM. */
static int dm_sw_init(void *handle)
{
	return 0;
}
462
/* dm_sw_fini - software teardown hook; nothing to release for DM. */
static int dm_sw_fini(void *handle)
{
	return 0;
}
467
7abcf6b5 468static int detect_mst_link_for_all_connectors(struct drm_device *dev)
4562236b 469{
c84dec2f 470 struct amdgpu_dm_connector *aconnector;
4562236b 471 struct drm_connector *connector;
7abcf6b5 472 int ret = 0;
4562236b
HW
473
474 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
475
476 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
c84dec2f 477 aconnector = to_amdgpu_dm_connector(connector);
7abcf6b5
AG
478 if (aconnector->dc_link->type == dc_connection_mst_branch) {
479 DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
480 aconnector, aconnector->base.base.id);
481
482 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
483 if (ret < 0) {
484 DRM_ERROR("DM_MST: Failed to start MST\n");
485 ((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
486 return ret;
4562236b 487 }
7abcf6b5 488 }
4562236b
HW
489 }
490
491 drm_modeset_unlock(&dev->mode_config.connection_mutex);
7abcf6b5
AG
492 return ret;
493}
494
495static int dm_late_init(void *handle)
496{
497 struct drm_device *dev = ((struct amdgpu_device *)handle)->ddev;
498 int r = detect_mst_link_for_all_connectors(dev);
499
500 return r;
4562236b
HW
501}
502
/*
 * s3_handle_mst - suspend or resume the MST topology manager of every
 * root MST connector, around an S3 transition.
 *
 * @suspend: true on the way into S3, false on resume.
 */
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_dm_connector(connector);
		/* Only roots: connectors with an mst_port are children
		 * managed by their root's topology manager. */
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    !aconnector->mst_port) {

			if (suspend)
				drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr);
			else
				drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr);
		}
	}

	drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
524
/* dm_hw_init - hardware init hook: create the display manager, then
 * arm hotplug detection. */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = handle;

	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}
534
/* dm_hw_fini - hardware teardown hook: disarm HPD, then unwind IRQ
 * support and the display manager. */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = handle;

	amdgpu_dm_hpd_fini(adev);
	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);

	return 0;
}
545
/*
 * dm_suspend - S3 entry: quiesce MST and IRQs, snapshot the atomic
 * state for replay on resume, then put DC into D3.
 */
static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	/* Stop MST topology management before the links go down. */
	s3_handle_mst(adev->ddev, true);

	amdgpu_dm_irq_suspend(adev);

	/* cached_state must have been consumed by the previous resume. */
	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);

	dc_set_power_state(
		dm->dc,
		DC_ACPI_CM_POWER_STATE_D3
		);

	return ret;
}
566
c84dec2f 567struct amdgpu_dm_connector *amdgpu_dm_find_first_crct_matching_connector(
4562236b
HW
568 struct drm_atomic_state *state,
569 struct drm_crtc *crtc,
570 bool from_state_var)
571{
572 uint32_t i;
573 struct drm_connector_state *conn_state;
574 struct drm_connector *connector;
575 struct drm_crtc *crtc_from_state;
576
577 for_each_connector_in_state(
578 state,
579 connector,
580 conn_state,
581 i) {
582 crtc_from_state =
583 from_state_var ?
584 conn_state->crtc :
585 connector->state->crtc;
586
587 if (crtc_from_state == crtc)
c84dec2f 588 return to_amdgpu_dm_connector(connector);
4562236b
HW
589 }
590
591 return NULL;
592}
593
4562236b
HW
594static int dm_resume(void *handle)
595{
596 struct amdgpu_device *adev = handle;
597 struct amdgpu_display_manager *dm = &adev->dm;
598
599 /* power on hardware */
600 dc_set_power_state(
601 dm->dc,
a3621485
AG
602 DC_ACPI_CM_POWER_STATE_D0
603 );
4562236b
HW
604
605 return 0;
606}
607
/*
 * amdgpu_dm_display_resume - S3 exit, phase 2: re-arm HPD, re-enable
 * MST, re-detect all connectors, and replay the atomic state cached by
 * dm_suspend().
 *
 * Returns 0 on success or the error from drm_atomic_helper_resume().
 */
int amdgpu_dm_display_resume(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int ret = 0;
	int i;

	/* program HPD filter */
	dc_resume(dm->dc);

	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* Do detection */
	list_for_each_entry(connector,
			&ddev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_dm_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
		/* Clear the stale sink so the update below re-resolves it. */
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}

	/* Force mode set in atomic commit */
	for_each_crtc_in_state(adev->dm.cached_state, crtc, crtc_state, i)
		crtc_state->active_changed = true;

	ret = drm_atomic_helper_resume(ddev, adev->dm.cached_state);

	/* cached_state is consumed exactly once per suspend/resume cycle. */
	drm_atomic_state_put(adev->dm.cached_state);
	adev->dm.cached_state = NULL;

	amdgpu_dm_irq_resume_late(adev);

	return ret;
}
663
/* IP-block callbacks wiring the display manager into amdgpu's common
 * init/suspend/reset state machine. Most state hooks are stubs because
 * DC manages the corresponding hardware state itself. */
static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};
681
/* IP block descriptor registering the DM as the DCE display IP, v1.0. */
const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};
690
ca3268c4
HW
691
692struct drm_atomic_state *
693dm_atomic_state_alloc(struct drm_device *dev)
694{
695 struct dm_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
696
1dc90497 697 if (!state)
ca3268c4 698 return NULL;
1dc90497
AG
699
700 if (drm_atomic_state_init(dev, &state->base) < 0)
701 goto fail;
702
ca3268c4 703 return &state->base;
1dc90497
AG
704
705fail:
706 kfree(state);
707 return NULL;
ca3268c4
HW
708}
709
0a323b84
AG
/*
 * dm_atomic_state_clear - .atomic_state_clear hook: release the DC state
 * held by a dm_atomic_state before the core clears the base state for
 * reuse.
 */
static void
dm_atomic_state_clear(struct drm_atomic_state *state)
{
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);

	if (dm_state->context) {
		dc_release_state(dm_state->context);
		dm_state->context = NULL;
	}

	drm_atomic_state_default_clear(state);
}
722
/*
 * dm_atomic_state_alloc_free - .atomic_state_free hook: release and free
 * a state allocated by dm_atomic_state_alloc().
 */
static void
dm_atomic_state_alloc_free(struct drm_atomic_state *state)
{
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
	drm_atomic_state_default_release(state);
	kfree(dm_state);
}
730
/* Mode-config hooks: DM supplies atomic check/commit plus custom atomic
 * state alloc/clear/free so DC state travels with DRM atomic state. */
static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_user_framebuffer_create,
	.output_poll_changed = amdgpu_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = amdgpu_dm_atomic_commit,
	.atomic_state_alloc = dm_atomic_state_alloc,
	.atomic_state_clear = dm_atomic_state_clear,
	.atomic_state_free = dm_atomic_state_alloc_free
};
740
/* Helper hooks: DM provides its own commit_tail for hardware programming. */
static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};
744
/*
 * amdgpu_dm_update_connector_after_detect - sync DRM connector state
 * (dc_sink, EDID property, freesync registration) with the outcome of a
 * dc link detection. Called after dc_link_detect() on HPD, HPD-RX and
 * resume paths.
 */
void amdgpu_dm_update_connector_after_detect(
	struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;


	sink = aconnector->dc_link->local_sink;

	/* Edid mgmt connector gets first update only in mode_valid hook and then
	 * the connector sink is set to either fake or physical sink depends on link status.
	 * don't do it here if you are during boot
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/* For S3 resume with headless use eml_sink to fake stream
		 * because on resume connector->sink is set to NULL
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_remove_sink_from_freesync_module(
								connector);
				/* retain and release below are used for
				 * bump up refcount for sink because the link don't point
				 * to it anymore after disconnect so on next crtc to connector
				 * reshuffle by UMD we will get into unwanted dc_sink release
				 */
				if (aconnector->dc_sink != aconnector->dc_em_sink)
					dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			amdgpu_dm_add_sink_to_freesync_module(
						connector, aconnector->edid);
		} else {
			amdgpu_dm_remove_sink_from_freesync_module(connector);
			if (!aconnector->dc_sink)
				aconnector->dc_sink = aconnector->dc_em_sink;
			else if (aconnector->dc_sink != aconnector->dc_em_sink)
				dc_sink_retain(aconnector->dc_sink);
		}

		mutex_unlock(&dev->mode_config.mutex);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
		return;

	if (aconnector->dc_sink == sink) {
		/* We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!! */
		DRM_INFO("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				aconnector->connector_id);
		return;
	}

	DRM_INFO("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
		aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/* 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do */
	if (sink) {
		/* TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here. */
		if (aconnector->dc_sink)
			amdgpu_dm_remove_sink_from_freesync_module(
							connector);

		aconnector->dc_sink = sink;
		if (sink->dc_edid.length == 0)
			aconnector->edid = NULL;
		else {
			aconnector->edid =
				(struct edid *) sink->dc_edid.raw_edid;


			drm_mode_connector_update_edid_property(connector,
					aconnector->edid);
		}
		amdgpu_dm_add_sink_to_freesync_module(connector, aconnector->edid);

	} else {
		/* Disconnected: drop EDID, modes and the sink reference. */
		amdgpu_dm_remove_sink_from_freesync_module(connector);
		drm_mode_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		aconnector->dc_sink = NULL;
	}

	mutex_unlock(&dev->mode_config.mutex);
}
849
/*
 * handle_hpd_irq - long-pulse hotplug handler for a single connector:
 * re-detects the link and, when its state changed, restores connector
 * state and notifies userspace.
 */
static void handle_hpd_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;

	/* In case of failure or MST no need to update connector status or notify the OS
	 * since (for MST case) MST does this in it's own context.
	 */
	mutex_lock(&aconnector->hpd_lock);

	/* A real plug event supersedes any forced fake enable. */
	if (aconnector->fake_enable)
		aconnector->fake_enable = false;

	if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
		amdgpu_dm_update_connector_after_detect(aconnector);


		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		/* Don't bother userspace while the connector is forced. */
		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);

}
878
/*
 * dm_handle_hpd_rx_irq - drain pending MST sideband/IRQ notifications
 * from the sink's DPCD.
 *
 * Reads the sink IRQ vector (DPCD 0x200.. pre-1.2, ESI 0x2002.. for
 * 1.2+), hands it to the MST manager, ACKs handled IRQs back to the
 * sink, and repeats until no IRQ remains or max_process_count caps the
 * loop (guards against a stuck sink).
 */
static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;

	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	/* Pick the DPCD IRQ-vector location by the sink's DPCD revision. */
	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

	while (dret == dpcd_bytes_to_read &&
		process_count < max_process_count) {
		uint8_t retry;
		dret = 0;

		process_count++;

		DRM_DEBUG_KMS("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify down stream */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			/* ACK writes can fail transiently; retry up to 3x. */
			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is new irq to be handle */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else
			break;
	}

	if (process_count == max_process_count)
		DRM_DEBUG_KMS("Loop exceeded max iterations\n");
}
955
/*
 * handle_hpd_rx_irq - short-pulse (HPD RX) handler: services link-loss /
 * downstream-port changes on single links and dispatches MST sideband
 * traffic to dm_handle_hpd_rx_irq().
 */
static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	const struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;

	/* TODO:Temporary add mutex to protect hpd interrupt not have a gpio
	 * conflict, after implement i2c helper, this mutex should be
	 * retired.
	 */
	if (aconnector->dc_link->type != dc_connection_mst_branch)
		mutex_lock(&aconnector->hpd_lock);

	if (dc_link_handle_hpd_rx_irq(aconnector->dc_link, NULL) &&
			!is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPDRX)) {
			amdgpu_dm_update_connector_after_detect(aconnector);


			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		}
	}
	/* MST sideband messages also arrive as short pulses. */
	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
		(dc_link->type == dc_connection_mst_branch))
		dm_handle_hpd_rx_irq(aconnector);

	if (aconnector->dc_link->type != dc_connection_mst_branch)
		mutex_unlock(&aconnector->hpd_lock);
}
992
/*
 * register_hpd_handlers - hook every connector's HPD (long pulse) and
 * HPD-RX (short pulse) interrupt sources up to the DM interrupt service,
 * running the handlers in low-IRQ (deferred) context.
 */
static void register_hpd_handlers(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	struct amdgpu_dm_connector *aconnector;
	const struct dc_link *dc_link;
	struct dc_interrupt_params int_params = {0};

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {

		aconnector = to_amdgpu_dm_connector(connector);
		dc_link = aconnector->dc_link;

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_irq,
					(void *) aconnector);
		}

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {

			/* Also register for DP short pulse (hpd_rx). */
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_rx_irq,
					(void *) aconnector);
		}
	}
}
1031
/* Register IRQ sources and initialize IRQ callbacks (DCE 11.x family).
 *
 * Registers vblank, page-flip and HPD interrupt ids with the base
 * driver and attaches the DM high-IRQ handlers to each source.
 * Returns 0 on success or the amdgpu_irq_add_id() error.
 */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned client_id = AMDGPU_IH_CLIENTID_LEGACY;

	/* Vega10/Raven route display interrupts under the DCE IH client. */
	if (adev->asic_type == CHIP_VEGA10 ||
	    adev->asic_type == CHIP_RAVEN)
		client_id = AMDGPU_IH_CLIENTID_DCE;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/* Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling. */

	/* Use VBLANK interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
1115
ff5ef992
AD
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
/* Register IRQ sources and initialize IRQ callbacks for DCN 1.0 (Raven).
 * Same structure as dce110_register_irq_handlers() but with DCN source IDs
 * and one VSTARTUP/FLIP source per CRTC.
 * Returns 0 on success or the negative errno from amdgpu_irq_add_id(). */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/* Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 * */

	/* Use VSTARTUP interrupt */
	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->crtc_irq);

		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		/* Per-CRTC parameter slot, indexed by DC irq source. */
		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
			&adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
#endif
1201
4562236b
HW
1202static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
1203{
1204 int r;
1205
1206 adev->mode_info.mode_config_initialized = true;
1207
4562236b 1208 adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
54f5499a 1209 adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
4562236b
HW
1210
1211 adev->ddev->mode_config.max_width = 16384;
1212 adev->ddev->mode_config.max_height = 16384;
1213
1214 adev->ddev->mode_config.preferred_depth = 24;
1215 adev->ddev->mode_config.prefer_shadow = 1;
1216 /* indicate support of immediate flip */
1217 adev->ddev->mode_config.async_page_flip = true;
1218
1219 adev->ddev->mode_config.fb_base = adev->mc.aper_base;
1220
1221 r = amdgpu_modeset_create_props(adev);
1222 if (r)
1223 return r;
1224
1225 return 0;
1226}
1227
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

/* backlight_ops.update_status: push the requested brightness to the DC
 * link. Returns 0 on success, 1 on failure (dc_link_set_backlight_level()
 * reports success as true). */
static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);

	if (dc_link_set_backlight_level(dm->backlight_link,
			bd->props.brightness, 0, 0))
		return 0;
	else
		return 1;
}

/* backlight_ops.get_brightness: we do not read back from hardware, just
 * report the last requested level. */
static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
	return bd->props.brightness;
}

static const struct backlight_ops amdgpu_dm_backlight_ops = {
	.get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status	= amdgpu_dm_backlight_update_status,
};

/* Register one RAW backlight device ("amdgpu_blN") for this DM instance.
 * On failure dm->backlight_dev is left NULL. */
void amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
{
	char bl_name[16];
	struct backlight_properties props = { 0 };

	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
	props.type = BACKLIGHT_RAW;

	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
			dm->adev->ddev->primary->index);

	dm->backlight_dev = backlight_device_register(bl_name,
			dm->adev->ddev->dev,
			dm,
			&amdgpu_dm_backlight_ops,
			&props);

	/*
	 * backlight_device_register() returns an ERR_PTR() on failure and
	 * never NULL, so the previous NULL check could never trigger and a
	 * later dereference of an ERR_PTR() would oops. Check with IS_ERR()
	 * and normalize the pointer to NULL on failure.
	 */
	if (IS_ERR(dm->backlight_dev)) {
		DRM_ERROR("DM: Backlight registration failed!\n");
		dm->backlight_dev = NULL;
	} else
		DRM_INFO("DM: Registered Backlight device: %s\n", bl_name);
}

#endif
1276
1277/* In this architecture, the association
1278 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
1280 * display_index as an abstraction to use with DAL component
1281 *
1282 * Returns 0 on success
1283 */
1284int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
1285{
1286 struct amdgpu_display_manager *dm = &adev->dm;
1287 uint32_t i;
c84dec2f 1288 struct amdgpu_dm_connector *aconnector = NULL;
f2a0f5e6 1289 struct amdgpu_encoder *aencoder = NULL;
d4e13b0d 1290 struct amdgpu_mode_info *mode_info = &adev->mode_info;
4562236b 1291 uint32_t link_cnt;
92f3ac40 1292 unsigned long possible_crtcs;
4562236b
HW
1293
1294 link_cnt = dm->dc->caps.max_links;
4562236b
HW
1295 if (amdgpu_dm_mode_config_init(dm->adev)) {
1296 DRM_ERROR("DM: Failed to initialize mode config\n");
f2a0f5e6 1297 return -1;
4562236b
HW
1298 }
1299
3be5262e 1300 for (i = 0; i < dm->dc->caps.max_planes; i++) {
d4e13b0d
AD
1301 mode_info->planes[i] = kzalloc(sizeof(struct amdgpu_plane),
1302 GFP_KERNEL);
1303 if (!mode_info->planes[i]) {
3be5262e 1304 DRM_ERROR("KMS: Failed to allocate plane\n");
d4e13b0d
AD
1305 goto fail_free_planes;
1306 }
1605b3be 1307 mode_info->planes[i]->base.type = mode_info->plane_type[i];
92f3ac40
LSL
1308
1309 /*
1310 * HACK: IGT tests expect that each plane can only have one
1311 * one possible CRTC. For now, set one CRTC for each
1312 * plane that is not an underlay, but still allow multiple
1313 * CRTCs for underlay planes.
1314 */
1315 possible_crtcs = 1 << i;
1316 if (i >= dm->dc->caps.max_streams)
1317 possible_crtcs = 0xff;
1318
1319 if (amdgpu_dm_plane_init(dm, mode_info->planes[i], possible_crtcs)) {
d4e13b0d
AD
1320 DRM_ERROR("KMS: Failed to initialize plane\n");
1321 goto fail_free_planes;
1322 }
1323 }
4562236b 1324
d4e13b0d
AD
1325 for (i = 0; i < dm->dc->caps.max_streams; i++)
1326 if (amdgpu_dm_crtc_init(dm, &mode_info->planes[i]->base, i)) {
4562236b 1327 DRM_ERROR("KMS: Failed to initialize crtc\n");
d4e13b0d 1328 goto fail_free_planes;
4562236b 1329 }
4562236b 1330
ab2541b6 1331 dm->display_indexes_num = dm->dc->caps.max_streams;
4562236b
HW
1332
1333 /* loops over all connectors on the board */
1334 for (i = 0; i < link_cnt; i++) {
1335
1336 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
1337 DRM_ERROR(
1338 "KMS: Cannot support more than %d display indexes\n",
1339 AMDGPU_DM_MAX_DISPLAY_INDEX);
1340 continue;
1341 }
1342
1343 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
1344 if (!aconnector)
f2a0f5e6 1345 goto fail_free_planes;
4562236b
HW
1346
1347 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
1348 if (!aencoder) {
1349 goto fail_free_connector;
1350 }
1351
1352 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
1353 DRM_ERROR("KMS: Failed to initialize encoder\n");
1354 goto fail_free_encoder;
1355 }
1356
1357 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
1358 DRM_ERROR("KMS: Failed to initialize connector\n");
f2a0f5e6 1359 goto fail_free_encoder;
4562236b
HW
1360 }
1361
8f38b66c
HW
1362 if (dc_link_detect(dc_get_link_at_index(dm->dc, i),
1363 DETECT_REASON_BOOT))
4562236b
HW
1364 amdgpu_dm_update_connector_after_detect(aconnector);
1365 }
1366
1367 /* Software is initialized. Now we can register interrupt handlers. */
1368 switch (adev->asic_type) {
1369 case CHIP_BONAIRE:
1370 case CHIP_HAWAII:
cd4b356f
AD
1371 case CHIP_KAVERI:
1372 case CHIP_KABINI:
1373 case CHIP_MULLINS:
4562236b
HW
1374 case CHIP_TONGA:
1375 case CHIP_FIJI:
1376 case CHIP_CARRIZO:
1377 case CHIP_STONEY:
1378 case CHIP_POLARIS11:
1379 case CHIP_POLARIS10:
b264d345 1380 case CHIP_POLARIS12:
2c8ad2d5 1381 case CHIP_VEGA10:
4562236b
HW
1382 if (dce110_register_irq_handlers(dm->adev)) {
1383 DRM_ERROR("DM: Failed to initialize IRQ\n");
d4e13b0d 1384 goto fail_free_encoder;
4562236b
HW
1385 }
1386 break;
ff5ef992
AD
1387#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1388 case CHIP_RAVEN:
1389 if (dcn10_register_irq_handlers(dm->adev)) {
1390 DRM_ERROR("DM: Failed to initialize IRQ\n");
1391 goto fail_free_encoder;
1392 }
79c24086
BL
1393 /*
1394 * Temporary disable until pplib/smu interaction is implemented
1395 */
1396 dm->dc->debug.disable_stutter = true;
ff5ef992
AD
1397 break;
1398#endif
4562236b
HW
1399 default:
1400 DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type);
d4e13b0d 1401 goto fail_free_encoder;
4562236b
HW
1402 }
1403
1404 drm_mode_config_reset(dm->ddev);
1405
1406 return 0;
1407fail_free_encoder:
1408 kfree(aencoder);
1409fail_free_connector:
1410 kfree(aconnector);
d4e13b0d 1411fail_free_planes:
3be5262e 1412 for (i = 0; i < dm->dc->caps.max_planes; i++)
d4e13b0d 1413 kfree(mode_info->planes[i]);
4562236b
HW
1414 return -1;
1415}
1416
1417void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
1418{
1419 drm_mode_config_cleanup(dm->ddev);
1420 return;
1421}
1422
1423/******************************************************************************
1424 * amdgpu_display_funcs functions
1425 *****************************************************************************/
1426
1427/**
1428 * dm_bandwidth_update - program display watermarks
1429 *
1430 * @adev: amdgpu_device pointer
1431 *
1432 * Calculate and program the display watermarks and line buffer allocation.
1433 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later — intentionally a no-op for now; DC handles
	 * watermark programming internally. */
}
1438
/* amdgpu_display_funcs.backlight_set_level stub — intentionally a no-op. */
static void dm_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
		u8 level)
{
	/* TODO: translate amdgpu_encoder to display_index and call DAL */
}
1444
/* amdgpu_display_funcs.backlight_get_level stub — always reports 0. */
static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
{
	/* TODO: translate amdgpu_encoder to display_index and call DAL */
	return 0;
}
1450
4562236b
HW
1451static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
1452 struct drm_file *filp)
1453{
1454 struct mod_freesync_params freesync_params;
ab2541b6 1455 uint8_t num_streams;
4562236b 1456 uint8_t i;
4562236b
HW
1457
1458 struct amdgpu_device *adev = dev->dev_private;
1459 int r = 0;
1460
1461 /* Get freesync enable flag from DRM */
1462
ab2541b6 1463 num_streams = dc_get_current_stream_count(adev->dm.dc);
4562236b 1464
ab2541b6 1465 for (i = 0; i < num_streams; i++) {
0971c40e 1466 struct dc_stream_state *stream;
ab2541b6 1467 stream = dc_get_stream_at_index(adev->dm.dc, i);
4562236b
HW
1468
1469 mod_freesync_update_state(adev->dm.freesync_module,
ab2541b6 1470 &stream, 1, &freesync_params);
4562236b
HW
1471 }
1472
1473 return r;
1474}
1475
/* Display-function vtable handed to the amdgpu base driver. Entries left
 * NULL are either unused with DC or handled by DC itself (VBIOS parsing). */
static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
	.vblank_wait = NULL,
	.backlight_set_level =
		dm_set_backlight_level,/* called unconditionally */
	.backlight_get_level =
		dm_get_backlight_level,/* called unconditionally */
	.hpd_sense = NULL,/* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos,/* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
	.notify_freesync = amdgpu_notify_freesync,

};
1494
#if defined(CONFIG_DEBUG_KERNEL_DC)

/* Debug sysfs hook: writing a non-zero integer forces a display resume,
 * writing zero forces a suspend. Returns the number of bytes consumed, or
 * a negative errno if the input does not parse as an integer. */
static ssize_t s3_debug_store(
		struct device *device,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	int ret;
	int s3_state;
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_dev->dev_private;

	ret = kstrtoint(buf, 0, &s3_state);

	if (ret == 0) {
		if (s3_state) {
			dm_resume(adev);
			amdgpu_dm_display_resume(adev);
			drm_kms_helper_hotplug_event(adev->ddev);
		} else
			dm_suspend(adev);
	}

	/*
	 * Returning 0 from a sysfs ->store() makes userspace retry the write
	 * forever; propagate kstrtoint()'s negative errno instead.
	 */
	return ret == 0 ? count : ret;
}

DEVICE_ATTR_WO(s3_debug);

#endif
1526
1527static int dm_early_init(void *handle)
1528{
1529 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1530
d7ec53d9 1531 adev->ddev->driver->driver_features |= DRIVER_ATOMIC;
4562236b
HW
1532 amdgpu_dm_set_irq_funcs(adev);
1533
1534 switch (adev->asic_type) {
1535 case CHIP_BONAIRE:
1536 case CHIP_HAWAII:
1537 adev->mode_info.num_crtc = 6;
1538 adev->mode_info.num_hpd = 6;
1539 adev->mode_info.num_dig = 6;
3be5262e 1540 adev->mode_info.plane_type = dm_plane_type_default;
4562236b 1541 break;
cd4b356f
AD
1542 case CHIP_KAVERI:
1543 adev->mode_info.num_crtc = 4;
1544 adev->mode_info.num_hpd = 6;
1545 adev->mode_info.num_dig = 7;
1546 adev->mode_info.plane_type = dm_plane_type_default;
1547 break;
1548 case CHIP_KABINI:
1549 case CHIP_MULLINS:
1550 adev->mode_info.num_crtc = 2;
1551 adev->mode_info.num_hpd = 6;
1552 adev->mode_info.num_dig = 6;
1553 adev->mode_info.plane_type = dm_plane_type_default;
1554 break;
4562236b
HW
1555 case CHIP_FIJI:
1556 case CHIP_TONGA:
1557 adev->mode_info.num_crtc = 6;
1558 adev->mode_info.num_hpd = 6;
1559 adev->mode_info.num_dig = 7;
3be5262e 1560 adev->mode_info.plane_type = dm_plane_type_default;
4562236b
HW
1561 break;
1562 case CHIP_CARRIZO:
1563 adev->mode_info.num_crtc = 3;
1564 adev->mode_info.num_hpd = 6;
1565 adev->mode_info.num_dig = 9;
3be5262e 1566 adev->mode_info.plane_type = dm_plane_type_carizzo;
4562236b
HW
1567 break;
1568 case CHIP_STONEY:
1569 adev->mode_info.num_crtc = 2;
1570 adev->mode_info.num_hpd = 6;
1571 adev->mode_info.num_dig = 9;
3be5262e 1572 adev->mode_info.plane_type = dm_plane_type_stoney;
4562236b
HW
1573 break;
1574 case CHIP_POLARIS11:
b264d345 1575 case CHIP_POLARIS12:
4562236b
HW
1576 adev->mode_info.num_crtc = 5;
1577 adev->mode_info.num_hpd = 5;
1578 adev->mode_info.num_dig = 5;
3be5262e 1579 adev->mode_info.plane_type = dm_plane_type_default;
4562236b
HW
1580 break;
1581 case CHIP_POLARIS10:
1582 adev->mode_info.num_crtc = 6;
1583 adev->mode_info.num_hpd = 6;
1584 adev->mode_info.num_dig = 6;
3be5262e 1585 adev->mode_info.plane_type = dm_plane_type_default;
4562236b 1586 break;
2c8ad2d5
AD
1587 case CHIP_VEGA10:
1588 adev->mode_info.num_crtc = 6;
1589 adev->mode_info.num_hpd = 6;
1590 adev->mode_info.num_dig = 6;
3be5262e 1591 adev->mode_info.plane_type = dm_plane_type_default;
2c8ad2d5 1592 break;
ff5ef992
AD
1593#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1594 case CHIP_RAVEN:
1595 adev->mode_info.num_crtc = 4;
1596 adev->mode_info.num_hpd = 4;
1597 adev->mode_info.num_dig = 4;
3be5262e 1598 adev->mode_info.plane_type = dm_plane_type_default;
ff5ef992
AD
1599 break;
1600#endif
4562236b
HW
1601 default:
1602 DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type);
1603 return -EINVAL;
1604 }
1605
39cc5be2
AD
1606 if (adev->mode_info.funcs == NULL)
1607 adev->mode_info.funcs = &dm_display_funcs;
1608
4562236b
HW
1609 /* Note: Do NOT change adev->audio_endpt_rreg and
1610 * adev->audio_endpt_wreg because they are initialised in
1611 * amdgpu_device_init() */
1612#if defined(CONFIG_DEBUG_KERNEL_DC)
1613 device_create_file(
1614 adev->ddev->dev,
1615 &dev_attr_s3_debug);
1616#endif
1617
1618 return 0;
1619}
1620
/* Acquire the DAL lock. TODO: not implemented yet; always reports success. */
bool amdgpu_dm_acquire_dal_lock(struct amdgpu_display_manager *dm)
{
	/* TODO */
	return true;
}
1626
/* Release the DAL lock. TODO: not implemented yet; always reports success. */
bool amdgpu_dm_release_dal_lock(struct amdgpu_display_manager *dm)
{
	/* TODO */
	return true;
}
1632
1633
/* DM-private connector state: extends drm_connector_state with the
 * scaling/underscan settings exposed through connector properties. */
struct dm_connector_state {
	struct drm_connector_state base;

	enum amdgpu_rmx_type scaling;	/* RMX_OFF/ASPECT/CENTER/... */
	uint8_t underscan_vborder;	/* vertical underscan, pixels */
	uint8_t underscan_hborder;	/* horizontal underscan, pixels */
	bool underscan_enable;
};
1642
/* Downcast an embedded drm_connector_state to its dm_connector_state. */
#define to_dm_connector_state(x)\
	container_of((x), struct dm_connector_state, base)
1645
9b690ef3 1646static bool modeset_required(struct drm_crtc_state *crtc_state,
0971c40e
HW
1647 struct dc_stream_state *new_stream,
1648 struct dc_stream_state *old_stream)
9b690ef3 1649{
e7b07cee
HW
1650 if (!drm_atomic_crtc_needs_modeset(crtc_state))
1651 return false;
1652
1653 if (!crtc_state->enable)
1654 return false;
1655
1656 return crtc_state->active;
1657}
1658
1659static bool modereset_required(struct drm_crtc_state *crtc_state)
1660{
1661 if (!drm_atomic_crtc_needs_modeset(crtc_state))
1662 return false;
1663
1664 return !crtc_state->enable || !crtc_state->active;
1665}
1666
/* drm_encoder_funcs.destroy callback: tear down and free the encoder. */
void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}
1672
/* Encoder vtable: only destruction is needed; setup is done by DC. */
static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};
1676
e7b07cee
HW
1677static bool fill_rects_from_plane_state(
1678 const struct drm_plane_state *state,
3be5262e 1679 struct dc_plane_state *plane_state)
e7b07cee 1680{
3be5262e
HW
1681 plane_state->src_rect.x = state->src_x >> 16;
1682 plane_state->src_rect.y = state->src_y >> 16;
e7b07cee 1683 /*we ignore for now mantissa and do not to deal with floating pixels :(*/
3be5262e 1684 plane_state->src_rect.width = state->src_w >> 16;
e7b07cee 1685
3be5262e 1686 if (plane_state->src_rect.width == 0)
e7b07cee
HW
1687 return false;
1688
3be5262e
HW
1689 plane_state->src_rect.height = state->src_h >> 16;
1690 if (plane_state->src_rect.height == 0)
e7b07cee
HW
1691 return false;
1692
3be5262e
HW
1693 plane_state->dst_rect.x = state->crtc_x;
1694 plane_state->dst_rect.y = state->crtc_y;
e7b07cee
HW
1695
1696 if (state->crtc_w == 0)
1697 return false;
1698
3be5262e 1699 plane_state->dst_rect.width = state->crtc_w;
e7b07cee
HW
1700
1701 if (state->crtc_h == 0)
1702 return false;
1703
3be5262e 1704 plane_state->dst_rect.height = state->crtc_h;
e7b07cee 1705
3be5262e 1706 plane_state->clip_rect = plane_state->dst_rect;
e7b07cee
HW
1707
1708 switch (state->rotation & DRM_MODE_ROTATE_MASK) {
1709 case DRM_MODE_ROTATE_0:
3be5262e 1710 plane_state->rotation = ROTATION_ANGLE_0;
e7b07cee
HW
1711 break;
1712 case DRM_MODE_ROTATE_90:
3be5262e 1713 plane_state->rotation = ROTATION_ANGLE_90;
e7b07cee
HW
1714 break;
1715 case DRM_MODE_ROTATE_180:
3be5262e 1716 plane_state->rotation = ROTATION_ANGLE_180;
e7b07cee
HW
1717 break;
1718 case DRM_MODE_ROTATE_270:
3be5262e 1719 plane_state->rotation = ROTATION_ANGLE_270;
e7b07cee
HW
1720 break;
1721 default:
3be5262e 1722 plane_state->rotation = ROTATION_ANGLE_0;
e7b07cee
HW
1723 break;
1724 }
1725
4562236b
HW
1726 return true;
1727}
e7b07cee
HW
1728static int get_fb_info(
1729 const struct amdgpu_framebuffer *amdgpu_fb,
1730 uint64_t *tiling_flags,
1731 uint64_t *fb_location)
1732{
1733 struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
1734 int r = amdgpu_bo_reserve(rbo, false);
b830ebc9 1735
e7b07cee
HW
1736 if (unlikely(r)) {
1737 DRM_ERROR("Unable to reserve buffer\n");
1738 return r;
1739 }
1740
1741 if (fb_location)
1742 *fb_location = amdgpu_bo_gpu_offset(rbo);
1743
1744 if (tiling_flags)
1745 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
1746
1747 amdgpu_bo_unreserve(rbo);
1748
1749 return r;
1750}
1751
/*
 * Program the DC plane state from the framebuffer: pixel format, surface
 * address (only when @addReq is true), plane sizes/pitches and tiling info
 * (GFX8 always, GFX9 additionally on Vega10/Raven).
 * Returns 0 on success, -EINVAL for an unsupported format, or the errno
 * from get_fb_info().
 */
static int fill_plane_attributes_from_fb(
	struct amdgpu_device *adev,
	struct dc_plane_state *plane_state,
	const struct amdgpu_framebuffer *amdgpu_fb, bool addReq)
{
	uint64_t tiling_flags;
	uint64_t fb_location = 0;
	unsigned int awidth;
	const struct drm_framebuffer *fb = &amdgpu_fb->base;
	int ret = 0;
	struct drm_format_name_buf format_name;

	ret = get_fb_info(
		amdgpu_fb,
		&tiling_flags,
		addReq == true ? &fb_location:NULL);

	if (ret)
		return ret;

	/* Map the DRM fourcc to the matching DC surface pixel format. */
	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
		break;
	case DRM_FORMAT_RGB565:
		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
		break;
	case DRM_FORMAT_NV21:
		plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
		break;
	case DRM_FORMAT_NV12:
		plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
		break;
	default:
		DRM_ERROR("Unsupported screen format %s\n",
			drm_get_format_name(fb->format->format, &format_name));
		return -EINVAL;
	}

	if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
		/* RGB path: single graphics surface. */
		plane_state->address.type = PLN_ADDR_TYPE_GRAPHICS;
		plane_state->address.grph.addr.low_part = lower_32_bits(fb_location);
		plane_state->address.grph.addr.high_part = upper_32_bits(fb_location);
		plane_state->plane_size.grph.surface_size.x = 0;
		plane_state->plane_size.grph.surface_size.y = 0;
		plane_state->plane_size.grph.surface_size.width = fb->width;
		plane_state->plane_size.grph.surface_size.height = fb->height;
		plane_state->plane_size.grph.surface_pitch =
				fb->pitches[0] / fb->format->cpp[0];
		/* TODO: unhardcode */
		plane_state->color_space = COLOR_SPACE_SRGB;

	} else {
		/* Video (NV12/NV21) path: separate luma/chroma planes, the
		 * chroma plane immediately follows the 64-aligned luma. */
		awidth = ALIGN(fb->width, 64);
		plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
		plane_state->address.video_progressive.luma_addr.low_part
						= lower_32_bits(fb_location);
		plane_state->address.video_progressive.chroma_addr.low_part
						= lower_32_bits(fb_location) +
							(awidth * fb->height);
		plane_state->plane_size.video.luma_size.x = 0;
		plane_state->plane_size.video.luma_size.y = 0;
		plane_state->plane_size.video.luma_size.width = awidth;
		plane_state->plane_size.video.luma_size.height = fb->height;
		/* TODO: unhardcode */
		plane_state->plane_size.video.luma_pitch = awidth;

		plane_state->plane_size.video.chroma_size.x = 0;
		plane_state->plane_size.video.chroma_size.y = 0;
		plane_state->plane_size.video.chroma_size.width = awidth;
		plane_state->plane_size.video.chroma_size.height = fb->height;
		plane_state->plane_size.video.chroma_pitch = awidth / 2;

		/* TODO: unhardcode */
		plane_state->color_space = COLOR_SPACE_YCBCR709;
	}

	memset(&plane_state->tiling_info, 0, sizeof(plane_state->tiling_info));

	/* Fill GFX8 params */
	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		/* XXX fix me for VI */
		plane_state->tiling_info.gfx8.num_banks = num_banks;
		plane_state->tiling_info.gfx8.array_mode =
				DC_ARRAY_2D_TILED_THIN1;
		plane_state->tiling_info.gfx8.tile_split = tile_split;
		plane_state->tiling_info.gfx8.bank_width = bankw;
		plane_state->tiling_info.gfx8.bank_height = bankh;
		plane_state->tiling_info.gfx8.tile_aspect = mtaspect;
		plane_state->tiling_info.gfx8.tile_mode =
				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
			== DC_ARRAY_1D_TILED_THIN1) {
		plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
	}

	plane_state->tiling_info.gfx8.pipe_config =
			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);

	if (adev->asic_type == CHIP_VEGA10 ||
	    adev->asic_type == CHIP_RAVEN) {
		/* Fill GFX9 params */
		plane_state->tiling_info.gfx9.num_pipes =
			adev->gfx.config.gb_addr_config_fields.num_pipes;
		plane_state->tiling_info.gfx9.num_banks =
			adev->gfx.config.gb_addr_config_fields.num_banks;
		plane_state->tiling_info.gfx9.pipe_interleave =
			adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
		plane_state->tiling_info.gfx9.num_shader_engines =
			adev->gfx.config.gb_addr_config_fields.num_se;
		plane_state->tiling_info.gfx9.max_compressed_frags =
			adev->gfx.config.gb_addr_config_fields.max_compress_frags;
		plane_state->tiling_info.gfx9.num_rb_per_se =
			adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
		plane_state->tiling_info.gfx9.swizzle =
			AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
		plane_state->tiling_info.gfx9.shaderEnable = 1;
	}

	plane_state->visible = true;
	plane_state->scaling_quality.h_taps_c = 0;
	plane_state->scaling_quality.v_taps_c = 0;

	/* is this needed? is plane_state zeroed at allocation? */
	plane_state->scaling_quality.h_taps = 0;
	plane_state->scaling_quality.v_taps = 0;
	plane_state->stereo_format = PLANE_STEREO_FORMAT_NONE;

	return ret;

}
1903
e7b07cee
HW
/*
 * Build a DC gamma object from the CRTC's DRM gamma LUT and attach it to
 * the plane state. On allocation failure, warns and leaves the plane's
 * gamma untouched.
 *
 * NOTE(review): drm_color_lut entries are 16-bit (0..65535) values, yet
 * they are converted with dal_fixed31_32_from_int(), i.e. treated as whole
 * integers rather than normalized fractions — confirm this is what DC
 * expects for GAMMA_RGB_256 entries.
 */
static void fill_gamma_from_crtc_state(
	const struct drm_crtc_state *crtc_state,
	struct dc_plane_state *plane_state)
{
	int i;
	struct dc_gamma *gamma;
	struct drm_color_lut *lut =
			(struct drm_color_lut *) crtc_state->gamma_lut->data;

	gamma = dc_create_gamma();

	if (gamma == NULL) {
		WARN_ON(1);
		return;
	}

	gamma->type = GAMMA_RGB_256;
	gamma->num_entries = GAMMA_RGB_256_ENTRIES;
	for (i = 0; i < GAMMA_RGB_256_ENTRIES; i++) {
		gamma->entries.red[i] = dal_fixed31_32_from_int(lut[i].red);
		gamma->entries.green[i] = dal_fixed31_32_from_int(lut[i].green);
		gamma->entries.blue[i] = dal_fixed31_32_from_int(lut[i].blue);
	}

	plane_state->gamma_correction = gamma;
}
1930
/*
 * Populate @dc_plane_state from the DRM plane/CRTC state: rectangles,
 * framebuffer attributes (address only when @addrReq), an sRGB input
 * transfer function, and optionally the CRTC gamma LUT.
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): @adev is unused; the device is re-derived from the plane's
 * CRTC instead — presumably the same device, confirm with callers.
 */
static int fill_plane_attributes(
	struct amdgpu_device *adev,
	struct dc_plane_state *dc_plane_state,
	struct drm_plane_state *plane_state,
	struct drm_crtc_state *crtc_state,
	bool addrReq)
{
	const struct amdgpu_framebuffer *amdgpu_fb =
		to_amdgpu_framebuffer(plane_state->fb);
	const struct drm_crtc *crtc = plane_state->crtc;
	struct dc_transfer_func *input_tf;
	int ret = 0;

	if (!fill_rects_from_plane_state(plane_state, dc_plane_state))
		return -EINVAL;

	ret = fill_plane_attributes_from_fb(
		crtc->dev->dev_private,
		dc_plane_state,
		amdgpu_fb,
		addrReq);

	if (ret)
		return ret;

	/* Default the input transfer function to predefined sRGB; ownership
	 * of input_tf passes to dc_plane_state. */
	input_tf = dc_create_transfer_func();

	if (input_tf == NULL)
		return -ENOMEM;

	input_tf->type = TF_TYPE_PREDEFINED;
	input_tf->tf = TRANSFER_FUNCTION_SRGB;

	dc_plane_state->in_transfer_func = input_tf;

	/* In case of gamma set, update gamma value */
	if (crtc_state->gamma_lut)
		fill_gamma_from_crtc_state(crtc_state, dc_plane_state);

	return ret;
}
1972
1973/*****************************************************************************/
1974
c84dec2f 1975struct amdgpu_dm_connector *aconnector_from_drm_crtc_id(
e7b07cee
HW
1976 const struct drm_crtc *crtc)
1977{
1978 struct drm_device *dev = crtc->dev;
1979 struct drm_connector *connector;
1980 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
c84dec2f 1981 struct amdgpu_dm_connector *aconnector;
e7b07cee
HW
1982
1983 list_for_each_entry(connector,
1984 &dev->mode_config.connector_list, head) {
1985
c84dec2f 1986 aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
1987
1988 if (aconnector->base.state->crtc != &acrtc->base)
1989 continue;
1990
1991 /* Found the connector */
1992 return aconnector;
1993 }
1994
1995 /* If we get here, not found. */
1996 return NULL;
1997}
1998
/*
 * Compute the stream's source (composition-space viewport) and destination
 * (addressable-area) rectangles from the requested mode and the
 * connector's scaling/underscan state: aspect-preserving fit or centering
 * per rmx_type, then center and apply underscan borders.
 */
static void update_stream_scaling_settings(
		const struct drm_display_mode *mode,
		const struct dm_connector_state *dm_state,
		struct dc_stream_state *stream)
{
	enum amdgpu_rmx_type rmx_type;

	struct rect src = { 0 }; /* viewport in composition space*/
	struct rect dst = { 0 }; /* stream addressable area */

	/* no mode. nothing to be done */
	if (!mode)
		return;

	/* Full screen scaling by default */
	src.width = mode->hdisplay;
	src.height = mode->vdisplay;
	dst.width = stream->timing.h_addressable;
	dst.height = stream->timing.v_addressable;

	rmx_type = dm_state->scaling;
	if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
		/* Shrink one dst dimension to preserve the src aspect ratio
		 * (cross-multiplied to avoid division). */
		if (src.width * dst.height <
				src.height * dst.width) {
			/* height needs less upscaling/more downscaling */
			dst.width = src.width *
					dst.height / src.height;
		} else {
			/* width needs less upscaling/more downscaling */
			dst.height = src.height *
					dst.width / src.width;
		}
	} else if (rmx_type == RMX_CENTER) {
		/* 1:1 pixels, centered below. */
		dst = src;
	}

	/* Center the destination rectangle within the addressable area. */
	dst.x = (stream->timing.h_addressable - dst.width) / 2;
	dst.y = (stream->timing.v_addressable - dst.height) / 2;

	if (dm_state->underscan_enable) {
		dst.x += dm_state->underscan_hborder / 2;
		dst.y += dm_state->underscan_vborder / 2;
		dst.width -= dm_state->underscan_hborder;
		dst.height -= dm_state->underscan_vborder;
	}

	stream->src = src;
	stream->dst = dst;

	DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
			dst.x, dst.y, dst.width, dst.height);

}
2052
/*
 * Map the connector's EDID-reported bits-per-color into a dc_color_depth.
 * Depth is currently clamped to 8 bpc, so the 10/12/14/16 cases below are
 * unreachable until the clamp is removed (kept for when deep color lands).
 */
static enum dc_color_depth convert_color_depth_from_display_info(
		const struct drm_connector *connector)
{
	uint32_t bpc = connector->display_info.bpc;

	/* Limited color depth to 8bit
	 * TODO: Still need to handle deep color
	 */
	if (bpc > 8)
		bpc = 8;

	switch (bpc) {
	case 0:
		/* Temporary Work around, DRM don't parse color depth for
		 * EDID revision before 1.4
		 * TODO: Fix edid parsing
		 */
		return COLOR_DEPTH_888;
	case 6:
		return COLOR_DEPTH_666;
	case 8:
		return COLOR_DEPTH_888;
	case 10:
		return COLOR_DEPTH_101010;
	case 12:
		return COLOR_DEPTH_121212;
	case 14:
		return COLOR_DEPTH_141414;
	case 16:
		return COLOR_DEPTH_161616;
	default:
		return COLOR_DEPTH_UNDEFINED;
	}
}
2087
2088static enum dc_aspect_ratio get_aspect_ratio(
2089 const struct drm_display_mode *mode_in)
2090{
2091 int32_t width = mode_in->crtc_hdisplay * 9;
2092 int32_t height = mode_in->crtc_vdisplay * 16;
b830ebc9 2093
e7b07cee
HW
2094 if ((width - height) < 10 && (width - height) > -10)
2095 return ASPECT_RATIO_16_9;
2096 else
2097 return ASPECT_RATIO_4_3;
2098}
2099
/*
 * Pick the output color space for a timing: sRGB for RGB encodings, and
 * YCbCr601/709 (full or limited when Y_ONLY) for YCbCr encodings, split
 * at the HDMI-spec SDTV/HDTV pixel-clock boundary.
 */
static enum dc_color_space get_output_color_space(
		const struct dc_crtc_timing *dc_crtc_timing)
{
	enum dc_color_space color_space = COLOR_SPACE_SRGB;

	switch (dc_crtc_timing->pixel_encoding) {
	case PIXEL_ENCODING_YCBCR422:
	case PIXEL_ENCODING_YCBCR444:
	case PIXEL_ENCODING_YCBCR420:
	{
		/*
		 * 27030khz is the separation point between HDTV and SDTV
		 * according to HDMI spec, we use YCbCr709 and YCbCr601
		 * respectively
		 */
		if (dc_crtc_timing->pix_clk_khz > 27030) {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR709_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR709;
		} else {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR601_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR601;
		}

	}
	break;
	case PIXEL_ENCODING_RGB:
		color_space = COLOR_SPACE_SRGB;
		break;

	default:
		/* Unknown encoding: warn once, keep the sRGB default. */
		WARN_ON(1);
		break;
	}

	return color_space;
}
2142
2143/*****************************************************************************/
2144
2145static void fill_stream_properties_from_drm_display_mode(
0971c40e 2146 struct dc_stream_state *stream,
e7b07cee
HW
2147 const struct drm_display_mode *mode_in,
2148 const struct drm_connector *connector)
2149{
2150 struct dc_crtc_timing *timing_out = &stream->timing;
b830ebc9 2151
e7b07cee
HW
2152 memset(timing_out, 0, sizeof(struct dc_crtc_timing));
2153
2154 timing_out->h_border_left = 0;
2155 timing_out->h_border_right = 0;
2156 timing_out->v_border_top = 0;
2157 timing_out->v_border_bottom = 0;
2158 /* TODO: un-hardcode */
2159
2160 if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
2161 && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
2162 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
2163 else
2164 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
2165
2166 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
2167 timing_out->display_color_depth = convert_color_depth_from_display_info(
2168 connector);
2169 timing_out->scan_type = SCANNING_TYPE_NODATA;
2170 timing_out->hdmi_vic = 0;
2171 timing_out->vic = drm_match_cea_mode(mode_in);
2172
2173 timing_out->h_addressable = mode_in->crtc_hdisplay;
2174 timing_out->h_total = mode_in->crtc_htotal;
2175 timing_out->h_sync_width =
2176 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
2177 timing_out->h_front_porch =
2178 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
2179 timing_out->v_total = mode_in->crtc_vtotal;
2180 timing_out->v_addressable = mode_in->crtc_vdisplay;
2181 timing_out->v_front_porch =
2182 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
2183 timing_out->v_sync_width =
2184 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
2185 timing_out->pix_clk_khz = mode_in->crtc_clock;
2186 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
2187 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
2188 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
2189 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
2190 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
2191
2192 stream->output_color_space = get_output_color_space(timing_out);
2193
2194 {
2195 struct dc_transfer_func *tf = dc_create_transfer_func();
b830ebc9 2196
e7b07cee
HW
2197 tf->type = TF_TYPE_PREDEFINED;
2198 tf->tf = TRANSFER_FUNCTION_SRGB;
2199 stream->out_transfer_func = tf;
2200 }
2201}
2202
/*
 * Populate @audio_info from the sink's parsed EDID caps and the DRM
 * connector's CEA/latency data.  Audio modes are only trusted for
 * CEA revision >= 3 EDIDs.
 */
static void fill_audio_info(
	struct audio_info *audio_info,
	const struct drm_connector *drm_connector,
	const struct dc_sink *dc_sink)
{
	int i = 0;
	int cea_revision = 0;
	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;

	audio_info->manufacture_id = edid_caps->manufacturer_id;
	audio_info->product_id = edid_caps->product_id;

	cea_revision = drm_connector->display_info.cea_rev;

	/* Copy the display name up to the buffer limit or terminator. */
	while (i < AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS &&
		edid_caps->display_name[i]) {
		audio_info->display_name[i] = edid_caps->display_name[i];
		i++;
	}

	if (cea_revision >= 3) {
		audio_info->mode_count = edid_caps->audio_mode_count;

		for (i = 0; i < audio_info->mode_count; ++i) {
			audio_info->modes[i].format_code =
					(enum audio_format_code)
					(edid_caps->audio_modes[i].format_code);
			audio_info->modes[i].channel_count =
					edid_caps->audio_modes[i].channel_count;
			audio_info->modes[i].sample_rates.all =
					edid_caps->audio_modes[i].sample_rate;
			audio_info->modes[i].sample_size =
					edid_caps->audio_modes[i].sample_size;
		}
	}

	audio_info->flags.all = edid_caps->speaker_flags;

	/* TODO: We only check for the progressive mode, check for interlace mode too */
	if (drm_connector->latency_present[0]) {
		audio_info->video_latency = drm_connector->video_latency[0];
		audio_info->audio_latency = drm_connector->audio_latency[0];
	}

	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */

}
2250
2251static void copy_crtc_timing_for_drm_display_mode(
2252 const struct drm_display_mode *src_mode,
2253 struct drm_display_mode *dst_mode)
2254{
2255 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
2256 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
2257 dst_mode->crtc_clock = src_mode->crtc_clock;
2258 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
2259 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
b830ebc9 2260 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
e7b07cee
HW
2261 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
2262 dst_mode->crtc_htotal = src_mode->crtc_htotal;
2263 dst_mode->crtc_hskew = src_mode->crtc_hskew;
2264 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
2265 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
2266 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
2267 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
2268 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
2269}
2270
2271static void decide_crtc_timing_for_drm_display_mode(
2272 struct drm_display_mode *drm_mode,
2273 const struct drm_display_mode *native_mode,
2274 bool scale_enabled)
2275{
2276 if (scale_enabled) {
2277 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
2278 } else if (native_mode->clock == drm_mode->clock &&
2279 native_mode->htotal == drm_mode->htotal &&
2280 native_mode->vtotal == drm_mode->vtotal) {
2281 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
2282 } else {
2283 /* no scaling nor amdgpu inserted, no need to patch */
2284 }
2285}
2286
2e0ac3d6
HW
2287static void create_fake_sink(struct amdgpu_dm_connector *aconnector)
2288{
2289 struct dc_sink *sink = NULL;
2290 struct dc_sink_init_data sink_init_data = { 0 };
2291
2292 sink_init_data.link = aconnector->dc_link;
2293 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
2294
2295 sink = dc_sink_create(&sink_init_data);
2296 if (!sink)
2297 DRM_ERROR("Failed to create sink!\n");
2298
2299 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
2300 aconnector->fake_enable = true;
2301
2302 aconnector->dc_sink = sink;
2303 aconnector->dc_link->local_sink = sink;
2304}
2305
/*
 * Build a dc_stream_state for @aconnector from @drm_mode and the DM
 * connector state.  A fake sink is created first if the connector has no
 * physical sink.  Returns the new stream (caller owns the reference) or
 * NULL on failure; all error labels fall through returning `stream`,
 * which is still NULL on those paths.
 */
static struct dc_stream_state *create_stream_for_sink(
		struct amdgpu_dm_connector *aconnector,
		const struct drm_display_mode *drm_mode,
		const struct dm_connector_state *dm_state)
{
	struct drm_display_mode *preferred_mode = NULL;
	const struct drm_connector *drm_connector;
	struct dc_stream_state *stream = NULL;
	/* Local copy: decide_crtc_timing_for_drm_display_mode() may patch it. */
	struct drm_display_mode mode = *drm_mode;
	bool native_mode_found = false;

	if (aconnector == NULL) {
		DRM_ERROR("aconnector is NULL!\n");
		goto drm_connector_null;
	}

	if (dm_state == NULL) {
		DRM_ERROR("dm_state is NULL!\n");
		goto dm_state_null;
	}

	drm_connector = &aconnector->base;

	/* No physical sink: fabricate a virtual one so creation can proceed. */
	if (!aconnector->dc_sink)
		create_fake_sink(aconnector);

	stream = dc_create_stream_for_sink(aconnector->dc_sink);

	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto stream_create_fail;
	}

	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
		/* Search for preferred mode */
		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
			native_mode_found = true;
			break;
		}
	}
	/* Fall back to the first listed mode if none is marked preferred. */
	if (!native_mode_found)
		preferred_mode = list_first_entry_or_null(
				&aconnector->base.modes,
				struct drm_display_mode,
				head);

	if (preferred_mode == NULL) {
		/* This may not be an error, the use case is when we have no
		 * usermode calls to reset and set mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous mode
		 * and the modelist may not be filled in in time.
		 */
		DRM_INFO("No preferred mode found\n");
	} else {
		decide_crtc_timing_for_drm_display_mode(
				&mode, preferred_mode,
				dm_state->scaling != RMX_OFF);
	}

	fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base);
	update_stream_scaling_settings(&mode, dm_state, stream);

	fill_audio_info(
		&stream->audio_info,
		drm_connector,
		aconnector->dc_sink);

stream_create_fail:
dm_state_null:
drm_connector_null:
	return stream;
}
2379
/* drm_crtc_funcs.destroy hook: unregister the CRTC from DRM and free it. */
void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}
2385
2386static void dm_crtc_destroy_state(struct drm_crtc *crtc,
2387 struct drm_crtc_state *state)
2388{
2389 struct dm_crtc_state *cur = to_dm_crtc_state(state);
2390
2391 /* TODO Destroy dc_stream objects are stream object is flattened */
2392 if (cur->stream)
2393 dc_stream_release(cur->stream);
2394
2395
2396 __drm_atomic_helper_crtc_destroy_state(state);
2397
2398
2399 kfree(state);
2400}
2401
/*
 * Atomic hook: reset the CRTC to a fresh zeroed DM state, destroying any
 * previous state first.  On allocation failure the CRTC is left with a
 * dangling crtc->state pointer (WARN fires); callers cannot recover here.
 */
static void dm_crtc_reset_state(struct drm_crtc *crtc)
{
	struct dm_crtc_state *state;

	if (crtc->state)
		dm_crtc_destroy_state(crtc, crtc->state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (WARN_ON(!state))
		return;

	crtc->state = &state->base;
	crtc->state->crtc = crtc;

}
2417
2418static struct drm_crtc_state *
2419dm_crtc_duplicate_state(struct drm_crtc *crtc)
2420{
2421 struct dm_crtc_state *state, *cur;
2422
2423 cur = to_dm_crtc_state(crtc->state);
2424
2425 if (WARN_ON(!crtc->state))
2426 return NULL;
2427
2428 state = dm_alloc(sizeof(*state));
2429
2430 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
2431
2432 if (cur->stream) {
2433 state->stream = cur->stream;
2434 dc_stream_retain(state->stream);
2435 }
2436
2437 /* TODO Duplicate dc_stream after objects are stream object is flattened */
2438
2439 return &state->base;
2440}
2441
/* Implemented only the options currently available for the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
	.reset = dm_crtc_reset_state,
	.destroy = amdgpu_dm_crtc_destroy,
	.gamma_set = drm_atomic_helper_legacy_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = dm_crtc_duplicate_state,
	.atomic_destroy_state = dm_crtc_destroy_state,
};
2452
/*
 * drm_connector_funcs.detect hook: report connected when a dc_sink exists,
 * unless the user forced the connector state, in which case honor the
 * forced value.
 */
static enum drm_connector_status
amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
{
	bool connected;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	/* Notes:
	 * 1. This interface is NOT called in context of HPD irq.
	 * 2. This interface *is called* in context of user-mode ioctl. Which
	 * makes it a bad place for *any* MST-related activity. */

	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
		connected = (aconnector->dc_sink != NULL);
	else
		connected = (aconnector->base.force == DRM_FORCE_ON);

	return (connected ? connector_status_connected :
			connector_status_disconnected);
}
2472
/*
 * Atomic hook: store a DM connector property (scaling mode or underscan
 * settings) into the duplicated connector state.
 * Returns 0 on success, -EINVAL for unknown properties.
 */
int amdgpu_dm_connector_atomic_set_property(
	struct drm_connector *connector,
	struct drm_connector_state *connector_state,
	struct drm_property *property,
	uint64_t val)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_connector_state *dm_old_state =
		to_dm_connector_state(connector->state);
	struct dm_connector_state *dm_new_state =
		to_dm_connector_state(connector_state);

	int ret = -EINVAL;

	if (property == dev->mode_config.scaling_mode_property) {
		enum amdgpu_rmx_type rmx_type;

		/* Translate the DRM scaling enum into the amdgpu rmx type. */
		switch (val) {
		case DRM_MODE_SCALE_CENTER:
			rmx_type = RMX_CENTER;
			break;
		case DRM_MODE_SCALE_ASPECT:
			rmx_type = RMX_ASPECT;
			break;
		case DRM_MODE_SCALE_FULLSCREEN:
			rmx_type = RMX_FULL;
			break;
		case DRM_MODE_SCALE_NONE:
		default:
			rmx_type = RMX_OFF;
			break;
		}

		/* No change: succeed without dirtying the new state. */
		if (dm_old_state->scaling == rmx_type)
			return 0;

		dm_new_state->scaling = rmx_type;
		ret = 0;
	} else if (property == adev->mode_info.underscan_hborder_property) {
		dm_new_state->underscan_hborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_vborder_property) {
		dm_new_state->underscan_vborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_property) {
		dm_new_state->underscan_enable = val;
		ret = 0;
	}

	return ret;
}
2525
/*
 * Atomic hook: read a DM connector property (scaling mode or underscan
 * settings) out of the connector state.
 * Returns 0 on success, -EINVAL for unknown properties.
 */
int amdgpu_dm_connector_atomic_get_property(
	struct drm_connector *connector,
	const struct drm_connector_state *state,
	struct drm_property *property,
	uint64_t *val)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_connector_state *dm_state =
		to_dm_connector_state(state);
	int ret = -EINVAL;

	if (property == dev->mode_config.scaling_mode_property) {
		/* Map the amdgpu rmx type back to the DRM scaling enum. */
		switch (dm_state->scaling) {
		case RMX_CENTER:
			*val = DRM_MODE_SCALE_CENTER;
			break;
		case RMX_ASPECT:
			*val = DRM_MODE_SCALE_ASPECT;
			break;
		case RMX_FULL:
			*val = DRM_MODE_SCALE_FULLSCREEN;
			break;
		case RMX_OFF:
		default:
			*val = DRM_MODE_SCALE_NONE;
			break;
		}
		ret = 0;
	} else if (property == adev->mode_info.underscan_hborder_property) {
		*val = dm_state->underscan_hborder;
		ret = 0;
	} else if (property == adev->mode_info.underscan_vborder_property) {
		*val = dm_state->underscan_vborder;
		ret = 0;
	} else if (property == adev->mode_info.underscan_property) {
		*val = dm_state->underscan_enable;
		ret = 0;
	}
	return ret;
}
2567
/*
 * drm_connector_funcs.destroy hook: drop the backlight device for
 * eDP/LVDS links, then unregister and free the connector.
 */
void amdgpu_dm_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	const struct dc_link *link = aconnector->dc_link;
	struct amdgpu_device *adev = connector->dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
		/* NOTE(review): registering right before unregistering looks
		 * like it only ensures dm->backlight_dev is populated so it
		 * can be torn down — confirm against the register helper. */
		amdgpu_dm_register_backlight_device(dm);

		if (dm->backlight_dev) {
			backlight_device_unregister(dm->backlight_dev);
			dm->backlight_dev = NULL;
		}

	}
#endif
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	kfree(connector);
}
2591
/*
 * drm_connector_funcs.reset hook: free the old DM connector state and
 * install a zeroed default (no scaling, no underscan).  On allocation
 * failure the connector is left without a state.
 */
void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	kfree(state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (state) {
		state->scaling = RMX_OFF;
		state->underscan_enable = false;
		state->underscan_hborder = 0;
		state->underscan_vborder = 0;

		connector->state = &state->base;
		connector->state->connector = connector;
	}
}
2611
2612struct drm_connector_state *amdgpu_dm_connector_atomic_duplicate_state(
2613 struct drm_connector *connector)
2614{
2615 struct dm_connector_state *state =
2616 to_dm_connector_state(connector->state);
2617
2618 struct dm_connector_state *new_state =
2619 kmemdup(state, sizeof(*state), GFP_KERNEL);
2620
2621 if (new_state) {
2622 __drm_atomic_helper_connector_duplicate_state(connector,
1ecfc3da 2623 &new_state->base);
e7b07cee
HW
2624 return &new_state->base;
2625 }
2626
2627 return NULL;
2628}
2629
/* Connector vtable: detection, mode probing and DM property handling. */
static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
	.reset = amdgpu_dm_connector_funcs_reset,
	.detect = amdgpu_dm_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = amdgpu_dm_connector_destroy,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
	.atomic_get_property = amdgpu_dm_connector_atomic_get_property
};
2640
/*
 * Connector helper: return the encoder bound to this connector (DM uses a
 * single encoder per connector, so the first encoder id is the only one).
 * Returns NULL and logs when no encoder is associated.
 */
static struct drm_encoder *best_encoder(struct drm_connector *connector)
{
	int enc_id = connector->encoder_ids[0];
	struct drm_mode_object *obj;
	struct drm_encoder *encoder;

	DRM_DEBUG_KMS("Finding the best encoder\n");

	/* pick the encoder ids */
	if (enc_id) {
		obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
		if (!obj) {
			DRM_ERROR("Couldn't find a matching encoder for our connector\n");
			return NULL;
		}
		encoder = obj_to_encoder(obj);
		return encoder;
	}
	DRM_ERROR("No encoder id\n");
	return NULL;
}
2662
/* Connector helper .get_modes trampoline into the DM implementation. */
static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}
2667
c84dec2f 2668static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
2669{
2670 struct dc_sink_init_data init_params = {
2671 .link = aconnector->dc_link,
2672 .sink_signal = SIGNAL_TYPE_VIRTUAL
2673 };
2674 struct edid *edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
2675
2676 if (!aconnector->base.edid_blob_ptr ||
2677 !aconnector->base.edid_blob_ptr->data) {
2678 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
2679 aconnector->base.name);
2680
2681 aconnector->base.force = DRM_FORCE_OFF;
2682 aconnector->base.override_edid = false;
2683 return;
2684 }
2685
2686 aconnector->edid = edid;
2687
2688 aconnector->dc_em_sink = dc_link_add_remote_sink(
2689 aconnector->dc_link,
2690 (uint8_t *)edid,
2691 (edid->extensions + 1) * EDID_LENGTH,
2692 &init_params);
2693
2694 if (aconnector->base.force
2695 == DRM_FORCE_ON)
2696 aconnector->dc_sink = aconnector->dc_link->local_sink ?
2697 aconnector->dc_link->local_sink :
2698 aconnector->dc_em_sink;
2699}
2700
/*
 * One-time EDID management for forced connectors: pretend a healthy DP
 * link so a headless forced-on DP connector gets an initial modeset, then
 * install the emulated sink built from the override EDID.
 */
static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = (struct dc_link *)aconnector->dc_link;

	/* In case of headless boot with force on for DP managed connector
	 * Those settings have to be != 0 to get initial modeset
	 */
	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
	}


	aconnector->base.override_edid = true;
	create_eml_sink(aconnector);
}
2717
/*
 * Connector helper .mode_valid: build a throwaway stream for @mode and ask
 * DC to validate it.  Interlaced and double-scan modes are rejected up
 * front.  Returns MODE_OK or MODE_ERROR.
 */
int amdgpu_dm_connector_mode_valid(
		struct drm_connector *connector,
		struct drm_display_mode *mode)
{
	int result = MODE_ERROR;
	struct dc_sink *dc_sink;
	struct amdgpu_device *adev = connector->dev->dev_private;
	/* TODO: Unhardcode stream count */
	struct dc_stream_state *stream;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
		return result;

	/* Only run this the first time mode_valid is called to initialize
	 * EDID mgmt
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
			!aconnector->dc_em_sink)
		handle_edid_mgmt(aconnector);

	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;

	if (dc_sink == NULL) {
		DRM_ERROR("dc_sink is NULL!\n");
		goto fail;
	}

	stream = dc_create_stream_for_sink(dc_sink);
	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto fail;
	}

	drm_mode_set_crtcinfo(mode, 0);
	fill_stream_properties_from_drm_display_mode(stream, mode, connector);

	/* Validate at 1:1 scaling (src == dst). */
	stream->src.width = mode->hdisplay;
	stream->src.height = mode->vdisplay;
	stream->dst = stream->src;

	if (dc_validate_stream(adev->dm.dc, stream))
		result = MODE_OK;

	dc_stream_release(stream);

fail:
	/* TODO: error handling*/
	return result;
}
2769
/* Connector helper vtable: probing, validation and encoder selection. */
static const struct drm_connector_helper_funcs
amdgpu_dm_connector_helper_funcs = {
	/*
	 * If hotplug a second bigger display in FB Con mode, bigger resolution
	 * modes will be filtered by drm_mode_validate_size(), and those modes
	 * is missing after user start lightdm. So we need to renew modes list.
	 * in get_modes call back, not just return the modes count
	 */
	.get_modes = get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
	.best_encoder = best_encoder
};
2782
/* Intentionally empty: CRTC disable is presumably handled through the
 * atomic commit path rather than this legacy helper — TODO confirm. */
static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}
2786
/*
 * CRTC helper .atomic_check: a modeset-requiring state without a stream is
 * invalid; a state without a stream at all (e.g. during reset) is fine;
 * otherwise ask DC to validate the stream.  Returns 0 or -EINVAL.
 */
static int dm_crtc_helper_atomic_check(
	struct drm_crtc *crtc,
	struct drm_crtc_state *state)
{
	struct amdgpu_device *adev = crtc->dev->dev_private;
	struct dc *dc = adev->dm.dc;
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
	int ret = -EINVAL;

	if (unlikely(!dm_crtc_state->stream &&
			modeset_required(state, NULL, dm_crtc_state->stream))) {
		WARN_ON(1);
		return ret;
	}

	/* In some use cases, like reset, no stream is attached */
	if (!dm_crtc_state->stream)
		return 0;

	if (dc_validate_stream(dc, dm_crtc_state->stream))
		return 0;

	return ret;
}
2811
/* CRTC helper .mode_fixup: no adjustment needed, accept every mode. */
static bool dm_crtc_helper_mode_fixup(
	struct drm_crtc *crtc,
	const struct drm_display_mode *mode,
	struct drm_display_mode *adjusted_mode)
{
	return true;
}
2819
/* CRTC helper vtable wired to the DM atomic-check/no-op helpers above. */
static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
	.disable = dm_crtc_helper_disable,
	.atomic_check = dm_crtc_helper_atomic_check,
	.mode_fixup = dm_crtc_helper_mode_fixup
};
2825
/* Intentionally empty: encoder disable is presumably driven by DC through
 * the stream/link programming path — TODO confirm. */
static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{

}
2830
/* Encoder .atomic_check: nothing encoder-specific to validate; accept. */
static int dm_encoder_helper_atomic_check(
	struct drm_encoder *encoder,
	struct drm_crtc_state *crtc_state,
	struct drm_connector_state *conn_state)
{
	return 0;
}
2838
/* Encoder helper vtable: both hooks are effectively no-ops for DM. */
const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
	.disable = dm_encoder_helper_disable,
	.atomic_check = dm_encoder_helper_atomic_check
};
2843
2844static void dm_drm_plane_reset(struct drm_plane *plane)
2845{
2846 struct dm_plane_state *amdgpu_state = NULL;
2847
2848 if (plane->state)
2849 plane->funcs->atomic_destroy_state(plane, plane->state);
2850
2851 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
2852
2853 if (amdgpu_state) {
2854 plane->state = &amdgpu_state->base;
2855 plane->state->plane = plane;
2856 plane->state->rotation = DRM_MODE_ROTATE_0;
2857 } else
2858 WARN_ON(1);
2859}
2860
/*
 * Plane .atomic_duplicate_state hook: copy the base plane state and take
 * an extra reference on the attached dc_plane_state (if any).
 * Returns NULL on allocation failure.
 */
static struct drm_plane_state *
dm_drm_plane_duplicate_state(struct drm_plane *plane)
{
	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;

	old_dm_plane_state = to_dm_plane_state(plane->state);
	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
	if (!dm_plane_state)
		return NULL;

	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);

	if (old_dm_plane_state->dc_state) {
		/* Share the dc_state between old and new; refcounted. */
		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
		dc_plane_state_retain(dm_plane_state->dc_state);
	}

	return &dm_plane_state->base;
}
2880
/*
 * Plane .atomic_destroy_state hook: drop the dc_plane_state reference,
 * then let the DRM helper free the base state.
 */
void dm_drm_plane_destroy_state(struct drm_plane *plane,
				struct drm_plane_state *state)
{
	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);

	if (dm_plane_state->dc_state)
		dc_plane_state_release(dm_plane_state->dc_state);

	drm_atomic_helper_plane_destroy_state(plane, state);
}
2891
/* Plane vtable: DRM atomic helpers plus DM state lifecycle hooks. */
static const struct drm_plane_funcs dm_plane_funcs = {
	.update_plane	= drm_atomic_helper_update_plane,
	.disable_plane	= drm_atomic_helper_disable_plane,
	.destroy	= drm_plane_cleanup,
	.reset = dm_drm_plane_reset,
	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
	.atomic_destroy_state = dm_drm_plane_destroy_state,
};
2900
/*
 * Plane helper .prepare_fb: pin the framebuffer BO into VRAM and program
 * its GPU address into the new dc_plane_state (graphics: single address;
 * video: separate luma/chroma with 64-pixel aligned stride).
 * Returns 0 on success or a negative errno from reserve/pin.
 */
static int dm_plane_helper_prepare_fb(
	struct drm_plane *plane,
	struct drm_plane_state *new_state)
{
	struct amdgpu_framebuffer *afb;
	struct drm_gem_object *obj;
	struct amdgpu_bo *rbo;
	int r;
	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
	unsigned int awidth;

	dm_plane_state_old = to_dm_plane_state(plane->state);
	dm_plane_state_new = to_dm_plane_state(new_state);

	if (!new_state->fb) {
		DRM_DEBUG_KMS("No FB bound\n");
		return 0;
	}

	afb = to_amdgpu_framebuffer(new_state->fb);

	/* Pin the backing BO; afb->address receives the GPU address. */
	obj = afb->obj;
	rbo = gem_to_amdgpu_bo(obj);
	r = amdgpu_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		return r;

	r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &afb->address);


	amdgpu_bo_unreserve(rbo);

	if (unlikely(r != 0)) {
		DRM_ERROR("Failed to pin framebuffer\n");
		return r;
	}

	amdgpu_bo_ref(rbo);

	/* Only (re)program addresses when the dc_state actually changed. */
	if (dm_plane_state_new->dc_state &&
			dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
		struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;

		if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
			plane_state->address.grph.addr.low_part = lower_32_bits(afb->address);
			plane_state->address.grph.addr.high_part = upper_32_bits(afb->address);
		} else {
			/* Video surface: chroma plane follows the luma plane. */
			awidth = ALIGN(new_state->fb->width, 64);
			plane_state->address.video_progressive.luma_addr.low_part
							= lower_32_bits(afb->address);
			plane_state->address.video_progressive.chroma_addr.low_part
							= lower_32_bits(afb->address) +
							(awidth * new_state->fb->height);
		}
	}

	/* It's a hack for s3 since in 4.9 kernel filter out cursor buffer
	 * prepare and cleanup in drm_atomic_helper_prepare_planes
	 * and drm_atomic_helper_cleanup_planes because fb doens't in s3.
	 * IN 4.10 kernel this code should be removed and amdgpu_device_suspend
	 * code touching fram buffers should be avoided for DC.
	 */
	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_state->crtc);

		acrtc->cursor_bo = obj;
	}
	return 0;
}
2970
/*
 * Plane helper .cleanup_fb: unpin the old framebuffer's BO and drop the
 * reference taken in prepare_fb.  Best-effort: a failed reserve only logs.
 */
static void dm_plane_helper_cleanup_fb(
	struct drm_plane *plane,
	struct drm_plane_state *old_state)
{
	struct amdgpu_bo *rbo;
	struct amdgpu_framebuffer *afb;
	int r;

	if (!old_state->fb)
		return;

	afb = to_amdgpu_framebuffer(old_state->fb);
	rbo = gem_to_amdgpu_bo(afb->obj);
	r = amdgpu_bo_reserve(rbo, false);
	if (unlikely(r)) {
		DRM_ERROR("failed to reserve rbo before unpin\n");
		return;
	}

	amdgpu_bo_unpin(rbo);
	amdgpu_bo_unreserve(rbo);
	amdgpu_bo_unref(&rbo);
}
2994
/*
 * Build a dc_validation_set entry for @connector/@mode: create a stream,
 * fill its timing from the mode and store it in @val_set (which takes
 * ownership of the stream reference).  Interlaced and double-scan modes
 * are rejected.  Returns MODE_OK or MODE_ERROR.
 */
int dm_create_validation_set_for_connector(struct drm_connector *connector,
		struct drm_display_mode *mode, struct dc_validation_set *val_set)
{
	int result = MODE_ERROR;
	struct dc_sink *dc_sink =
			to_amdgpu_dm_connector(connector)->dc_sink;
	/* TODO: Unhardcode stream count */
	struct dc_stream_state *stream;

	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
		return result;

	if (dc_sink == NULL) {
		DRM_ERROR("dc_sink is NULL!\n");
		return result;
	}

	stream = dc_create_stream_for_sink(dc_sink);

	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		return result;
	}

	drm_mode_set_crtcinfo(mode, 0);

	fill_stream_properties_from_drm_display_mode(stream, mode, connector);

	val_set->stream = stream;

	/* Validate at 1:1 scaling (src == dst). */
	stream->src.width = mode->hdisplay;
	stream->src.height = mode->vdisplay;
	stream->dst = stream->src;

	return MODE_OK;
}
3032
cbd19488
AG
/*
 * Plane .atomic_check: accept states with no dc_plane_state attached
 * (nothing to validate); otherwise let DC validate the plane.
 * Returns 0 on success, -EINVAL when DC rejects the plane.
 */
int dm_plane_atomic_check(struct drm_plane *plane,
			  struct drm_plane_state *state)
{
	struct amdgpu_device *adev = plane->dev->dev_private;
	struct dc *dc = adev->dm.dc;
	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);

	if (!dm_plane_state->dc_state)
		return 0;

	if (dc_validate_plane(dc, dm_plane_state->dc_state))
		return 0;

	return -EINVAL;
}
3048
/* Plane helper vtable: pin/unpin the FB around commits and let DC
 * validate plane state during atomic check.
 */
static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
	.prepare_fb = dm_plane_helper_prepare_fb,
	.cleanup_fb = dm_plane_helper_cleanup_fb,
	.atomic_check = dm_plane_atomic_check,
};
3054
3055/*
3056 * TODO: these are currently initialized to rgb formats only.
3057 * For future use cases we should either initialize them dynamically based on
3058 * plane capabilities, or initialize this array to all formats, so internal drm
3059 * check will succeed, and let DC to implement proper check
3060 */
3061static uint32_t rgb_formats[] = {
3062 DRM_FORMAT_RGB888,
3063 DRM_FORMAT_XRGB8888,
3064 DRM_FORMAT_ARGB8888,
3065 DRM_FORMAT_RGBA8888,
3066 DRM_FORMAT_XRGB2101010,
3067 DRM_FORMAT_XBGR2101010,
3068 DRM_FORMAT_ARGB2101010,
3069 DRM_FORMAT_ABGR2101010,
3070};
3071
3072static uint32_t yuv_formats[] = {
3073 DRM_FORMAT_NV12,
3074 DRM_FORMAT_NV21,
3075};
3076
3077static const u32 cursor_formats[] = {
3078 DRM_FORMAT_ARGB8888
3079};
3080
3081int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
3082 struct amdgpu_plane *aplane,
3083 unsigned long possible_crtcs)
3084{
3085 int res = -EPERM;
3086
3087 switch (aplane->base.type) {
3088 case DRM_PLANE_TYPE_PRIMARY:
3089 aplane->base.format_default = true;
3090
3091 res = drm_universal_plane_init(
3092 dm->adev->ddev,
3093 &aplane->base,
3094 possible_crtcs,
3095 &dm_plane_funcs,
3096 rgb_formats,
3097 ARRAY_SIZE(rgb_formats),
3098 NULL, aplane->base.type, NULL);
3099 break;
3100 case DRM_PLANE_TYPE_OVERLAY:
3101 res = drm_universal_plane_init(
3102 dm->adev->ddev,
3103 &aplane->base,
3104 possible_crtcs,
3105 &dm_plane_funcs,
3106 yuv_formats,
3107 ARRAY_SIZE(yuv_formats),
3108 NULL, aplane->base.type, NULL);
3109 break;
3110 case DRM_PLANE_TYPE_CURSOR:
3111 res = drm_universal_plane_init(
3112 dm->adev->ddev,
3113 &aplane->base,
3114 possible_crtcs,
3115 &dm_plane_funcs,
3116 cursor_formats,
3117 ARRAY_SIZE(cursor_formats),
3118 NULL, aplane->base.type, NULL);
3119 break;
3120 }
3121
3122 drm_plane_helper_add(&aplane->base, &dm_plane_helper_funcs);
3123
3124 return res;
3125}
3126
3127int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
3128 struct drm_plane *plane,
3129 uint32_t crtc_index)
3130{
3131 struct amdgpu_crtc *acrtc = NULL;
3132 struct amdgpu_plane *cursor_plane;
3133
3134 int res = -ENOMEM;
3135
3136 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
3137 if (!cursor_plane)
3138 goto fail;
3139
3140 cursor_plane->base.type = DRM_PLANE_TYPE_CURSOR;
3141 res = amdgpu_dm_plane_init(dm, cursor_plane, 0);
3142
3143 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
3144 if (!acrtc)
3145 goto fail;
3146
3147 res = drm_crtc_init_with_planes(
3148 dm->ddev,
3149 &acrtc->base,
3150 plane,
3151 &cursor_plane->base,
3152 &amdgpu_dm_crtc_funcs, NULL);
3153
3154 if (res)
3155 goto fail;
3156
3157 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
3158
3159 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
3160 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
3161
3162 acrtc->crtc_id = crtc_index;
3163 acrtc->base.enabled = false;
3164
3165 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
3166 drm_mode_crtc_set_gamma_size(&acrtc->base, 256);
3167
3168 return 0;
3169
3170fail:
b830ebc9
HW
3171 kfree(acrtc);
3172 kfree(cursor_plane);
e7b07cee
HW
3173 acrtc->crtc_id = -1;
3174 return res;
3175}
3176
3177
3178static int to_drm_connector_type(enum signal_type st)
3179{
3180 switch (st) {
3181 case SIGNAL_TYPE_HDMI_TYPE_A:
3182 return DRM_MODE_CONNECTOR_HDMIA;
3183 case SIGNAL_TYPE_EDP:
3184 return DRM_MODE_CONNECTOR_eDP;
3185 case SIGNAL_TYPE_RGB:
3186 return DRM_MODE_CONNECTOR_VGA;
3187 case SIGNAL_TYPE_DISPLAY_PORT:
3188 case SIGNAL_TYPE_DISPLAY_PORT_MST:
3189 return DRM_MODE_CONNECTOR_DisplayPort;
3190 case SIGNAL_TYPE_DVI_DUAL_LINK:
3191 case SIGNAL_TYPE_DVI_SINGLE_LINK:
3192 return DRM_MODE_CONNECTOR_DVID;
3193 case SIGNAL_TYPE_VIRTUAL:
3194 return DRM_MODE_CONNECTOR_VIRTUAL;
3195
3196 default:
3197 return DRM_MODE_CONNECTOR_Unknown;
3198 }
3199}
3200
/*
 * Cache the connector's preferred probed mode in the encoder's
 * native_mode so it can later be used as the base for common modes.
 * Clears native_mode.clock first so a stale mode is not kept when no
 * preferred mode is found.
 */
static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
{
	const struct drm_connector_helper_funcs *helper =
		connector->helper_private;
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	encoder = helper->best_encoder(connector);

	if (encoder == NULL)
		return;

	amdgpu_encoder = to_amdgpu_encoder(encoder);

	/* Invalidate any previously cached native mode. */
	amdgpu_encoder->native_mode.clock = 0;

	if (!list_empty(&connector->probed_modes)) {
		struct drm_display_mode *preferred_mode = NULL;

		list_for_each_entry(preferred_mode,
				    &connector->probed_modes,
				    head) {
			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
				amdgpu_encoder->native_mode = *preferred_mode;

			/* NOTE(review): this break is unconditional, so only
			 * the FIRST probed mode is ever examined; if the
			 * preferred mode is not first, native_mode stays
			 * cleared. Looks intentional-as-written but worth
			 * confirming -- TODO verify against mode ordering.
			 */
			break;
		}

	}
}
3231
3232static struct drm_display_mode *amdgpu_dm_create_common_mode(
3233 struct drm_encoder *encoder, char *name,
3234 int hdisplay, int vdisplay)
3235{
3236 struct drm_device *dev = encoder->dev;
3237 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3238 struct drm_display_mode *mode = NULL;
3239 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
3240
3241 mode = drm_mode_duplicate(dev, native_mode);
3242
b830ebc9 3243 if (mode == NULL)
e7b07cee
HW
3244 return NULL;
3245
3246 mode->hdisplay = hdisplay;
3247 mode->vdisplay = vdisplay;
3248 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
3249 strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
3250
3251 return mode;
3252
3253}
3254
3255static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
3256 struct drm_connector *connector)
3257{
3258 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3259 struct drm_display_mode *mode = NULL;
3260 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
c84dec2f
HW
3261 struct amdgpu_dm_connector *amdgpu_dm_connector =
3262 to_amdgpu_dm_connector(connector);
e7b07cee
HW
3263 int i;
3264 int n;
3265 struct mode_size {
3266 char name[DRM_DISPLAY_MODE_LEN];
3267 int w;
3268 int h;
b830ebc9 3269 } common_modes[] = {
e7b07cee
HW
3270 { "640x480", 640, 480},
3271 { "800x600", 800, 600},
3272 { "1024x768", 1024, 768},
3273 { "1280x720", 1280, 720},
3274 { "1280x800", 1280, 800},
3275 {"1280x1024", 1280, 1024},
3276 { "1440x900", 1440, 900},
3277 {"1680x1050", 1680, 1050},
3278 {"1600x1200", 1600, 1200},
3279 {"1920x1080", 1920, 1080},
3280 {"1920x1200", 1920, 1200}
3281 };
3282
b830ebc9 3283 n = ARRAY_SIZE(common_modes);
e7b07cee
HW
3284
3285 for (i = 0; i < n; i++) {
3286 struct drm_display_mode *curmode = NULL;
3287 bool mode_existed = false;
3288
3289 if (common_modes[i].w > native_mode->hdisplay ||
b830ebc9
HW
3290 common_modes[i].h > native_mode->vdisplay ||
3291 (common_modes[i].w == native_mode->hdisplay &&
3292 common_modes[i].h == native_mode->vdisplay))
3293 continue;
e7b07cee
HW
3294
3295 list_for_each_entry(curmode, &connector->probed_modes, head) {
3296 if (common_modes[i].w == curmode->hdisplay &&
b830ebc9 3297 common_modes[i].h == curmode->vdisplay) {
e7b07cee
HW
3298 mode_existed = true;
3299 break;
3300 }
3301 }
3302
3303 if (mode_existed)
3304 continue;
3305
3306 mode = amdgpu_dm_create_common_mode(encoder,
3307 common_modes[i].name, common_modes[i].w,
3308 common_modes[i].h);
3309 drm_mode_probed_add(connector, mode);
c84dec2f 3310 amdgpu_dm_connector->num_modes++;
e7b07cee
HW
3311 }
3312}
3313
3314static void amdgpu_dm_connector_ddc_get_modes(
3315 struct drm_connector *connector,
3316 struct edid *edid)
3317{
c84dec2f
HW
3318 struct amdgpu_dm_connector *amdgpu_dm_connector =
3319 to_amdgpu_dm_connector(connector);
e7b07cee
HW
3320
3321 if (edid) {
3322 /* empty probed_modes */
3323 INIT_LIST_HEAD(&connector->probed_modes);
c84dec2f 3324 amdgpu_dm_connector->num_modes =
e7b07cee
HW
3325 drm_add_edid_modes(connector, edid);
3326
3327 drm_edid_to_eld(connector, edid);
3328
3329 amdgpu_dm_get_native_mode(connector);
3330 } else
c84dec2f 3331 amdgpu_dm_connector->num_modes = 0;
e7b07cee
HW
3332}
3333
3334int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
3335{
3336 const struct drm_connector_helper_funcs *helper =
3337 connector->helper_private;
c84dec2f
HW
3338 struct amdgpu_dm_connector *amdgpu_dm_connector =
3339 to_amdgpu_dm_connector(connector);
e7b07cee 3340 struct drm_encoder *encoder;
c84dec2f 3341 struct edid *edid = amdgpu_dm_connector->edid;
e7b07cee
HW
3342
3343 encoder = helper->best_encoder(connector);
3344
3345 amdgpu_dm_connector_ddc_get_modes(connector, edid);
3346 amdgpu_dm_connector_add_common_modes(encoder, connector);
c84dec2f 3347 return amdgpu_dm_connector->num_modes;
e7b07cee
HW
3348}
3349
3350void amdgpu_dm_connector_init_helper(
3351 struct amdgpu_display_manager *dm,
c84dec2f 3352 struct amdgpu_dm_connector *aconnector,
e7b07cee
HW
3353 int connector_type,
3354 struct dc_link *link,
3355 int link_index)
3356{
3357 struct amdgpu_device *adev = dm->ddev->dev_private;
3358
3359 aconnector->connector_id = link_index;
3360 aconnector->dc_link = link;
3361 aconnector->base.interlace_allowed = false;
3362 aconnector->base.doublescan_allowed = false;
3363 aconnector->base.stereo_allowed = false;
3364 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
3365 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
3366
3367 mutex_init(&aconnector->hpd_lock);
3368
b830ebc9
HW
3369 /* configure support HPD hot plug connector_>polled default value is 0
3370 * which means HPD hot plug not supported
3371 */
e7b07cee
HW
3372 switch (connector_type) {
3373 case DRM_MODE_CONNECTOR_HDMIA:
3374 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
3375 break;
3376 case DRM_MODE_CONNECTOR_DisplayPort:
3377 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
3378 break;
3379 case DRM_MODE_CONNECTOR_DVID:
3380 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
3381 break;
3382 default:
3383 break;
3384 }
3385
3386 drm_object_attach_property(&aconnector->base.base,
3387 dm->ddev->mode_config.scaling_mode_property,
3388 DRM_MODE_SCALE_NONE);
3389
3390 drm_object_attach_property(&aconnector->base.base,
3391 adev->mode_info.underscan_property,
3392 UNDERSCAN_OFF);
3393 drm_object_attach_property(&aconnector->base.base,
3394 adev->mode_info.underscan_hborder_property,
3395 0);
3396 drm_object_attach_property(&aconnector->base.base,
3397 adev->mode_info.underscan_vborder_property,
3398 0);
3399
3400}
3401
3402int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
3403 struct i2c_msg *msgs, int num)
3404{
3405 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
3406 struct ddc_service *ddc_service = i2c->ddc_service;
3407 struct i2c_command cmd;
3408 int i;
3409 int result = -EIO;
3410
b830ebc9 3411 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
e7b07cee
HW
3412
3413 if (!cmd.payloads)
3414 return result;
3415
3416 cmd.number_of_payloads = num;
3417 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
3418 cmd.speed = 100;
3419
3420 for (i = 0; i < num; i++) {
3421 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
3422 cmd.payloads[i].address = msgs[i].addr;
3423 cmd.payloads[i].length = msgs[i].len;
3424 cmd.payloads[i].data = msgs[i].buf;
3425 }
3426
3427 if (dal_i2caux_submit_i2c_command(
3428 ddc_service->ctx->i2caux,
3429 ddc_service->ddc_pin,
3430 &cmd))
3431 result = num;
3432
3433 kfree(cmd.payloads);
3434 return result;
3435}
3436
/* Advertise plain I2C plus emulated SMBus capability for the DM bus. */
u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

/* i2c algorithm vtable backing the DM-created adapters. */
static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
	.master_xfer = amdgpu_dm_i2c_xfer,
	.functionality = amdgpu_dm_i2c_func,
};
3446
3447static struct amdgpu_i2c_adapter *create_i2c(
3448 struct ddc_service *ddc_service,
3449 int link_index,
3450 int *res)
3451{
3452 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
3453 struct amdgpu_i2c_adapter *i2c;
3454
b830ebc9 3455 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
e7b07cee
HW
3456 i2c->base.owner = THIS_MODULE;
3457 i2c->base.class = I2C_CLASS_DDC;
3458 i2c->base.dev.parent = &adev->pdev->dev;
3459 i2c->base.algo = &amdgpu_dm_i2c_algo;
b830ebc9 3460 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
e7b07cee
HW
3461 i2c_set_adapdata(&i2c->base, i2c);
3462 i2c->ddc_service = ddc_service;
3463
3464 return i2c;
3465}
3466
3467/* Note: this function assumes that dc_link_detect() was called for the
b830ebc9
HW
3468 * dc_link which will be represented by this aconnector.
3469 */
e7b07cee
HW
3470int amdgpu_dm_connector_init(
3471 struct amdgpu_display_manager *dm,
c84dec2f 3472 struct amdgpu_dm_connector *aconnector,
e7b07cee
HW
3473 uint32_t link_index,
3474 struct amdgpu_encoder *aencoder)
3475{
3476 int res = 0;
3477 int connector_type;
3478 struct dc *dc = dm->dc;
3479 struct dc_link *link = dc_get_link_at_index(dc, link_index);
3480 struct amdgpu_i2c_adapter *i2c;
3481 ((struct dc_link *)link)->priv = aconnector;
3482
3483 DRM_DEBUG_KMS("%s()\n", __func__);
3484
3485 i2c = create_i2c(link->ddc, link->link_index, &res);
3486 aconnector->i2c = i2c;
3487 res = i2c_add_adapter(&i2c->base);
3488
3489 if (res) {
3490 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
3491 goto out_free;
3492 }
3493
3494 connector_type = to_drm_connector_type(link->connector_signal);
3495
3496 res = drm_connector_init(
3497 dm->ddev,
3498 &aconnector->base,
3499 &amdgpu_dm_connector_funcs,
3500 connector_type);
3501
3502 if (res) {
3503 DRM_ERROR("connector_init failed\n");
3504 aconnector->connector_id = -1;
3505 goto out_free;
3506 }
3507
3508 drm_connector_helper_add(
3509 &aconnector->base,
3510 &amdgpu_dm_connector_helper_funcs);
3511
3512 amdgpu_dm_connector_init_helper(
3513 dm,
3514 aconnector,
3515 connector_type,
3516 link,
3517 link_index);
3518
3519 drm_mode_connector_attach_encoder(
3520 &aconnector->base, &aencoder->base);
3521
3522 drm_connector_register(&aconnector->base);
3523
3524 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
3525 || connector_type == DRM_MODE_CONNECTOR_eDP)
3526 amdgpu_dm_initialize_dp_connector(dm, aconnector);
3527
3528#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3529 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3530
3531 /* NOTE: this currently will create backlight device even if a panel
3532 * is not connected to the eDP/LVDS connector.
3533 *
3534 * This is less than ideal but we don't have sink information at this
3535 * stage since detection happens after. We can't do detection earlier
3536 * since MST detection needs connectors to be created first.
3537 */
3538 if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
3539 /* Event if registration failed, we should continue with
3540 * DM initialization because not having a backlight control
b830ebc9
HW
3541 * is better then a black screen.
3542 */
e7b07cee
HW
3543 amdgpu_dm_register_backlight_device(dm);
3544
3545 if (dm->backlight_dev)
3546 dm->backlight_link = link;
3547 }
3548#endif
3549
3550out_free:
3551 if (res) {
3552 kfree(i2c);
3553 aconnector->i2c = NULL;
3554 }
3555 return res;
3556}
3557
3558int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
3559{
3560 switch (adev->mode_info.num_crtc) {
3561 case 1:
3562 return 0x1;
3563 case 2:
3564 return 0x3;
3565 case 3:
3566 return 0x7;
3567 case 4:
3568 return 0xf;
3569 case 5:
3570 return 0x1f;
3571 case 6:
3572 default:
3573 return 0x3f;
3574 }
3575}
3576
3577int amdgpu_dm_encoder_init(
3578 struct drm_device *dev,
3579 struct amdgpu_encoder *aencoder,
3580 uint32_t link_index)
3581{
3582 struct amdgpu_device *adev = dev->dev_private;
3583
3584 int res = drm_encoder_init(dev,
3585 &aencoder->base,
3586 &amdgpu_dm_encoder_funcs,
3587 DRM_MODE_ENCODER_TMDS,
3588 NULL);
3589
3590 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
3591
3592 if (!res)
3593 aencoder->encoder_id = link_index;
3594 else
3595 aencoder->encoder_id = -1;
3596
3597 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
3598
3599 return res;
3600}
3601
3602static void manage_dm_interrupts(
3603 struct amdgpu_device *adev,
3604 struct amdgpu_crtc *acrtc,
3605 bool enable)
3606{
3607 /*
3608 * this is not correct translation but will work as soon as VBLANK
3609 * constant is the same as PFLIP
3610 */
3611 int irq_type =
3612 amdgpu_crtc_idx_to_irq_type(
3613 adev,
3614 acrtc->crtc_id);
3615
3616 if (enable) {
3617 drm_crtc_vblank_on(&acrtc->base);
3618 amdgpu_irq_get(
3619 adev,
3620 &adev->pageflip_irq,
3621 irq_type);
3622 } else {
3623
3624 amdgpu_irq_put(
3625 adev,
3626 &adev->pageflip_irq,
3627 irq_type);
3628 drm_crtc_vblank_off(&acrtc->base);
3629 }
3630}
3631
/*
 * Compare the scaling/underscan configuration of two connector states.
 * Returns true when they differ in a way that requires reprogramming.
 */
static bool is_scaling_state_different(
		const struct dm_connector_state *dm_state,
		const struct dm_connector_state *old_dm_state)
{
	if (dm_state->scaling != old_dm_state->scaling)
		return true;
	/* Underscan toggled off: only significant if borders were in use.
	 * NOTE(review): the `hborder != 0 && vborder != 0` conjunction
	 * (here and below) means a single-axis border change is ignored;
	 * `||` looks more plausible -- TODO confirm intended semantics.
	 */
	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
			return true;
	} else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
			return true;
	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
		return true;
	return false;
}
3649
3650static void remove_stream(
3651 struct amdgpu_device *adev,
3652 struct amdgpu_crtc *acrtc,
0971c40e 3653 struct dc_stream_state *stream)
e7b07cee
HW
3654{
3655 /* this is the update mode case */
3656 if (adev->dm.freesync_module)
3657 mod_freesync_remove_stream(adev->dm.freesync_module, stream);
3658
3659 acrtc->otg_inst = -1;
3660 acrtc->enabled = false;
3661}
3662
2a8f6ccb
HW
3663int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
3664 struct dc_cursor_position *position)
3665{
3666 struct amdgpu_crtc *amdgpu_crtc = amdgpu_crtc = to_amdgpu_crtc(crtc);
3667 int x, y;
3668 int xorigin = 0, yorigin = 0;
3669
3670 if (!crtc || !plane->state->fb) {
3671 position->enable = false;
3672 position->x = 0;
3673 position->y = 0;
3674 return 0;
3675 }
3676
3677 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
3678 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
3679 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
3680 __func__,
3681 plane->state->crtc_w,
3682 plane->state->crtc_h);
3683 return -EINVAL;
3684 }
3685
3686 x = plane->state->crtc_x;
3687 y = plane->state->crtc_y;
3688 /* avivo cursor are offset into the total surface */
3689 x += crtc->primary->state->src_x >> 16;
3690 y += crtc->primary->state->src_y >> 16;
3691 if (x < 0) {
3692 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
3693 x = 0;
3694 }
3695 if (y < 0) {
3696 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
3697 y = 0;
3698 }
3699 position->enable = true;
3700 position->x = x;
3701 position->y = y;
3702 position->x_hotspot = xorigin;
3703 position->y_hotspot = yorigin;
3704
3705 return 0;
3706}
3707
e7b07cee
HW
3708static void handle_cursor_update(
3709 struct drm_plane *plane,
3710 struct drm_plane_state *old_plane_state)
3711{
2a8f6ccb
HW
3712 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
3713 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
3714 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
3715 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
3716 uint64_t address = afb ? afb->address : 0;
3717 struct dc_cursor_position position;
3718 struct dc_cursor_attributes attributes;
3719 int ret;
3720
e7b07cee
HW
3721 if (!plane->state->fb && !old_plane_state->fb)
3722 return;
3723
2a8f6ccb
HW
3724 DRM_DEBUG_KMS("%s: crtc_id=%d with size %d to %d\n",
3725 __func__,
3726 amdgpu_crtc->crtc_id,
3727 plane->state->crtc_w,
3728 plane->state->crtc_h);
3729
3730 ret = get_cursor_position(plane, crtc, &position);
3731 if (ret)
3732 return;
3733
3734 if (!position.enable) {
3735 /* turn off cursor */
3736 if (crtc_state && crtc_state->stream)
3737 dc_stream_set_cursor_position(crtc_state->stream,
3738 &position);
3739 return;
e7b07cee 3740 }
e7b07cee 3741
2a8f6ccb
HW
3742 amdgpu_crtc->cursor_width = plane->state->crtc_w;
3743 amdgpu_crtc->cursor_height = plane->state->crtc_h;
3744
3745 attributes.address.high_part = upper_32_bits(address);
3746 attributes.address.low_part = lower_32_bits(address);
3747 attributes.width = plane->state->crtc_w;
3748 attributes.height = plane->state->crtc_h;
3749 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
3750 attributes.rotation_angle = 0;
3751 attributes.attribute_flags.value = 0;
3752
3753 attributes.pitch = attributes.width;
3754
3755 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
3756 &attributes))
3757 DRM_ERROR("DC failed to set cursor attributes\n");
3758
3759 if (crtc_state->stream)
3760 if (!dc_stream_set_cursor_position(crtc_state->stream,
3761 &position))
3762 DRM_ERROR("DC failed to set cursor position\n");
3763}
e7b07cee
HW
3764
/*
 * Take ownership of the CRTC's pending pageflip event so the flip ISR
 * can deliver it. Caller must hold the device's event_lock.
 */
static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
{

	assert_spin_locked(&acrtc->base.dev->event_lock);
	/* A previous event still pending here would be lost. */
	WARN_ON(acrtc->event);

	acrtc->event = acrtc->base.state->event;

	/* Set the flip status */
	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

	/* Mark this event as consumed */
	acrtc->base.state->event = NULL;

	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
						 acrtc->crtc_id);
}
3782
/*
 * Executes flip
 *
 * Waits on all BO's fences and for proper vblank count
 */
static void amdgpu_dm_do_flip(
	struct drm_crtc *crtc,
	struct drm_framebuffer *fb,
	uint32_t target)
{
	unsigned long flags;
	uint32_t target_vblank;
	int r, vpos, hpos;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
	struct amdgpu_bo *abo = gem_to_amdgpu_bo(afb->obj);
	struct amdgpu_device *adev = crtc->dev->dev_private;
	bool async_flip = (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
	struct dc_flip_addrs addr = { {0} };
	/* TODO eliminate or rename surface_update */
	struct dc_surface_update surface_updates[1] = { {0} };
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);


	/* Prepare wait for target vblank early - before the fence-waits */
	target_vblank = target - drm_crtc_vblank_count(crtc) +
			amdgpu_get_vblank_counter_kms(crtc->dev, acrtc->crtc_id);

	/* TODO This might fail and hence better not used, wait
	 * explicitly on fences instead
	 * and in general should be called for
	 * blocking commit to as per framework helpers
	 */
	/* NOTE(review): on reserve failure execution still continues into
	 * the fence wait below; only a WARN is emitted.
	 */
	r = amdgpu_bo_reserve(abo, true);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to reserve buffer before flip\n");
		WARN_ON(1);
	}

	/* Wait for all fences on this FB */
	WARN_ON(reservation_object_wait_timeout_rcu(abo->tbo.resv, true, false,
						    MAX_SCHEDULE_TIMEOUT) < 0);

	amdgpu_bo_unreserve(abo);

	/* Wait until we're out of the vertical blank period before the one
	 * targeted by the flip
	 */
	while ((acrtc->enabled &&
		(amdgpu_get_crtc_scanoutpos(adev->ddev, acrtc->crtc_id, 0,
					    &vpos, &hpos, NULL, NULL,
					    &crtc->hwmode)
		 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
		(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
		(int)(target_vblank -
		 amdgpu_get_vblank_counter_kms(adev->ddev, acrtc->crtc_id)) > 0)) {
		usleep_range(1000, 1100);
	}

	/* Flip */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	/* update crtc fb */
	crtc->primary->fb = fb;

	WARN_ON(acrtc->pflip_status != AMDGPU_FLIP_NONE);
	WARN_ON(!acrtc_state->stream);

	addr.address.grph.addr.low_part = lower_32_bits(afb->address);
	addr.address.grph.addr.high_part = upper_32_bits(afb->address);
	addr.flip_immediate = async_flip;


	/* Hand the pending pageflip event to the ISR while still under
	 * the event_lock.
	 */
	if (acrtc->base.state->event)
		prepare_flip_isr(acrtc);

	surface_updates->surface = dc_stream_get_status(acrtc_state->stream)->plane_states[0];
	surface_updates->flip_addr = &addr;


	dc_update_planes_and_stream(adev->dm.dc, surface_updates, 1, acrtc_state->stream, NULL);

	DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x \n",
			 __func__,
			 addr.address.grph.addr.high_part,
			 addr.address.grph.addr.low_part);


	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}
3872
/*
 * Commit all planes attached to @pcrtc. Planes that do not need a
 * pageflip are collected and pushed to DC in one
 * dc_commit_planes_to_stream() call; planes that do flip go through
 * amdgpu_dm_do_flip() individually. Cursor planes are handled
 * separately via handle_cursor_update().
 *
 * *wait_for_vblank is cleared when any flip is asynchronous, so the
 * caller can skip the vblank wait.
 */
static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
				    struct drm_device *dev,
				    struct amdgpu_display_manager *dm,
				    struct drm_crtc *pcrtc,
				    bool *wait_for_vblank)
{
	uint32_t i;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state;
	struct dc_stream_state *dc_stream_attach;
	struct dc_plane_state *plane_states_constructed[MAX_SURFACES];
	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(pcrtc->state);
	int planes_count = 0;
	unsigned long flags;

	/* update planes when needed */
	for_each_plane_in_state(state, plane, old_plane_state, i) {
		struct drm_plane_state *plane_state = plane->state;
		struct drm_crtc *crtc = plane_state->crtc;
		struct drm_framebuffer *fb = plane_state->fb;
		bool pflip_needed;
		struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);

		if (plane->type == DRM_PLANE_TYPE_CURSOR) {
			handle_cursor_update(plane, old_plane_state);
			continue;
		}

		/* Only planes bound to this active CRTC are considered. */
		if (!fb || !crtc || pcrtc != crtc || !crtc->state->active)
			continue;

		/* Fast-path pageflip only when no modeset is allowed. */
		pflip_needed = !state->allow_modeset;

		spin_lock_irqsave(&crtc->dev->event_lock, flags);
		if (acrtc_attach->pflip_status != AMDGPU_FLIP_NONE) {
			DRM_ERROR("%s: acrtc %d, already busy\n",
				  __func__,
				  acrtc_attach->crtc_id);
			/* NOTE(review): this unlock plus the one after the
			 * if-block means the lock is released twice on this
			 * path -- TODO confirm and restructure.
			 */
			spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
			/* In commit tail framework this cannot happen */
			WARN_ON(1);
		}
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

		if (!pflip_needed) {
			WARN_ON(!dm_plane_state->dc_state);

			/* Batch this plane for the single DC commit below. */
			plane_states_constructed[planes_count] = dm_plane_state->dc_state;

			dc_stream_attach = acrtc_state->stream;
			planes_count++;

		} else if (crtc->state->planes_changed) {
			/* Assume even ONE crtc with immediate flip means
			 * entire can't wait for VBLANK
			 * TODO Check if it's correct
			 */
			*wait_for_vblank =
				pcrtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC ?
				false : true;

			/* TODO: Needs rework for multiplane flip */
			if (plane->type == DRM_PLANE_TYPE_PRIMARY)
				drm_crtc_vblank_get(crtc);

			amdgpu_dm_do_flip(
				crtc,
				fb,
				drm_crtc_vblank_count(crtc) + *wait_for_vblank);
		}

	}

	if (planes_count) {
		unsigned long flags;

		if (pcrtc->state->event) {

			drm_crtc_vblank_get(pcrtc);

			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			prepare_flip_isr(acrtc_attach);
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		if (false == dc_commit_planes_to_stream(dm->dc,
							plane_states_constructed,
							planes_count,
							dc_stream_attach))
			dm_error("%s: Failed to attach plane!\n", __func__);
	} else {
		/*TODO BUG Here should go disable planes on CRTC. */
	}
}
3968
3969
3970int amdgpu_dm_atomic_commit(
3971 struct drm_device *dev,
3972 struct drm_atomic_state *state,
3973 bool nonblock)
3974{
3975 struct drm_crtc *crtc;
3976 struct drm_crtc_state *new_state;
3977 struct amdgpu_device *adev = dev->dev_private;
3978 int i;
3979
3980 /*
3981 * We evade vblanks and pflips on crtc that
3982 * should be changed. We do it here to flush & disable
3983 * interrupts before drm_swap_state is called in drm_atomic_helper_commit
3984 * it will update crtc->dm_crtc_state->stream pointer which is used in
3985 * the ISRs.
3986 */
3987 for_each_crtc_in_state(state, crtc, new_state, i) {
3988 struct dm_crtc_state *old_acrtc_state = to_dm_crtc_state(crtc->state);
3989 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
3990
3991 if (drm_atomic_crtc_needs_modeset(new_state) && old_acrtc_state->stream)
3992 manage_dm_interrupts(adev, acrtc, false);
3993 }
3994
3995 return drm_atomic_helper_commit(dev, state, nonblock);
3996
3997 /*TODO Handle EINTR, reenable IRQ*/
3998}
3999
4000void amdgpu_dm_atomic_commit_tail(
4001 struct drm_atomic_state *state)
4002{
4003 struct drm_device *dev = state->dev;
4004 struct amdgpu_device *adev = dev->dev_private;
4005 struct amdgpu_display_manager *dm = &adev->dm;
4006 struct dm_atomic_state *dm_state;
4007 uint32_t i, j;
4008 uint32_t new_crtcs_count = 0;
4009 struct drm_crtc *crtc, *pcrtc;
4010 struct drm_crtc_state *old_crtc_state;
4011 struct amdgpu_crtc *new_crtcs[MAX_STREAMS];
0971c40e 4012 struct dc_stream_state *new_stream = NULL;
e7b07cee
HW
4013 unsigned long flags;
4014 bool wait_for_vblank = true;
4015 struct drm_connector *connector;
4016 struct drm_connector_state *old_conn_state;
4017 struct dm_crtc_state *old_acrtc_state, *new_acrtc_state;
4018
4019 drm_atomic_helper_update_legacy_modeset_state(dev, state);
4020
4021 dm_state = to_dm_atomic_state(state);
4022
4023 /* update changed items */
4024 for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
4025 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4026 struct drm_crtc_state *new_state = crtc->state;
b830ebc9 4027
e7b07cee
HW
4028 new_acrtc_state = to_dm_crtc_state(new_state);
4029 old_acrtc_state = to_dm_crtc_state(old_crtc_state);
4030
4031 DRM_DEBUG_KMS(
4032 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
4033 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
4034 "connectors_changed:%d\n",
4035 acrtc->crtc_id,
4036 new_state->enable,
4037 new_state->active,
4038 new_state->planes_changed,
4039 new_state->mode_changed,
4040 new_state->active_changed,
4041 new_state->connectors_changed);
4042
4043 /* handles headless hotplug case, updating new_state and
4044 * aconnector as needed
4045 */
4046
9b690ef3 4047 if (modeset_required(new_state, new_acrtc_state->stream, old_acrtc_state->stream)) {
e7b07cee
HW
4048
4049 DRM_INFO("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
4050
4051 if (!new_acrtc_state->stream) {
4052 /*
b830ebc9
HW
4053 * this could happen because of issues with
4054 * userspace notifications delivery.
4055 * In this case userspace tries to set mode on
4056 * display which is disconnect in fact.
4057 * dc_sink in NULL in this case on aconnector.
4058 * We expect reset mode will come soon.
4059 *
4060 * This can also happen when unplug is done
4061 * during resume sequence ended
4062 *
4063 * In this case, we want to pretend we still
4064 * have a sink to keep the pipe running so that
4065 * hw state is consistent with the sw state
4066 */
e7b07cee
HW
4067 DRM_DEBUG_KMS("%s: Failed to create new stream for crtc %d\n",
4068 __func__, acrtc->base.base.id);
4069 continue;
4070 }
4071
4072
4073 if (old_acrtc_state->stream)
4074 remove_stream(adev, acrtc, old_acrtc_state->stream);
4075
4076
4077 /*
4078 * this loop saves set mode crtcs
4079 * we needed to enable vblanks once all
4080 * resources acquired in dc after dc_commit_streams
4081 */
4082
4083 /*TODO move all this into dm_crtc_state, get rid of
4084 * new_crtcs array and use old and new atomic states
4085 * instead
4086 */
4087 new_crtcs[new_crtcs_count] = acrtc;
4088 new_crtcs_count++;
4089
4090 acrtc->enabled = true;
4091 acrtc->hw_mode = crtc->state->mode;
4092 crtc->hwmode = crtc->state->mode;
4093 } else if (modereset_required(new_state)) {
4094 DRM_INFO("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
4095
4096 /* i.e. reset mode */
4097 if (old_acrtc_state->stream)
4098 remove_stream(adev, acrtc, old_acrtc_state->stream);
4099 }
4100 } /* for_each_crtc_in_state() */
4101
4102 /*
4103 * Add streams after required streams from new and replaced streams
4104 * are removed from freesync module
4105 */
4106 if (adev->dm.freesync_module) {
4107 for (i = 0; i < new_crtcs_count; i++) {
c84dec2f 4108 struct amdgpu_dm_connector *aconnector = NULL;
b830ebc9 4109
e7b07cee
HW
4110 new_acrtc_state = to_dm_crtc_state(new_crtcs[i]->base.state);
4111
4112 new_stream = new_acrtc_state->stream;
4113 aconnector =
4114 amdgpu_dm_find_first_crct_matching_connector(
4115 state,
4116 &new_crtcs[i]->base,
4117 false);
4118 if (!aconnector) {
b830ebc9
HW
4119 DRM_INFO("Atomic commit: Failed to find connector for acrtc id:%d "
4120 "skipping freesync init\n",
4121 new_crtcs[i]->crtc_id);
e7b07cee
HW
4122 continue;
4123 }
4124
4125 mod_freesync_add_stream(adev->dm.freesync_module,
4126 new_stream, &aconnector->caps);
4127 }
4128 }
4129
4130 if (dm_state->context)
608ac7bb 4131 WARN_ON(!dc_commit_state(dm->dc, dm_state->context));
e7b07cee
HW
4132
4133
4134 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
4135 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 4136
e7b07cee
HW
4137 new_acrtc_state = to_dm_crtc_state(crtc->state);
4138
4139 if (new_acrtc_state->stream != NULL) {
4140 const struct dc_stream_status *status =
4141 dc_stream_get_status(new_acrtc_state->stream);
4142
4143 if (!status)
4144 DC_ERR("got no status for stream %p on acrtc%p\n", new_acrtc_state->stream, acrtc);
4145 else
4146 acrtc->otg_inst = status->primary_otg_inst;
4147 }
4148 }
4149
4150 /* Handle scaling and undersacn changes*/
4151 for_each_connector_in_state(state, connector, old_conn_state, i) {
c84dec2f 4152 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
4153 struct dm_connector_state *con_new_state =
4154 to_dm_connector_state(aconnector->base.state);
4155 struct dm_connector_state *con_old_state =
4156 to_dm_connector_state(old_conn_state);
4157 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(con_new_state->base.crtc);
4158 struct dc_stream_status *status = NULL;
4159
4160 /* Skip any modesets/resets */
4161 if (!acrtc || drm_atomic_crtc_needs_modeset(acrtc->base.state))
4162 continue;
4163
4164 /* Skip any thing not scale or underscan changes */
4165 if (!is_scaling_state_different(con_new_state, con_old_state))
4166 continue;
4167
4168 new_acrtc_state = to_dm_crtc_state(acrtc->base.state);
4169
4170 update_stream_scaling_settings(&con_new_state->base.crtc->mode,
0971c40e 4171 con_new_state, (struct dc_stream_state *)new_acrtc_state->stream);
e7b07cee
HW
4172
4173 status = dc_stream_get_status(new_acrtc_state->stream);
4174 WARN_ON(!status);
3be5262e 4175 WARN_ON(!status->plane_count);
e7b07cee
HW
4176
4177 if (!new_acrtc_state->stream)
4178 continue;
4179
4180 /*TODO How it works with MPO ?*/
3be5262e 4181 if (!dc_commit_planes_to_stream(
e7b07cee 4182 dm->dc,
3be5262e
HW
4183 status->plane_states,
4184 status->plane_count,
e7b07cee
HW
4185 new_acrtc_state->stream))
4186 dm_error("%s: Failed to update stream scaling!\n", __func__);
4187 }
4188
4189 for (i = 0; i < new_crtcs_count; i++) {
4190 /*
4191 * loop to enable interrupts on newly arrived crtc
4192 */
4193 struct amdgpu_crtc *acrtc = new_crtcs[i];
b830ebc9 4194
e7b07cee
HW
4195 new_acrtc_state = to_dm_crtc_state(acrtc->base.state);
4196
4197 if (adev->dm.freesync_module)
4198 mod_freesync_notify_mode_change(
4199 adev->dm.freesync_module, &new_acrtc_state->stream, 1);
4200
4201 manage_dm_interrupts(adev, acrtc, true);
4202 }
4203
4204 /* update planes when needed per crtc*/
4205 for_each_crtc_in_state(state, pcrtc, old_crtc_state, j) {
4206 new_acrtc_state = to_dm_crtc_state(pcrtc->state);
4207
4208 if (new_acrtc_state->stream)
3be5262e 4209 amdgpu_dm_commit_planes(state, dev, dm, pcrtc, &wait_for_vblank);
e7b07cee
HW
4210 }
4211
4212
4213 /*
4214 * send vblank event on all events not handled in flip and
4215 * mark consumed event for drm_atomic_helper_commit_hw_done
4216 */
4217 spin_lock_irqsave(&adev->ddev->event_lock, flags);
4218 for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
4219 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4220
4221 if (acrtc->base.state->event)
4222 drm_send_event_locked(dev, &crtc->state->event->base);
4223
4224 acrtc->base.state->event = NULL;
4225 }
4226 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
4227
4228 /* Signal HW programming completion */
4229 drm_atomic_helper_commit_hw_done(state);
4230
4231 if (wait_for_vblank)
4232 drm_atomic_helper_wait_for_vblanks(dev, state);
4233
4234 drm_atomic_helper_cleanup_planes(dev, state);
4235}
4236
4237
4238static int dm_force_atomic_commit(struct drm_connector *connector)
4239{
4240 int ret = 0;
4241 struct drm_device *ddev = connector->dev;
4242 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
4243 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
4244 struct drm_plane *plane = disconnected_acrtc->base.primary;
4245 struct drm_connector_state *conn_state;
4246 struct drm_crtc_state *crtc_state;
4247 struct drm_plane_state *plane_state;
4248
4249 if (!state)
4250 return -ENOMEM;
4251
4252 state->acquire_ctx = ddev->mode_config.acquire_ctx;
4253
4254 /* Construct an atomic state to restore previous display setting */
4255
4256 /*
4257 * Attach connectors to drm_atomic_state
4258 */
4259 conn_state = drm_atomic_get_connector_state(state, connector);
4260
4261 ret = PTR_ERR_OR_ZERO(conn_state);
4262 if (ret)
4263 goto err;
4264
4265 /* Attach crtc to drm_atomic_state*/
4266 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
4267
4268 ret = PTR_ERR_OR_ZERO(crtc_state);
4269 if (ret)
4270 goto err;
4271
4272 /* force a restore */
4273 crtc_state->mode_changed = true;
4274
4275 /* Attach plane to drm_atomic_state */
4276 plane_state = drm_atomic_get_plane_state(state, plane);
4277
4278 ret = PTR_ERR_OR_ZERO(plane_state);
4279 if (ret)
4280 goto err;
4281
4282
4283 /* Call commit internally with the state we just constructed */
4284 ret = drm_atomic_commit(state);
4285 if (!ret)
4286 return 0;
4287
4288err:
4289 DRM_ERROR("Restoring old state failed with %i\n", ret);
4290 drm_atomic_state_put(state);
4291
4292 return ret;
4293}
4294
4295/*
4296 * This functions handle all cases when set mode does not come upon hotplug.
4297 * This include when the same display is unplugged then plugged back into the
4298 * same port and when we are running without usermode desktop manager supprot
4299 */
4300void dm_restore_drm_connector_state(struct drm_device *dev, struct drm_connector *connector)
4301{
c84dec2f 4302 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
4303 struct amdgpu_crtc *disconnected_acrtc;
4304 struct dm_crtc_state *acrtc_state;
4305
4306 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
4307 return;
4308
4309 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
4310 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
4311
4312 if (!disconnected_acrtc || !acrtc_state->stream)
4313 return;
4314
4315 /*
4316 * If the previous sink is not released and different from the current,
4317 * we deduce we are in a state where we can not rely on usermode call
4318 * to turn on the display, so we do it here
4319 */
4320 if (acrtc_state->stream->sink != aconnector->dc_sink)
4321 dm_force_atomic_commit(&aconnector->base);
4322}
4323
e7b07cee
HW
/*
 * Grabs all modesetting locks to serialize against any blocking commits,
 * Waits for completion of all non blocking commits.
 */
static int do_aquire_global_lock(
		struct drm_device *dev,
		struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_commit *commit;
	long ret;

	/* Adding all modeset locks to aquire_ctx will
	 * ensure that when the framework release it the
	 * extra locks we are locking here will get released to
	 */
	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
	if (ret)
		return ret;

	/* Walk every CRTC (not just the ones in @state) so in-flight
	 * nonblocking commits on any pipe are waited on. */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		/* Take a reference on the newest pending commit under the
		 * commit_lock so it cannot be freed while we wait on it. */
		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
				struct drm_crtc_commit, commit_entry);
		if (commit)
			drm_crtc_commit_get(commit);
		spin_unlock(&crtc->commit_lock);

		if (!commit)
			continue;

		/* Make sure all pending HW programming completed and
		 * page flips done
		 */
		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);

		if (ret > 0)
			ret = wait_for_completion_interruptible_timeout(
					&commit->flip_done, 10*HZ);

		/* A timeout (ret == 0) is only logged, not treated as an
		 * error: the function still returns 0 in that case.  Only
		 * an interrupted wait (ret < 0) propagates as a failure. */
		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
					"timed out\n", crtc->base.id, crtc->name);

		drm_crtc_commit_put(commit);
	}

	return ret < 0 ? ret : 0;
}
4373
62f55537
AG
/*
 * Synchronize the DC streams in dm_state->context with the CRTCs in @state.
 *
 * Called twice from amdgpu_dm_atomic_check(): first with @enable == false
 * to remove streams for disabled/changed CRTCs, then with @enable == true
 * to create and add streams for enabled CRTCs.
 *
 * *@lock_and_validation_needed is set whenever a stream was added to or
 * removed from the DC context, i.e. the commit needs full validation and
 * the global modeset lock.
 *
 * Returns 0 on success or a negative errno.  NOTE(review): when stream
 * creation fails the loop break()s with ret still 0 -- per the comment
 * below this is deliberate (display vanished during S3), not an error.
 */
static int dm_update_crtcs_state(
		struct dc *dc,
		struct drm_atomic_state *state,
		bool enable,
		bool *lock_and_validation_needed)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i;
	struct dm_crtc_state *old_acrtc_state, *new_acrtc_state;
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
	struct dc_stream_state *new_stream;
	int ret = 0;

	/*TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set */
	/* update changed items */
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		struct amdgpu_crtc *acrtc = NULL;
		struct amdgpu_dm_connector *aconnector = NULL;
		struct drm_connector_state *conn_state = NULL;
		struct dm_connector_state *dm_conn_state = NULL;

		new_stream = NULL;

		/* crtc->state is the old state here, crtc_state the new one */
		old_acrtc_state = to_dm_crtc_state(crtc->state);
		new_acrtc_state = to_dm_crtc_state(crtc_state);
		acrtc = to_amdgpu_crtc(crtc);

		aconnector = amdgpu_dm_find_first_crct_matching_connector(state, crtc, true);

		/* TODO This hack should go away */
		if (aconnector) {
			/* Pull the connector into @state so its dm state can
			 * be used to build the candidate stream. */
			conn_state = drm_atomic_get_connector_state(state,
								    &aconnector->base);

			if (IS_ERR(conn_state)) {
				ret = PTR_ERR_OR_ZERO(conn_state);
				break;
			}

			dm_conn_state = to_dm_connector_state(conn_state);

			new_stream = create_stream_for_sink(aconnector,
							    &crtc_state->mode,
							    dm_conn_state);

			/*
			 * we can have no stream on ACTION_SET if a display
			 * was disconnected during S3, in this case it not and
			 * error, the OS will be updated after detection, and
			 * do the right thing on next atomic commit
			 */

			if (!new_stream) {
				DRM_DEBUG_KMS("%s: Failed to create new stream for crtc %d\n",
						__func__, acrtc->base.base.id);
				break;
			}
		}

		/* NOTE(review): new_stream may be NULL here; assumes
		 * dc_is_stream_unchanged() tolerates NULL -- confirm. */
		if (dc_is_stream_unchanged(new_stream,
				old_acrtc_state->stream)) {

			crtc_state->mode_changed = false;

			DRM_DEBUG_KMS("Mode change not required, setting mode_changed to %d",
					crtc_state->mode_changed);
		}


		if (!drm_atomic_crtc_needs_modeset(crtc_state))
			goto next_crtc;

		DRM_DEBUG_KMS(
			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
			"connectors_changed:%d\n",
			acrtc->crtc_id,
			crtc_state->enable,
			crtc_state->active,
			crtc_state->planes_changed,
			crtc_state->mode_changed,
			crtc_state->active_changed,
			crtc_state->connectors_changed);

		/* Remove stream for any changed/disabled CRTC */
		if (!enable) {

			if (!old_acrtc_state->stream)
				goto next_crtc;

			DRM_DEBUG_KMS("Disabling DRM crtc: %d\n",
					crtc->base.id);

			/* i.e. reset mode */
			if (!dc_remove_stream_from_ctx(
					dc,
					dm_state->context,
					old_acrtc_state->stream)) {
				ret = -EINVAL;
				goto fail;
			}

			/* Drop the reference held by the (old) crtc state */
			dc_stream_release(old_acrtc_state->stream);
			new_acrtc_state->stream = NULL;

			*lock_and_validation_needed = true;

		} else {/* Add stream for any updated/enabled CRTC */

			if (modereset_required(crtc_state))
				goto next_crtc;

			if (modeset_required(crtc_state, new_stream,
					old_acrtc_state->stream)) {

				WARN_ON(new_acrtc_state->stream);

				/* The new crtc state takes its own reference;
				 * the creation reference is dropped at
				 * next_crtc below. */
				new_acrtc_state->stream = new_stream;
				dc_stream_retain(new_stream);

				DRM_DEBUG_KMS("Enabling DRM crtc: %d\n",
							crtc->base.id);

				if (!dc_add_stream_to_ctx(
						dc,
						dm_state->context,
						new_acrtc_state->stream)) {
					ret = -EINVAL;
					goto fail;
				}

				*lock_and_validation_needed = true;
			}
		}

next_crtc:
		/* Release extra reference */
		if (new_stream)
			dc_stream_release(new_stream);
	}

	return ret;

fail:
	if (new_stream)
		dc_stream_release(new_stream);
	return ret;
}
9b690ef3 4523
62f55537
AG
4524static int dm_update_planes_state(
4525 struct dc *dc,
4526 struct drm_atomic_state *state,
4527 bool enable,
4528 bool *lock_and_validation_needed)
4529{
4530 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
4531 struct drm_crtc_state *new_crtc_state;
4532 struct drm_plane *plane;
4533 struct drm_plane_state *old_plane_state, *new_plane_state;
4534 struct dm_crtc_state *new_acrtc_state, *old_acrtc_state;
4535 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
4536 struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
4537 int i ;
4538 /* TODO return page_flip_needed() function */
4539 bool pflip_needed = !state->allow_modeset;
4540 int ret = 0;
e7b07cee 4541
62f55537
AG
4542 if (pflip_needed)
4543 return ret;
9b690ef3 4544
62f55537
AG
4545 /* Add new planes */
4546 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
4547 new_plane_crtc = new_plane_state->crtc;
4548 old_plane_crtc = old_plane_state->crtc;
4549 new_dm_plane_state = to_dm_plane_state(new_plane_state);
4550 old_dm_plane_state = to_dm_plane_state(old_plane_state);
4551
4552 /*TODO Implement atomic check for cursor plane */
4553 if (plane->type == DRM_PLANE_TYPE_CURSOR)
4554 continue;
9b690ef3 4555
62f55537
AG
4556 /* Remove any changed/removed planes */
4557 if (!enable) {
a7b06724 4558
62f55537
AG
4559 if (!old_plane_crtc)
4560 continue;
4561
4562 old_acrtc_state = to_dm_crtc_state(
4563 drm_atomic_get_old_crtc_state(
4564 state,
4565 old_plane_crtc));
9b690ef3 4566
62f55537
AG
4567 if (!old_acrtc_state->stream)
4568 continue;
4569
4570 DRM_DEBUG_KMS("Disabling DRM plane: %d on DRM crtc %d\n",
4571 plane->base.id, old_plane_crtc->base.id);
9b690ef3 4572
62f55537
AG
4573 if (!dc_remove_plane_from_context(
4574 dc,
4575 old_acrtc_state->stream,
4576 old_dm_plane_state->dc_state,
4577 dm_state->context)) {
4578
4579 ret = EINVAL;
4580 return ret;
e7b07cee
HW
4581 }
4582
9b690ef3 4583
62f55537
AG
4584 dc_plane_state_release(old_dm_plane_state->dc_state);
4585 new_dm_plane_state->dc_state = NULL;
1dc90497 4586
62f55537 4587 *lock_and_validation_needed = true;
1dc90497 4588
62f55537 4589 } else { /* Add new planes */
1dc90497 4590
62f55537
AG
4591 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
4592 continue;
e7b07cee 4593
62f55537
AG
4594 if (!new_plane_crtc)
4595 continue;
e7b07cee 4596
62f55537
AG
4597 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
4598 new_acrtc_state = to_dm_crtc_state(new_crtc_state);
1dc90497 4599
62f55537
AG
4600 if (!new_acrtc_state->stream)
4601 continue;
4602
4603
4604 WARN_ON(new_dm_plane_state->dc_state);
9b690ef3 4605
62f55537
AG
4606 new_dm_plane_state->dc_state = dc_create_plane_state(dc);
4607
4608 DRM_DEBUG_KMS("Enabling DRM plane: %d on DRM crtc %d\n",
4609 plane->base.id, new_plane_crtc->base.id);
4610
4611 if (!new_dm_plane_state->dc_state) {
4612 ret = -EINVAL;
4613 return ret;
4614 }
4615
4616 ret = fill_plane_attributes(
4617 new_plane_crtc->dev->dev_private,
4618 new_dm_plane_state->dc_state,
4619 new_plane_state,
4620 new_crtc_state,
4621 false);
4622 if (ret)
4623 return ret;
4624
4625
4626 if (!dc_add_plane_to_context(
4627 dc,
4628 new_acrtc_state->stream,
4629 new_dm_plane_state->dc_state,
4630 dm_state->context)) {
4631
4632 ret = -EINVAL;
4633 return ret;
e7b07cee 4634 }
62f55537
AG
4635
4636 *lock_and_validation_needed = true;
e7b07cee 4637 }
62f55537 4638 }
e7b07cee
HW
4639
4640
62f55537
AG
4641 return ret;
4642}
4643
4644int amdgpu_dm_atomic_check(struct drm_device *dev,
4645 struct drm_atomic_state *state)
4646{
4647 int i;
4648 int ret;
4649 struct amdgpu_device *adev = dev->dev_private;
4650 struct dc *dc = adev->dm.dc;
4651 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
4652 struct drm_connector *connector;
4653 struct drm_connector_state *conn_state;
4654 struct drm_crtc *crtc;
4655 struct drm_crtc_state *crtc_state;
e7b07cee 4656
62f55537
AG
4657 /*
4658 * This bool will be set for true for any modeset/reset
4659 * or plane update which implies non fast surface update.
4660 */
4661 bool lock_and_validation_needed = false;
4662
4663 ret = drm_atomic_helper_check_modeset(dev, state);
4664
4665 if (ret) {
4666 DRM_ERROR("Atomic state validation failed with error :%d !\n", ret);
4667 return ret;
4668 }
4669
4670 /*
4671 * Hack: Commit needs planes right now, specifically for gamma
4672 * TODO rework commit to check CRTC for gamma change
4673 */
4674 for_each_crtc_in_state(state, crtc, crtc_state, i) {
4675 if (crtc_state->color_mgmt_changed) {
e7b07cee
HW
4676 ret = drm_atomic_add_affected_planes(state, crtc);
4677 if (ret)
4678 goto fail;
4679 }
4680 }
4681
62f55537
AG
4682 dm_state->context = dc_create_state();
4683 ASSERT(dm_state->context);
f36cc577 4684 dc_resource_state_copy_construct_current(dc, dm_state->context);
62f55537
AG
4685
4686 /* Remove exiting planes if they are modified */
4687 ret = dm_update_planes_state(dc, state, false, &lock_and_validation_needed);
4688 if (ret) {
4689 goto fail;
4690 }
4691
4692 /* Disable all crtcs which require disable */
4693 ret = dm_update_crtcs_state(dc, state, false, &lock_and_validation_needed);
4694 if (ret) {
4695 goto fail;
4696 }
4697
4698 /* Enable all crtcs which require enable */
4699 ret = dm_update_crtcs_state(dc, state, true, &lock_and_validation_needed);
4700 if (ret) {
4701 goto fail;
4702 }
4703
4704 /* Add new/modified planes */
4705 ret = dm_update_planes_state(dc, state, true, &lock_and_validation_needed);
4706 if (ret) {
4707 goto fail;
4708 }
4709
4710 /* Run this here since we want to validate the streams we created */
4711 ret = drm_atomic_helper_check_planes(dev, state);
4712 if (ret)
4713 goto fail;
4714
e7b07cee
HW
4715 /* Check scaling and undersacn changes*/
4716 /*TODO Removed scaling changes validation due to inability to commit
4717 * new stream into context w\o causing full reset. Need to
4718 * decide how to handle.
4719 */
4720 for_each_connector_in_state(state, connector, conn_state, i) {
c84dec2f 4721 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
4722 struct dm_connector_state *con_old_state =
4723 to_dm_connector_state(aconnector->base.state);
4724 struct dm_connector_state *con_new_state =
4725 to_dm_connector_state(conn_state);
4726 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(con_new_state->base.crtc);
4727
4728 /* Skip any modesets/resets */
4729 if (!acrtc || drm_atomic_crtc_needs_modeset(acrtc->base.state))
4730 continue;
4731
b830ebc9 4732 /* Skip any thing not scale or underscan changes */
e7b07cee
HW
4733 if (!is_scaling_state_different(con_new_state, con_old_state))
4734 continue;
4735
4736 lock_and_validation_needed = true;
4737 }
4738
e7b07cee
HW
4739 /*
4740 * For full updates case when
4741 * removing/adding/updating streams on once CRTC while flipping
4742 * on another CRTC,
4743 * acquiring global lock will guarantee that any such full
4744 * update commit
4745 * will wait for completion of any outstanding flip using DRMs
4746 * synchronization events.
4747 */
4748
4749 if (lock_and_validation_needed) {
4750
4751 ret = do_aquire_global_lock(dev, state);
4752 if (ret)
4753 goto fail;
1dc90497 4754
19f89e23 4755 if (!dc_validate_global_state(dc, dm_state->context)) {
e7b07cee
HW
4756 ret = -EINVAL;
4757 goto fail;
4758 }
4759 }
4760
4761 /* Must be success */
4762 WARN_ON(ret);
4763 return ret;
4764
4765fail:
4766 if (ret == -EDEADLK)
4767 DRM_DEBUG_KMS("Atomic check stopped due to to deadlock.\n");
4768 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
4769 DRM_DEBUG_KMS("Atomic check stopped due to to signal.\n");
4770 else
62f55537 4771 DRM_ERROR("Atomic check failed with err: %d \n", ret);
e7b07cee
HW
4772
4773 return ret;
4774}
4775
4776static bool is_dp_capable_without_timing_msa(
4777 struct dc *dc,
c84dec2f 4778 struct amdgpu_dm_connector *amdgpu_dm_connector)
e7b07cee
HW
4779{
4780 uint8_t dpcd_data;
4781 bool capable = false;
4782
c84dec2f 4783 if (amdgpu_dm_connector->dc_link &&
e7b07cee
HW
4784 dm_helpers_dp_read_dpcd(
4785 NULL,
c84dec2f 4786 amdgpu_dm_connector->dc_link,
e7b07cee
HW
4787 DP_DOWN_STREAM_PORT_COUNT,
4788 &dpcd_data,
4789 sizeof(dpcd_data))) {
4790 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
4791 }
4792
4793 return capable;
4794}
/*
 * Parse @edid for a continuous-frequency monitor range descriptor and
 * cache the supported refresh range (and derived caps) on the connector.
 * Only DP/eDP sinks that can ignore the MSA timing parameter (checked
 * via DPCD) are scanned, since only those may vary their refresh rate.
 *
 * NOTE: val_capable is computed but currently unused -- reporting the
 * capability to user mode is still a TODO (see bottom of function).
 */
void amdgpu_dm_add_sink_to_freesync_module(
		struct drm_connector *connector,
		struct edid *edid)
{
	int i;
	uint64_t val_capable;
	bool edid_check_required;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;

	edid_check_required = false;
	if (!amdgpu_dm_connector->dc_sink) {
		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
		return;
	}
	if (!adev->dm.freesync_module)
		return;
	/*
	 * if edid non zero restrict freesync only for dp and edp
	 */
	if (edid) {
		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
			/* Scanning the EDID is only worthwhile if the sink
			 * can actually vary its refresh rate. */
			edid_check_required = is_dp_capable_without_timing_msa(
						adev->dm.dc,
						amdgpu_dm_connector);
		}
	}
	val_capable = 0;
	/* Monitor range descriptors require EDID 1.1 or newer. */
	if (edid_check_required == true && (edid->version > 1 ||
	(edid->version == 1 && edid->revision > 1))) {
		/* EDID carries up to four detailed descriptors; use the
		 * first monitor-range descriptor found. */
		for (i = 0; i < 4; i++) {

			timing = &edid->detailed_timings[i];
			data = &timing->data.other_data;
			range = &data->data.range;
			/*
			 * Check if monitor has continuous frequency mode
			 */
			if (data->type != EDID_DETAIL_MONITOR_RANGE)
				continue;
			/*
			 * Check for flag range limits only. If flag == 1 then
			 * no additional timing information provided.
			 * Default GTF, GTF Secondary curve and CVT are not
			 * supported
			 */
			if (range->flags != 1)
				continue;

			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
			amdgpu_dm_connector->pixel_clock_mhz =
				range->pixel_clock_mhz * 10;
			break;
		}

		/* Only advertise a usable range (span wider than 10 Hz). */
		if (amdgpu_dm_connector->max_vfreq -
				amdgpu_dm_connector->min_vfreq > 10) {
			amdgpu_dm_connector->caps.supported = true;
			amdgpu_dm_connector->caps.min_refresh_in_micro_hz =
					amdgpu_dm_connector->min_vfreq * 1000000;
			amdgpu_dm_connector->caps.max_refresh_in_micro_hz =
					amdgpu_dm_connector->max_vfreq * 1000000;
			val_capable = 1;
		}
	}

	/*
	 * TODO figure out how to notify user-mode or DRM of freesync caps
	 * once we figure out how to deal with freesync in an upstreamable
	 * fashion
	 */

}
4876
/*
 * Counterpart to amdgpu_dm_add_sink_to_freesync_module(); intentionally
 * a no-op for now (see the TODO below).
 */
void amdgpu_dm_remove_sink_from_freesync_module(
		struct drm_connector *connector)
{
	/*
	 * TODO fill in once we figure out how to deal with freesync in
	 * an upstreamable fashion
	 */
}