drm/amd/display: use full surface update when stream is NULL
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services_types.h"
#include "dc.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "atom.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_types.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>

#include "modules/inc/mod_freesync.h"

static enum drm_plane_type dm_surfaces_type_default[AMDGPU_MAX_PLANES] = {
        DRM_PLANE_TYPE_PRIMARY,
        DRM_PLANE_TYPE_PRIMARY,
        DRM_PLANE_TYPE_PRIMARY,
        DRM_PLANE_TYPE_PRIMARY,
        DRM_PLANE_TYPE_PRIMARY,
        DRM_PLANE_TYPE_PRIMARY,
};

static enum drm_plane_type dm_surfaces_type_carrizo[AMDGPU_MAX_PLANES] = {
        DRM_PLANE_TYPE_PRIMARY,
        DRM_PLANE_TYPE_PRIMARY,
        DRM_PLANE_TYPE_PRIMARY,
        DRM_PLANE_TYPE_OVERLAY, /* YUV Capable Underlay */
};

static enum drm_plane_type dm_surfaces_type_stoney[AMDGPU_MAX_PLANES] = {
        DRM_PLANE_TYPE_PRIMARY,
        DRM_PLANE_TYPE_PRIMARY,
        DRM_PLANE_TYPE_OVERLAY, /* YUV Capable Underlay */
};

/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
        if (crtc >= adev->mode_info.num_crtc)
                return 0;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

                if (!acrtc->stream) {
                        DRM_ERROR("dc_stream is NULL for crtc '%d'!\n", crtc);
                        return 0;
                }

                return dc_stream_get_vblank_counter(acrtc->stream);
        }
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
                                        u32 *vbl, u32 *position)
{
        if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
                return -EINVAL;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

                if (!acrtc->stream) {
                        DRM_ERROR("dc_stream is NULL for crtc '%d'!\n", crtc);
                        return 0;
                }

                return dc_stream_get_scanoutpos(acrtc->stream, vbl, position);
        }
}

static bool dm_is_idle(void *handle)
{
        /* XXX todo */
        return true;
}

static int dm_wait_for_idle(void *handle)
{
        /* XXX todo */
        return 0;
}

static bool dm_check_soft_reset(void *handle)
{
        return false;
}

static int dm_soft_reset(void *handle)
{
        /* XXX todo */
        return 0;
}

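/*
 * Look up the amdgpu_crtc whose timing generator (OTG) instance matches
 * otg_inst. Interrupt sources are registered per OTG, so this maps an
 * incoming IRQ back to the DRM CRTC it belongs to.
 */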
static struct amdgpu_crtc *get_crtc_by_otg_inst(
        struct amdgpu_device *adev,
        int otg_inst)
{
        struct drm_device *dev = adev->ddev;
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;

        /*
         * The following check is inherited from the two functions where this
         * one is now used. It still needs to be investigated why this case
         * can happen at all.
         */
        if (otg_inst == -1) {
                WARN_ON(1);
                return adev->mode_info.crtcs[0];
        }

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                amdgpu_crtc = to_amdgpu_crtc(crtc);

                if (amdgpu_crtc->otg_inst == otg_inst)
                        return amdgpu_crtc;
        }

        return NULL;
}

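/*
 * Page-flip completion interrupt handler. Sends the pending
 * DRM_EVENT_FLIP_COMPLETE event to userspace, marks the flip as done and
 * drops the vblank reference taken when the flip was submitted.
 */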
static void dm_pflip_high_irq(void *interrupt_params)
{
        struct amdgpu_crtc *amdgpu_crtc;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        unsigned long flags;

        amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

        /* IRQ could occur when in initial stage */
        /* TODO work and BO cleanup */
        if (amdgpu_crtc == NULL) {
                DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
                return;
        }

        spin_lock_irqsave(&adev->ddev->event_lock, flags);

        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
                DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
                                                 amdgpu_crtc->pflip_status,
                                                 AMDGPU_FLIP_SUBMITTED,
                                                 amdgpu_crtc->crtc_id,
                                                 amdgpu_crtc);
                spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
                return;
        }

        /* wakeup userspace */
        if (amdgpu_crtc->event
                        && amdgpu_crtc->event->event.base.type
                        == DRM_EVENT_FLIP_COMPLETE) {
                /* Update to correct count/ts if racing with vblank irq */
                drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

                drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);
                /* page flip completed. clean up */
                amdgpu_crtc->event = NULL;
        } else
                WARN_ON(1);

        amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
        spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

        DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
                                        __func__, amdgpu_crtc->crtc_id, amdgpu_crtc);

        drm_crtc_vblank_put(&amdgpu_crtc->base);
}

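/*
 * Vertical blank interrupt handler. Resolves the OTG instance to a CRTC
 * index and forwards the event to the DRM vblank machinery.
 */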
static void dm_crtc_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        uint8_t crtc_index = 0;
        struct amdgpu_crtc *acrtc;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);

        if (acrtc)
                crtc_index = acrtc->crtc_id;

        drm_handle_vblank(adev->ddev, crtc_index);
}

static int dm_set_clockgating_state(void *handle,
                  enum amd_clockgating_state state)
{
        return 0;
}

static int dm_set_powergating_state(void *handle,
                  enum amd_powergating_state state)
{
        return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

static void hotplug_notify_work_func(struct work_struct *work)
{
        struct amdgpu_display_manager *dm = container_of(work, struct amdgpu_display_manager, mst_hotplug_work);
        struct drm_device *dev = dm->ddev;

        drm_kms_helper_hotplug_event(dev);
}

/* Init display KMS
 *
 * Returns 0 on success
 */
int amdgpu_dm_init(struct amdgpu_device *adev)
{
        struct dc_init_data init_data;

        adev->dm.ddev = adev->ddev;
        adev->dm.adev = adev;

        DRM_INFO("DAL is enabled\n");
        /* Zero all the fields */
        memset(&init_data, 0, sizeof(init_data));

        /* initialize DAL's lock (for SYNC context use) */
        spin_lock_init(&adev->dm.dal_lock);

        /* initialize DAL's mutex */
        mutex_init(&adev->dm.dal_mutex);

        if (amdgpu_dm_irq_init(adev)) {
                DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
                goto error;
        }

        init_data.asic_id.chip_family = adev->family;

        init_data.asic_id.pci_revision_id = adev->rev_id;
        init_data.asic_id.hw_internal_rev = adev->external_rev_id;

        init_data.asic_id.vram_width = adev->mc.vram_width;
        /* TODO: initialize init_data.asic_id.vram_type here!!!! */
        init_data.asic_id.atombios_base_address =
                adev->mode_info.atom_context->bios;

        init_data.driver = adev;

        adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

        if (!adev->dm.cgs_device) {
                DRM_ERROR("amdgpu: failed to create cgs device.\n");
                goto error;
        }

        init_data.cgs_device = adev->dm.cgs_device;

        adev->dm.dal = NULL;

        init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

        /* Display Core create. */
        adev->dm.dc = dc_create(&init_data);

        if (!adev->dm.dc) {
                DRM_ERROR("Display Core failed to initialize!\n");
                goto error;
        }

        INIT_WORK(&adev->dm.mst_hotplug_work, hotplug_notify_work_func);

        adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
        if (!adev->dm.freesync_module) {
                DRM_ERROR(
                "amdgpu: failed to initialize freesync_module.\n");
        } else
                DRM_INFO("amdgpu: freesync_module init done %p.\n",
                                adev->dm.freesync_module);

        if (amdgpu_dm_initialize_drm_device(adev)) {
                DRM_ERROR(
                "amdgpu: failed to initialize sw for display support.\n");
                goto error;
        }

        /* Update the actual used number of crtc */
        adev->mode_info.num_crtc = adev->dm.display_indexes_num;

        /* TODO: Add_display_info? */

        /* TODO use dynamic cursor width */
        adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
        adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

        if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
                DRM_ERROR(
                "amdgpu: failed to initialize vblank support.\n");
                goto error;
        }

        DRM_INFO("KMS initialized.\n");

        return 0;
error:
        amdgpu_dm_fini(adev);

        return -1;
}

void amdgpu_dm_fini(struct amdgpu_device *adev)
{
        amdgpu_dm_destroy_drm_device(&adev->dm);
        /*
         * TODO: pageflip, vblank interrupt
         *
         * amdgpu_dm_irq_fini(adev);
         */

        if (adev->dm.cgs_device) {
                amdgpu_cgs_destroy_device(adev->dm.cgs_device);
                adev->dm.cgs_device = NULL;
        }
        if (adev->dm.freesync_module) {
                mod_freesync_destroy(adev->dm.freesync_module);
                adev->dm.freesync_module = NULL;
        }
        /* DC Destroy TODO: Replace destroy DAL */
        dc_destroy(&adev->dm.dc);
}

/* moved from amdgpu_dm_kms.c */
void amdgpu_dm_destroy(void)
{
}

static int dm_sw_init(void *handle)
{
        return 0;
}

static int dm_sw_fini(void *handle)
{
        return 0;
}

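/*
 * Walk all connectors and start MST topology management on every link
 * that was detected as an MST branch device. Called from late init, once
 * the sw state is in place.
 */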
static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
        struct amdgpu_connector *aconnector;
        struct drm_connector *connector;
        int ret = 0;

        drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);

        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                aconnector = to_amdgpu_connector(connector);
                if (aconnector->dc_link->type == dc_connection_mst_branch) {
                        DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
                                        aconnector, aconnector->base.base.id);

                        ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
                        if (ret < 0) {
                                DRM_ERROR("DM_MST: Failed to start MST\n");
                                ((struct dc_link *)aconnector->dc_link)->type =
                                        dc_connection_single;
                                /* break instead of return so connection_mutex
                                 * is not leaked on error */
                                break;
                        }
                }
        }

        drm_modeset_unlock(&dev->mode_config.connection_mutex);
        return ret;
}

static int dm_late_init(void *handle)
{
        struct drm_device *dev = ((struct amdgpu_device *)handle)->ddev;

        return detect_mst_link_for_all_connectors(dev);
}

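/*
 * Suspend or resume the MST topology managers of all root (non-port) MST
 * connectors around an S3 cycle, depending on the 'suspend' argument.
 */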
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
        struct amdgpu_connector *aconnector;
        struct drm_connector *connector;

        drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);

        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                aconnector = to_amdgpu_connector(connector);
                if (aconnector->dc_link->type == dc_connection_mst_branch &&
                                !aconnector->mst_port) {

                        if (suspend)
                                drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr);
                        else
                                drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr);
                }
        }

        drm_modeset_unlock(&dev->mode_config.connection_mutex);
}

static int dm_hw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* Create DAL display manager */
        amdgpu_dm_init(adev);
        amdgpu_dm_hpd_init(adev);

        return 0;
}

static int dm_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        amdgpu_dm_hpd_fini(adev);

        amdgpu_dm_irq_fini(adev);

        return 0;
}

static int dm_suspend(void *handle)
{
        struct amdgpu_device *adev = handle;
        struct amdgpu_display_manager *dm = &adev->dm;
        int ret = 0;

        s3_handle_mst(adev->ddev, true);

        amdgpu_dm_irq_suspend(adev);

        adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);

        dc_set_power_state(
                dm->dc,
                DC_ACPI_CM_POWER_STATE_D3
                );

        return ret;
}

struct amdgpu_connector *amdgpu_dm_find_first_crct_matching_connector(
        struct drm_atomic_state *state,
        struct drm_crtc *crtc,
        bool from_state_var)
{
        uint32_t i;
        struct drm_connector_state *conn_state;
        struct drm_connector *connector;
        struct drm_crtc *crtc_from_state;

        for_each_connector_in_state(state, connector, conn_state, i) {
                crtc_from_state =
                        from_state_var ?
                                conn_state->crtc :
                                connector->state->crtc;

                if (crtc_from_state == crtc)
                        return to_amdgpu_connector(connector);
        }

        return NULL;
}

static int dm_resume(void *handle)
{
        struct amdgpu_device *adev = handle;
        struct amdgpu_display_manager *dm = &adev->dm;

        /* power on hardware */
        dc_set_power_state(
                dm->dc,
                DC_ACPI_CM_POWER_STATE_D0
                );

        return 0;
}

int amdgpu_dm_display_resume(struct amdgpu_device *adev)
{
        struct drm_device *ddev = adev->ddev;
        struct amdgpu_display_manager *dm = &adev->dm;
        struct amdgpu_connector *aconnector;
        struct drm_connector *connector;
        struct drm_crtc *crtc;
        struct drm_crtc_state *crtc_state;
        int ret = 0;
        int i;

        /* program HPD filter */
        dc_resume(dm->dc);

        /* On resume we need to rewrite the MSTM control bits to enable MST */
        s3_handle_mst(ddev, false);

        /*
         * early enable HPD Rx IRQ, should be done before set mode as short
         * pulse interrupts are used for MST
         */
        amdgpu_dm_irq_resume_early(adev);

        /* Do detection */
        list_for_each_entry(connector,
                        &ddev->mode_config.connector_list, head) {
                aconnector = to_amdgpu_connector(connector);

                /*
                 * this is the case when traversing through already created
                 * MST connectors, should be skipped
                 */
                if (aconnector->mst_port)
                        continue;

                dc_link_detect(aconnector->dc_link, false);
                aconnector->dc_sink = NULL;
                amdgpu_dm_update_connector_after_detect(aconnector);
        }

        /* Force mode set in atomic commit */
        for_each_crtc_in_state(adev->dm.cached_state, crtc, crtc_state, i)
                crtc_state->active_changed = true;

        ret = drm_atomic_helper_resume(ddev, adev->dm.cached_state);

        amdgpu_dm_irq_resume_late(adev);

        return ret;
}

static const struct amd_ip_funcs amdgpu_dm_funcs = {
        .name = "dm",
        .early_init = dm_early_init,
        .late_init = dm_late_init,
        .sw_init = dm_sw_init,
        .sw_fini = dm_sw_fini,
        .hw_init = dm_hw_init,
        .hw_fini = dm_hw_fini,
        .suspend = dm_suspend,
        .resume = dm_resume,
        .is_idle = dm_is_idle,
        .wait_for_idle = dm_wait_for_idle,
        .check_soft_reset = dm_check_soft_reset,
        .soft_reset = dm_soft_reset,
        .set_clockgating_state = dm_set_clockgating_state,
        .set_powergating_state = dm_set_powergating_state,
};

const struct amdgpu_ip_block_version dm_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_DCE,
        .major = 1,
        .minor = 0,
        .rev = 0,
        .funcs = &amdgpu_dm_funcs,
};

/* TODO: this is temporarily non-const and should be fixed later */
static struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
        .fb_create = amdgpu_user_framebuffer_create,
        .output_poll_changed = amdgpu_output_poll_changed,
        .atomic_check = amdgpu_dm_atomic_check,
        .atomic_commit = drm_atomic_helper_commit
};

static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
        .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};

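/*
 * Synchronize the DRM connector state with the result of the last link
 * detection: update the dc_sink and EDID property on hotplug, or fall
 * back to the emulated sink for forced (EDID managed) connectors.
 */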
void amdgpu_dm_update_connector_after_detect(
        struct amdgpu_connector *aconnector)
{
        struct drm_connector *connector = &aconnector->base;
        struct drm_device *dev = connector->dev;
        const struct dc_sink *sink;

        /* MST handled by drm_mst framework */
        if (aconnector->mst_mgr.mst_state)
                return;

        sink = aconnector->dc_link->local_sink;

        /* Edid mgmt connector gets first update only in mode_valid hook and
         * then the connector sink is set to either fake or physical sink
         * depending on link status. Don't do it here during boot.
         */
        if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
                        && aconnector->dc_em_sink) {

                /* For S3 resume with headless use the emulated sink to fake a
                 * stream because on resume connector->sink is set to NULL
                 */
                mutex_lock(&dev->mode_config.mutex);

                if (sink) {
                        if (aconnector->dc_sink) {
                                amdgpu_dm_remove_sink_from_freesync_module(
                                                                connector);
                                /* retain and release below are used to bump up
                                 * the refcount for the sink because the link
                                 * doesn't point to it anymore after disconnect,
                                 * so on the next crtc-to-connector reshuffle by
                                 * UMD we would otherwise get an unwanted
                                 * dc_sink release
                                 */
                                if (aconnector->dc_sink != aconnector->dc_em_sink)
                                        dc_sink_release(aconnector->dc_sink);
                        }
                        aconnector->dc_sink = sink;
                        amdgpu_dm_add_sink_to_freesync_module(
                                                connector, aconnector->edid);
                } else {
                        amdgpu_dm_remove_sink_from_freesync_module(connector);
                        if (!aconnector->dc_sink)
                                aconnector->dc_sink = aconnector->dc_em_sink;
                        else if (aconnector->dc_sink != aconnector->dc_em_sink)
                                dc_sink_retain(aconnector->dc_sink);
                }

                mutex_unlock(&dev->mode_config.mutex);
                return;
        }

        /*
         * TODO: temporary guard to look for proper fix
         * if this sink is MST sink, we should not do anything
         */
        if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
                return;

        if (aconnector->dc_sink == sink) {
                /* We got a DP short pulse (Link Loss, DP CTS, etc...).
                 * Do nothing!! */
                DRM_INFO("DCHPD: connector_id=%d: dc_sink didn't change.\n",
                                aconnector->connector_id);
                return;
        }

        DRM_INFO("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
                aconnector->connector_id, aconnector->dc_sink, sink);

        mutex_lock(&dev->mode_config.mutex);

        /* 1. Update status of the drm connector
         * 2. Send an event and let userspace tell us what to do */
        if (sink) {
                /* TODO: check if we still need the S3 mode update workaround.
                 * If yes, put it here. */
                if (aconnector->dc_sink)
                        amdgpu_dm_remove_sink_from_freesync_module(
                                                        connector);

                aconnector->dc_sink = sink;
                if (sink->dc_edid.length == 0)
                        aconnector->edid = NULL;
                else {
                        aconnector->edid =
                                (struct edid *) sink->dc_edid.raw_edid;

                        drm_mode_connector_update_edid_property(connector,
                                        aconnector->edid);
                }
                amdgpu_dm_add_sink_to_freesync_module(connector, aconnector->edid);

        } else {
                amdgpu_dm_remove_sink_from_freesync_module(connector);
                drm_mode_connector_update_edid_property(connector, NULL);
                aconnector->num_modes = 0;
                aconnector->dc_sink = NULL;
        }

        mutex_unlock(&dev->mode_config.mutex);
}

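/*
 * HPD long-pulse handler registered per connector: re-run link detection
 * and, on a status change, update the connector and notify userspace.
 */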
static void handle_hpd_irq(void *param)
{
        struct amdgpu_connector *aconnector = (struct amdgpu_connector *)param;
        struct drm_connector *connector = &aconnector->base;
        struct drm_device *dev = connector->dev;

        /* In case of failure or MST no need to update connector status or
         * notify the OS since (for the MST case) MST does this in its own
         * context.
         */
        mutex_lock(&aconnector->hpd_lock);
        if (dc_link_detect(aconnector->dc_link, false)) {
                amdgpu_dm_update_connector_after_detect(aconnector);

                drm_modeset_lock_all(dev);
                dm_restore_drm_connector_state(dev, connector);
                drm_modeset_unlock_all(dev);

                if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
                        drm_kms_helper_hotplug_event(dev);
        }
        mutex_unlock(&aconnector->hpd_lock);
}

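/*
 * Service a DP short-pulse for an MST root connector: read the ESI/sink
 * status registers, let the DRM MST helper handle the IRQ and ACK it back
 * to the sink, looping while new IRQ vectors keep arriving.
 */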
static void dm_handle_hpd_rx_irq(struct amdgpu_connector *aconnector)
{
        uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
        uint8_t dret;
        bool new_irq_handled = false;
        int dpcd_addr;
        int dpcd_bytes_to_read;

        const int max_process_count = 30;
        int process_count = 0;

        const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

        if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
                dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
                /* DPCD 0x200 - 0x201 for downstream IRQ */
                dpcd_addr = DP_SINK_COUNT;
        } else {
                dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
                /* DPCD 0x2002 - 0x2005 for downstream IRQ */
                dpcd_addr = DP_SINK_COUNT_ESI;
        }

        dret = drm_dp_dpcd_read(
                &aconnector->dm_dp_aux.aux,
                dpcd_addr,
                esi,
                dpcd_bytes_to_read);

        while (dret == dpcd_bytes_to_read &&
                process_count < max_process_count) {
                uint8_t retry;

                dret = 0;

                process_count++;

                DRM_DEBUG_KMS("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
                /* handle HPD short pulse irq */
                if (aconnector->mst_mgr.mst_state)
                        drm_dp_mst_hpd_irq(
                                &aconnector->mst_mgr,
                                esi,
                                &new_irq_handled);

                if (new_irq_handled) {
                        /* ACK at DPCD to notify downstream */
                        const int ack_dpcd_bytes_to_write =
                                dpcd_bytes_to_read - 1;

                        for (retry = 0; retry < 3; retry++) {
                                uint8_t wret;

                                wret = drm_dp_dpcd_write(
                                        &aconnector->dm_dp_aux.aux,
                                        dpcd_addr + 1,
                                        &esi[1],
                                        ack_dpcd_bytes_to_write);
                                if (wret == ack_dpcd_bytes_to_write)
                                        break;
                        }

                        /* check if there is a new irq to be handled */
                        dret = drm_dp_dpcd_read(
                                &aconnector->dm_dp_aux.aux,
                                dpcd_addr,
                                esi,
                                dpcd_bytes_to_read);

                        new_irq_handled = false;
                } else
                        break;
        }

        if (process_count == max_process_count)
                DRM_DEBUG_KMS("Loop exceeded max iterations\n");
}

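/*
 * HPD RX (short pulse) interrupt handler. Lets DC handle link loss and
 * CTS requests first; if a downstream port status change is reported on a
 * non-MST link it re-detects the link, and MST IRQ handling is deferred
 * to dm_handle_hpd_rx_irq().
 */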
static void handle_hpd_rx_irq(void *param)
{
        struct amdgpu_connector *aconnector = (struct amdgpu_connector *)param;
        struct drm_connector *connector = &aconnector->base;
        struct drm_device *dev = connector->dev;
        const struct dc_link *dc_link = aconnector->dc_link;
        bool is_mst_root_connector = aconnector->mst_mgr.mst_state;

        /* TODO: Temporarily take a mutex so that the hpd interrupt does not
         * run into a gpio conflict; once the i2c helper is implemented, this
         * mutex should be retired.
         */
        if (aconnector->dc_link->type != dc_connection_mst_branch)
                mutex_lock(&aconnector->hpd_lock);

        if (dc_link_handle_hpd_rx_irq(aconnector->dc_link) &&
                        !is_mst_root_connector) {
                /* Downstream Port status changed. */
                if (dc_link_detect(aconnector->dc_link, false)) {
                        amdgpu_dm_update_connector_after_detect(aconnector);

                        drm_modeset_lock_all(dev);
                        dm_restore_drm_connector_state(dev, connector);
                        drm_modeset_unlock_all(dev);

                        drm_kms_helper_hotplug_event(dev);
                }
        }
        if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
                                (dc_link->type == dc_connection_mst_branch))
                dm_handle_hpd_rx_irq(aconnector);

        if (aconnector->dc_link->type != dc_connection_mst_branch)
                mutex_unlock(&aconnector->hpd_lock);
}

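/*
 * Register HPD and HPD RX (DP short pulse) interrupt handlers for every
 * connector whose link exposes a valid IRQ source.
 */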
static void register_hpd_handlers(struct amdgpu_device *adev)
{
        struct drm_device *dev = adev->ddev;
        struct drm_connector *connector;
        struct amdgpu_connector *aconnector;
        const struct dc_link *dc_link;
        struct dc_interrupt_params int_params = {0};

        int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
        int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

        list_for_each_entry(connector,
                        &dev->mode_config.connector_list, head) {

                aconnector = to_amdgpu_connector(connector);
                dc_link = aconnector->dc_link;

                if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
                        int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
                        int_params.irq_source = dc_link->irq_source_hpd;

                        amdgpu_dm_irq_register_interrupt(adev, &int_params,
                                        handle_hpd_irq,
                                        (void *) aconnector);
                }

                if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {

                        /* Also register for DP short pulse (hpd_rx). */
                        int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
                        int_params.irq_source = dc_link->irq_source_hpd_rx;

                        amdgpu_dm_irq_register_interrupt(adev, &int_params,
                                        handle_hpd_rx_irq,
                                        (void *) aconnector);
                }
        }
}

/* Register IRQ sources and initialize IRQ callbacks */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
        struct dc *dc = adev->dm.dc;
        struct common_irq_params *c_irq_params;
        struct dc_interrupt_params int_params = {0};
        int r;
        int i;
        unsigned int client_id = AMDGPU_IH_CLIENTID_LEGACY;

        if (adev->asic_type == CHIP_VEGA10)
                client_id = AMDGPU_IH_CLIENTID_DCE;

        int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
        int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

        /* Actions of amdgpu_irq_add_id():
         * 1. Register a set() function with base driver.
         *    Base driver will call set() function to enable/disable an
         *    interrupt in DC hardware.
         * 2. Register amdgpu_dm_irq_handler().
         *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
         *    coming from DC hardware.
         *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
         *    for acknowledging and handling. */

        /* Use VBLANK interrupt */
        for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
                r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
                if (r) {
                        DRM_ERROR("Failed to add crtc irq id!\n");
                        return r;
                }

                int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
                int_params.irq_source =
                        dc_interrupt_to_irq_source(dc, i, 0);

                c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

                c_irq_params->adev = adev;
                c_irq_params->irq_src = int_params.irq_source;

                amdgpu_dm_irq_register_interrupt(adev, &int_params,
                                dm_crtc_high_irq, c_irq_params);
        }

        /* Use GRPH_PFLIP interrupt */
        for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
                        i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
                r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
                if (r) {
                        DRM_ERROR("Failed to add page flip irq id!\n");
                        return r;
                }

                int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
                int_params.irq_source =
                        dc_interrupt_to_irq_source(dc, i, 0);

                c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

                c_irq_params->adev = adev;
                c_irq_params->irq_src = int_params.irq_source;

                amdgpu_dm_irq_register_interrupt(adev, &int_params,
                                dm_pflip_high_irq, c_irq_params);
        }

        /* HPD */
        r = amdgpu_irq_add_id(adev, client_id,
                        VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
        if (r) {
                DRM_ERROR("Failed to add hpd irq id!\n");
                return r;
        }

        register_hpd_handlers(adev);

        return 0;
}

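/*
 * Set up the DRM mode_config state (funcs, size limits, async flip
 * support) and create the amdgpu modeset properties.
 */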
static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
{
        int r;

        adev->mode_info.mode_config_initialized = true;

        adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
        adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;

        adev->ddev->mode_config.max_width = 16384;
        adev->ddev->mode_config.max_height = 16384;

        adev->ddev->mode_config.preferred_depth = 24;
        adev->ddev->mode_config.prefer_shadow = 1;
        /* indicate support of immediate flip */
        adev->ddev->mode_config.async_page_flip = true;

        adev->ddev->mode_config.fb_base = adev->mc.aper_base;

        r = amdgpu_modeset_create_props(adev);
        if (r)
                return r;

        return 0;
}

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
        defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
        struct amdgpu_display_manager *dm = bl_get_data(bd);

        if (dc_link_set_backlight_level(dm->backlight_link,
                        bd->props.brightness, 0, 0))
                return 0;
        else
                return 1;
}

static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
        return bd->props.brightness;
}

static const struct backlight_ops amdgpu_dm_backlight_ops = {
        .get_brightness = amdgpu_dm_backlight_get_brightness,
        .update_status  = amdgpu_dm_backlight_update_status,
};

void amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
{
        char bl_name[16];
        struct backlight_properties props = { 0 };

        props.max_brightness = AMDGPU_MAX_BL_LEVEL;
        props.type = BACKLIGHT_RAW;

        snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
                        dm->adev->ddev->primary->index);

        dm->backlight_dev = backlight_device_register(bl_name,
                        dm->adev->ddev->dev,
                        dm,
                        &amdgpu_dm_backlight_ops,
                        &props);

        if (!dm->backlight_dev)
                DRM_ERROR("DM: Backlight registration failed!\n");
        else
                DRM_INFO("DM: Registered Backlight device: %s\n", bl_name);
}

#endif

/* In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with DAL component
 *
 * Returns 0 on success
 */
int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
        struct amdgpu_display_manager *dm = &adev->dm;
        uint32_t i;
        struct amdgpu_connector *aconnector = NULL;
        struct amdgpu_encoder *aencoder = NULL;
        struct amdgpu_mode_info *mode_info = &adev->mode_info;
        uint32_t link_cnt;

        link_cnt = dm->dc->caps.max_links;
        if (amdgpu_dm_mode_config_init(dm->adev)) {
                DRM_ERROR("DM: Failed to initialize mode config\n");
                return -1;
        }

        for (i = 0; i < dm->dc->caps.max_surfaces; i++) {
                mode_info->planes[i] = kzalloc(sizeof(struct amdgpu_plane),
                                                                 GFP_KERNEL);
                if (!mode_info->planes[i]) {
                        DRM_ERROR("KMS: Failed to allocate surface\n");
                        goto fail_free_planes;
                }
                mode_info->planes[i]->plane_type = mode_info->plane_type[i];
                if (amdgpu_dm_plane_init(dm, mode_info->planes[i], 0xff)) {
                        DRM_ERROR("KMS: Failed to initialize plane\n");
                        goto fail_free_planes;
                }
        }

        for (i = 0; i < dm->dc->caps.max_streams; i++)
                if (amdgpu_dm_crtc_init(dm, &mode_info->planes[i]->base, i)) {
                        DRM_ERROR("KMS: Failed to initialize crtc\n");
                        goto fail_free_planes;
                }

        dm->display_indexes_num = dm->dc->caps.max_streams;

        /* loops over all connectors on the board */
        for (i = 0; i < link_cnt; i++) {

                if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
                        DRM_ERROR(
                                "KMS: Cannot support more than %d display indexes\n",
                                        AMDGPU_DM_MAX_DISPLAY_INDEX);
                        continue;
                }

                aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
                if (!aconnector)
                        goto fail_free_planes;

                aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
                if (!aencoder)
                        goto fail_free_connector;

                if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
                        DRM_ERROR("KMS: Failed to initialize encoder\n");
                        goto fail_free_encoder;
                }

                if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
                        DRM_ERROR("KMS: Failed to initialize connector\n");
                        goto fail_free_encoder;
                }

                if (dc_link_detect(dc_get_link_at_index(dm->dc, i), true))
                        amdgpu_dm_update_connector_after_detect(aconnector);
        }

        /* Software is initialized. Now we can register interrupt handlers. */
        switch (adev->asic_type) {
        case CHIP_BONAIRE:
        case CHIP_HAWAII:
        case CHIP_TONGA:
        case CHIP_FIJI:
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_POLARIS11:
        case CHIP_POLARIS10:
        case CHIP_POLARIS12:
        case CHIP_VEGA10:
                if (dce110_register_irq_handlers(dm->adev)) {
                        DRM_ERROR("DM: Failed to initialize IRQ\n");
                        goto fail_free_encoder;
                }
                break;
        default:
                DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
                goto fail_free_encoder;
        }

        drm_mode_config_reset(dm->ddev);

        return 0;
fail_free_encoder:
        kfree(aencoder);
fail_free_connector:
        kfree(aconnector);
fail_free_planes:
        for (i = 0; i < dm->dc->caps.max_surfaces; i++)
                kfree(mode_info->planes[i]);
        return -1;
}

void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
        drm_mode_config_cleanup(dm->ddev);
}

/******************************************************************************
 * amdgpu_display_funcs functions
 *****************************************************************************/

/**
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
        /* TODO: implement later */
}

static void dm_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
                                     u8 level)
{
        /* TODO: translate amdgpu_encoder to display_index and call DAL */
}

static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
{
        /* TODO: translate amdgpu_encoder to display_index and call DAL */
        return 0;
}

/******************************************************************************
 * Page Flip functions
 ******************************************************************************/

/**
 * dm_page_flip - called by amdgpu_flip_work_func(), which is triggered
 *                      via DRM IOCTL, by user mode.
 *
 * @adev: amdgpu_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 *
 * Does the actual pageflip (surface address update).
 */
static void dm_page_flip(struct amdgpu_device *adev,
                         int crtc_id, u64 crtc_base, bool async)
{
        struct amdgpu_crtc *acrtc;
        const struct dc_stream *stream;
        struct dc_flip_addrs addr = { {0} };

        /*
         * TODO risk of concurrency issues
         *
         * This should be guarded by the dal_mutex but we can't do this since
         * the caller uses a spin_lock on event_lock.
         *
         * If we wait on the dal_mutex a second page flip interrupt might come,
         * spin on the event_lock, disabling interrupts while it does so. At
         * this point the core can no longer be pre-empted and return to the
         * thread that waited on the dal_mutex and we're deadlocked.
         *
         * With multiple cores the same essentially happens but might just take
         * a little longer to lock up all cores.
         *
         * The reason we should lock on dal_mutex is so that we can be sure
         * nobody messes with acrtc->stream after we read and check its value.
         *
         * We might be able to fix our concurrency issues with a work queue
         * where we schedule all work items (mode_set, page_flip, etc.) and
         * execute them one by one. Care needs to be taken to still deal with
         * any potential concurrency issues arising from interrupt calls.
         */

        acrtc = adev->mode_info.crtcs[crtc_id];
        stream = acrtc->stream;

        if (acrtc->pflip_status != AMDGPU_FLIP_NONE) {
                DRM_ERROR("flip queue: acrtc %d, already busy\n", acrtc->crtc_id);
                /* In the commit tail framework this cannot happen */
                BUG();
        }

        /*
         * Received a page flip call after the display has been reset.
         * Just return in this case. Everything should be clean-up on reset.
         */
        if (!stream) {
                WARN_ON(1);
                return;
        }

        addr.address.grph.addr.low_part = lower_32_bits(crtc_base);
        addr.address.grph.addr.high_part = upper_32_bits(crtc_base);
        addr.flip_immediate = async;

        if (acrtc->base.state->event &&
            acrtc->base.state->event->event.base.type ==
                            DRM_EVENT_FLIP_COMPLETE) {
                acrtc->event = acrtc->base.state->event;

                /* Set the flip status */
                acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

                /* Mark this event as consumed */
                acrtc->base.state->event = NULL;
        }

        dc_flip_surface_addrs(adev->dm.dc,
                              dc_stream_get_status(stream)->surfaces,
                              &addr, 1);

        DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
                         __func__,
                         addr.address.grph.addr.high_part,
                         addr.address.grph.addr.low_part);
}

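/*
 * Ioctl-style callback hooked up as the display funcs' notify_freesync
 * entry point: pushes the (currently zero-initialized) freesync parameters
 * to the freesync module for every active stream.
 */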
static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
                                struct drm_file *filp)
{
        struct mod_freesync_params freesync_params = {0};
        uint8_t num_streams;
        uint8_t i;

        struct amdgpu_device *adev = dev->dev_private;
        int r = 0;

        /* Get freesync enable flag from DRM */

        num_streams = dc_get_current_stream_count(adev->dm.dc);

        for (i = 0; i < num_streams; i++) {
                const struct dc_stream *stream;

                stream = dc_get_stream_at_index(adev->dm.dc, i);

                mod_freesync_update_state(adev->dm.freesync_module,
                                          &stream, 1, &freesync_params);
        }

        return r;
}

static const struct amdgpu_display_funcs dm_display_funcs = {
        .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
        .vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
        .vblank_wait = NULL,
        .backlight_set_level =
                dm_set_backlight_level, /* called unconditionally */
        .backlight_get_level =
                dm_get_backlight_level, /* called unconditionally */
        .hpd_sense = NULL, /* called unconditionally */
        .hpd_set_polarity = NULL, /* called unconditionally */
        .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
        .page_flip = dm_page_flip, /* called unconditionally */
        .page_flip_get_scanoutpos =
                dm_crtc_get_scanoutpos, /* called unconditionally */
        .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
        .add_connector = NULL, /* VBIOS parsing. DAL does it. */
        .notify_freesync = amdgpu_notify_freesync,
};

#if defined(CONFIG_DEBUG_KERNEL_DC)

static ssize_t s3_debug_store(
        struct device *device,
        struct device_attribute *attr,
        const char *buf,
        size_t count)
{
        int ret;
        int s3_state;
        struct pci_dev *pdev = to_pci_dev(device);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);
        struct amdgpu_device *adev = drm_dev->dev_private;

        ret = kstrtoint(buf, 0, &s3_state);

        if (ret == 0) {
                if (s3_state) {
                        dm_resume(adev);
                        amdgpu_dm_display_resume(adev);
                        drm_kms_helper_hotplug_event(adev->ddev);
                } else
                        dm_suspend(adev);
        }

        /* propagate the parse error instead of returning 0 */
        return ret == 0 ? count : ret;
}

DEVICE_ATTR_WO(s3_debug);

#endif

static int dm_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        amdgpu_dm_set_irq_funcs(adev);

        switch (adev->asic_type) {
        case CHIP_BONAIRE:
        case CHIP_HAWAII:
                adev->mode_info.num_crtc = 6;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 6;
                adev->mode_info.plane_type = dm_surfaces_type_default;
                break;
        case CHIP_FIJI:
        case CHIP_TONGA:
                adev->mode_info.num_crtc = 6;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 7;
                adev->mode_info.plane_type = dm_surfaces_type_default;
                break;
        case CHIP_CARRIZO:
                adev->mode_info.num_crtc = 3;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 9;
                adev->mode_info.plane_type = dm_surfaces_type_carrizo;
                break;
        case CHIP_STONEY:
                adev->mode_info.num_crtc = 2;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 9;
                adev->mode_info.plane_type = dm_surfaces_type_stoney;
                break;
        case CHIP_POLARIS11:
        case CHIP_POLARIS12:
                adev->mode_info.num_crtc = 5;
                adev->mode_info.num_hpd = 5;
                adev->mode_info.num_dig = 5;
                adev->mode_info.plane_type = dm_surfaces_type_default;
                break;
        case CHIP_POLARIS10:
                adev->mode_info.num_crtc = 6;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 6;
                adev->mode_info.plane_type = dm_surfaces_type_default;
                break;
        case CHIP_VEGA10:
                adev->mode_info.num_crtc = 6;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 6;
                adev->mode_info.plane_type = dm_surfaces_type_default;
                break;
        default:
                DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
                return -EINVAL;
        }

        if (adev->mode_info.funcs == NULL)
                adev->mode_info.funcs = &dm_display_funcs;

        /* Note: Do NOT change adev->audio_endpt_rreg and
         * adev->audio_endpt_wreg because they are initialised in
         * amdgpu_device_init() */
#if defined(CONFIG_DEBUG_KERNEL_DC)
        device_create_file(
                adev->ddev->dev,
                &dev_attr_s3_debug);
#endif

        return 0;
}

bool amdgpu_dm_acquire_dal_lock(struct amdgpu_display_manager *dm)
{
        /* TODO */
        return true;
}

bool amdgpu_dm_release_dal_lock(struct amdgpu_display_manager *dm)
{
        /* TODO */
        return true;
}
