drm/amd/display: Use real number of CRTCs and HPDs in set_irq_funcs
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <drm/drmP.h>

#include "dm_services_types.h"
#include "dc.h"

#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"

/******************************************************************************
 * Private declarations.
 *****************************************************************************/

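/*
 * Common state shared by IRQ and timer handler entries.  Each registered
 * handler is kept as a node on one of the per-IRQ-source handler lists
 * through "list".
 */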
struct handler_common_data {
        struct list_head list;
        interrupt_handler handler;
        void *handler_arg;

        /* DM which this handler belongs to */
        struct amdgpu_display_manager *dm;
};

struct amdgpu_dm_irq_handler_data {
        struct handler_common_data hcd;
        /* DAL irq source which registered for this interrupt. */
        enum dc_irq_source irq_source;
};

struct amdgpu_dm_timer_handler_data {
        struct handler_common_data hcd;
        struct delayed_work d_work;
};

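/*
 * The handler list table lock protects both the high and the low context
 * handler lists.  The irqsave variant is used because the lists are also
 * walked from the interrupt handler path (see amdgpu_dm_irq_immediate_work()).
 */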
#define DM_IRQ_TABLE_LOCK(adev, flags) \
        spin_lock_irqsave(&adev->dm.irq_handler_list_table_lock, flags)

#define DM_IRQ_TABLE_UNLOCK(adev, flags) \
        spin_unlock_irqrestore(&adev->dm.irq_handler_list_table_lock, flags)

/******************************************************************************
 * Private functions.
 *****************************************************************************/

static void init_handler_common_data(struct handler_common_data *hcd,
                                     void (*ih)(void *),
                                     void *args,
                                     struct amdgpu_display_manager *dm)
{
        hcd->handler = ih;
        hcd->handler_arg = args;
        hcd->dm = dm;
}

/**
 * dm_irq_work_func - Handle an IRQ outside of the interrupt handler proper.
 *
 * @work: work struct
 */
static void dm_irq_work_func(struct work_struct *work)
{
        struct list_head *entry;
        struct irq_list_head *irq_list_head =
                container_of(work, struct irq_list_head, work);
        struct list_head *handler_list = &irq_list_head->head;
        struct amdgpu_dm_irq_handler_data *handler_data;

        list_for_each(entry, handler_list) {
                handler_data =
                        list_entry(
                                entry,
                                struct amdgpu_dm_irq_handler_data,
                                hcd.list);

                DRM_DEBUG_KMS("DM_IRQ: work_func: for dal_src=%d\n",
                                handler_data->irq_source);

                handler_data->hcd.handler(handler_data->hcd.handler_arg);
        }

        /* Call a DAL subcomponent which registered for interrupt notification
         * at INTERRUPT_LOW_IRQ_CONTEXT.
         * (The most common use is HPD interrupt) */
}

/**
 * Remove a handler and return a pointer to the handler list from which the
 * handler was removed.
 */
static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
                                            void *ih,
                                            const struct dc_interrupt_params *int_params)
{
        struct list_head *hnd_list;
        struct list_head *entry, *tmp;
        struct amdgpu_dm_irq_handler_data *handler;
        unsigned long irq_table_flags;
        bool handler_removed = false;
        enum dc_irq_source irq_source;

        DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

        irq_source = int_params->irq_source;

        switch (int_params->int_context) {
        case INTERRUPT_HIGH_IRQ_CONTEXT:
                hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
                break;
        case INTERRUPT_LOW_IRQ_CONTEXT:
        default:
                hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
                break;
        }

        list_for_each_safe(entry, tmp, hnd_list) {

                handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
                                hcd.list);

                if (ih == handler) {
                        /* Found our handler. Remove it from the list. */
                        list_del(&handler->hcd.list);
                        handler_removed = true;
                        break;
                }
        }

        DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

        if (handler_removed == false) {
                /* Not necessarily an error - caller may not
                 * know the context. */
                return NULL;
        }

        kfree(handler);

        DRM_DEBUG_KMS(
        "DM_IRQ: removed irq handler: %p for: dal_src=%d, irq context=%d\n",
                ih, int_params->irq_source, int_params->int_context);

        return hnd_list;
}

/* If 'handler_in == NULL' then remove ALL handlers. */
static void remove_timer_handler(struct amdgpu_device *adev,
                                 struct amdgpu_dm_timer_handler_data *handler_in)
{
        struct amdgpu_dm_timer_handler_data *handler_temp;
        struct list_head *handler_list;
        struct list_head *entry, *tmp;
        unsigned long irq_table_flags;
        bool handler_removed = false;

        DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

        handler_list = &adev->dm.timer_handler_list;

        list_for_each_safe(entry, tmp, handler_list) {
                /* Note that list_for_each_safe() guarantees that
                 * handler_temp is NOT null. */
                handler_temp = list_entry(entry,
                                struct amdgpu_dm_timer_handler_data, hcd.list);

                if (handler_in == NULL || handler_in == handler_temp) {
                        list_del(&handler_temp->hcd.list);
                        DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

                        DRM_DEBUG_KMS("DM_IRQ: removing timer handler: %p\n",
                                        handler_temp);

                        if (handler_in == NULL) {
                                /* Since it is still in the queue, it must
                                 * be cancelled. */
                                cancel_delayed_work_sync(&handler_temp->d_work);
                        }

                        kfree(handler_temp);
                        handler_removed = true;

                        DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
                }

                /* Remove ALL handlers. */
                if (handler_in == NULL)
                        continue;

                /* Remove a SPECIFIC handler.
                 * Found our handler - we can stop here. */
                if (handler_in == handler_temp)
                        break;
        }

        DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

        if (handler_in != NULL && handler_removed == false)
                DRM_ERROR("DM_IRQ: handler: %p is not in the list!\n",
                                handler_in);
}

static bool
validate_irq_registration_params(struct dc_interrupt_params *int_params,
                                 void (*ih)(void *))
{
        if (NULL == int_params || NULL == ih) {
                DRM_ERROR("DM_IRQ: invalid input!\n");
                return false;
        }

        if (int_params->int_context >= INTERRUPT_CONTEXT_NUMBER) {
                DRM_ERROR("DM_IRQ: invalid context: %d!\n",
                                int_params->int_context);
                return false;
        }

        if (!DAL_VALID_IRQ_SRC_NUM(int_params->irq_source)) {
                DRM_ERROR("DM_IRQ: invalid irq_source: %d!\n",
                                int_params->irq_source);
                return false;
        }

        return true;
}

static bool validate_irq_unregistration_params(enum dc_irq_source irq_source,
                                               irq_handler_idx handler_idx)
{
        if (DAL_INVALID_IRQ_HANDLER_IDX == handler_idx) {
                DRM_ERROR("DM_IRQ: invalid handler_idx==NULL!\n");
                return false;
        }

        if (!DAL_VALID_IRQ_SRC_NUM(irq_source)) {
                DRM_ERROR("DM_IRQ: invalid irq_source:%d!\n", irq_source);
                return false;
        }

        return true;
}
/******************************************************************************
 * Public functions.
 *
 * Note: caller is responsible for input validation.
 *****************************************************************************/

void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
                                       struct dc_interrupt_params *int_params,
                                       void (*ih)(void *),
                                       void *handler_args)
{
        struct list_head *hnd_list;
        struct amdgpu_dm_irq_handler_data *handler_data;
        unsigned long irq_table_flags;
        enum dc_irq_source irq_source;

        if (false == validate_irq_registration_params(int_params, ih))
                return DAL_INVALID_IRQ_HANDLER_IDX;

        handler_data = kzalloc(sizeof(*handler_data), GFP_KERNEL);
        if (!handler_data) {
                DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
                return DAL_INVALID_IRQ_HANDLER_IDX;
        }

        init_handler_common_data(&handler_data->hcd, ih, handler_args,
                        &adev->dm);

        irq_source = int_params->irq_source;

        handler_data->irq_source = irq_source;

        /* Lock the list, add the handler. */
        DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

        switch (int_params->int_context) {
        case INTERRUPT_HIGH_IRQ_CONTEXT:
                hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
                break;
        case INTERRUPT_LOW_IRQ_CONTEXT:
        default:
                hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
                break;
        }

        list_add_tail(&handler_data->hcd.list, hnd_list);

        DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

        /* This pointer will be stored by code which requested interrupt
         * registration.
         * The same pointer will be needed in order to unregister the
         * interrupt. */

        DRM_DEBUG_KMS(
                "DM_IRQ: added irq handler: %p for: dal_src=%d, irq context=%d\n",
                handler_data,
                irq_source,
                int_params->int_context);

        return handler_data;
}
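
/*
 * Illustrative sketch (not part of the driver) of how a caller is expected to
 * use the registration API: keep the returned pointer and pass it back when
 * unregistering.  The handler name and argument below are hypothetical.
 *
 *      struct dc_interrupt_params int_params = {0};
 *      void *cookie;
 *
 *      int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
 *      int_params.irq_source = DC_IRQ_SOURCE_HPD1;
 *      cookie = amdgpu_dm_irq_register_interrupt(adev, &int_params,
 *                                                my_hpd_handler, my_arg);
 *      ...
 *      amdgpu_dm_irq_unregister_interrupt(adev, DC_IRQ_SOURCE_HPD1, cookie);
 */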

void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev,
                                        enum dc_irq_source irq_source,
                                        void *ih)
{
        struct list_head *handler_list;
        struct dc_interrupt_params int_params;
        int i;

        if (false == validate_irq_unregistration_params(irq_source, ih))
                return;

        memset(&int_params, 0, sizeof(int_params));

        int_params.irq_source = irq_source;

        for (i = 0; i < INTERRUPT_CONTEXT_NUMBER; i++) {

                int_params.int_context = i;

                handler_list = remove_irq_handler(adev, ih, &int_params);

                if (handler_list != NULL)
                        break;
        }

        if (handler_list == NULL) {
                /* If we got here, it means we searched all irq contexts
                 * for this irq source, but the handler was not found. */
                DRM_ERROR(
                "DM_IRQ: failed to find irq handler:%p for irq_source:%d!\n",
                        ih, irq_source);
        }
}

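/*
 * amdgpu_dm_irq_init() - Initialize DM IRQ management.
 *
 * For each DAL IRQ source this sets up a high context handler list (called
 * directly from the interrupt handler) and a low context list with its
 * deferred work item, plus the timer handler list and its dedicated
 * workqueue.
 */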
int amdgpu_dm_irq_init(struct amdgpu_device *adev)
{
        int src;
        struct irq_list_head *lh;

        DRM_DEBUG_KMS("DM_IRQ\n");

        spin_lock_init(&adev->dm.irq_handler_list_table_lock);

        for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
                /* low context handler list init */
                lh = &adev->dm.irq_handler_list_low_tab[src];
                INIT_LIST_HEAD(&lh->head);
                INIT_WORK(&lh->work, dm_irq_work_func);

                /* high context handler init */
                INIT_LIST_HEAD(&adev->dm.irq_handler_list_high_tab[src]);
        }

        INIT_LIST_HEAD(&adev->dm.timer_handler_list);

        /* allocate and initialize the workqueue for DM timer */
        adev->dm.timer_workqueue = create_singlethread_workqueue(
                        "dm_timer_queue");
        if (adev->dm.timer_workqueue == NULL) {
                DRM_ERROR("DM_IRQ: unable to create timer queue!\n");
                return -1;
        }

        return 0;
}

/* DM IRQ and timer resource release */
void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
{
        int src;
        struct irq_list_head *lh;

        DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n");

        for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {

                /* The handler was removed from the table, which means it is
                 * safe to flush all the 'work'
                 * (because no code can schedule a new one). */
                lh = &adev->dm.irq_handler_list_low_tab[src];
                flush_work(&lh->work);
        }

        /* Cancel ALL timers and release handlers (if any). */
        remove_timer_handler(adev, NULL);
        /* Release the queue itself. */
        destroy_workqueue(adev->dm.timer_workqueue);
}

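/*
 * amdgpu_dm_irq_suspend() - Disable the HPD and HPD RX interrupt sources that
 * have handlers registered and flush any low context work still pending for
 * them before suspend.
 */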
int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
{
        int src;
        struct list_head *hnd_list_h;
        struct list_head *hnd_list_l;
        unsigned long irq_table_flags;

        DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

        DRM_DEBUG_KMS("DM_IRQ: suspend\n");

        /*
         * Disable HW interrupts for HPD and HPD RX only, since FLIP and
         * VBLANK interrupts are disabled in manage_dm_interrupts() when a
         * CRTC is disabled.
         */
        for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
                hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
                hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
                if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
                        dc_interrupt_set(adev->dm.dc, src, false);

                DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
                flush_work(&adev->dm.irq_handler_list_low_tab[src].work);

                DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
        }

        DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
        return 0;
}

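/*
 * amdgpu_dm_irq_resume_early() - Re-enable the HPD RX (short pulse) interrupt
 * sources that have handlers registered.
 */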
int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
{
        int src;
        struct list_head *hnd_list_h, *hnd_list_l;
        unsigned long irq_table_flags;

        DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

        DRM_DEBUG_KMS("DM_IRQ: early resume\n");

        /* Re-enable HW interrupts for HPD RX (short pulse) only. */
        for (src = DC_IRQ_SOURCE_HPD1RX; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
                hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
                hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
                if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
                        dc_interrupt_set(adev->dm.dc, src, true);
        }

        DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

        return 0;
}

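/*
 * amdgpu_dm_irq_resume_late() - Re-enable the HPD interrupt sources that have
 * handlers registered; FLIP and VBLANK are handled by manage_dm_interrupts().
 */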
int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
{
        int src;
        struct list_head *hnd_list_h, *hnd_list_l;
        unsigned long irq_table_flags;

        DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

        DRM_DEBUG_KMS("DM_IRQ: resume\n");

        /*
         * Re-enable HW interrupts for HPD only, since FLIP and VBLANK
         * interrupts are enabled in manage_dm_interrupts() when a CRTC is
         * enabled.
         */
        for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6; src++) {
                hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
                hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
                if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
                        dc_interrupt_set(adev->dm.dc, src, true);
        }

        DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
        return 0;
}

/**
 * amdgpu_dm_irq_schedule_work - schedule all work items registered for the
 * "irq_source".
 */
static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev,
                                        enum dc_irq_source irq_source)
{
        unsigned long irq_table_flags;
        struct work_struct *work = NULL;

        DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

        if (!list_empty(&adev->dm.irq_handler_list_low_tab[irq_source].head))
                work = &adev->dm.irq_handler_list_low_tab[irq_source].work;

        DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

        if (work) {
                if (!schedule_work(work))
                        DRM_INFO("amdgpu_dm_irq_schedule_work FAILED src %d\n",
                                                irq_source);
        }
}

/*
 * amdgpu_dm_irq_immediate_work
 * Call the high IRQ context handlers registered for "irq_source" immediately,
 * without going through the work queue.
 */
static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev,
                                         enum dc_irq_source irq_source)
{
        struct amdgpu_dm_irq_handler_data *handler_data;
        struct list_head *entry;
        unsigned long irq_table_flags;

        DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

        list_for_each(
                entry,
                &adev->dm.irq_handler_list_high_tab[irq_source]) {

                handler_data =
                        list_entry(
                                entry,
                                struct amdgpu_dm_irq_handler_data,
                                hcd.list);

                /* Call a subcomponent which registered for immediate
                 * interrupt notification */
                handler_data->hcd.handler(handler_data->hcd.handler_arg);
        }

        DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
}

/*
 * amdgpu_dm_irq_handler
 *
 * Generic IRQ handler, calls all registered high irq work immediately, and
 * schedules work for low irq
 */
static int amdgpu_dm_irq_handler(struct amdgpu_device *adev,
                                 struct amdgpu_irq_src *source,
                                 struct amdgpu_iv_entry *entry)
{
        enum dc_irq_source src =
                dc_interrupt_to_irq_source(
                        adev->dm.dc,
                        entry->src_id,
                        entry->src_data[0]);

        dc_interrupt_ack(adev->dm.dc, src);

        /* Call high irq work immediately */
        amdgpu_dm_irq_immediate_work(adev, src);
        /* Schedule low_irq work */
        amdgpu_dm_irq_schedule_work(adev, src);

        return 0;
}

static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned type)
{
        switch (type) {
        case AMDGPU_HPD_1:
                return DC_IRQ_SOURCE_HPD1;
        case AMDGPU_HPD_2:
                return DC_IRQ_SOURCE_HPD2;
        case AMDGPU_HPD_3:
                return DC_IRQ_SOURCE_HPD3;
        case AMDGPU_HPD_4:
                return DC_IRQ_SOURCE_HPD4;
        case AMDGPU_HPD_5:
                return DC_IRQ_SOURCE_HPD5;
        case AMDGPU_HPD_6:
                return DC_IRQ_SOURCE_HPD6;
        default:
                return DC_IRQ_SOURCE_INVALID;
        }
}

static int amdgpu_dm_set_hpd_irq_state(struct amdgpu_device *adev,
                                       struct amdgpu_irq_src *source,
                                       unsigned type,
                                       enum amdgpu_interrupt_state state)
{
        enum dc_irq_source src = amdgpu_dm_hpd_to_dal_irq_source(type);
        bool st = (state == AMDGPU_IRQ_STATE_ENABLE);

        dc_interrupt_set(adev->dm.dc, src, st);
        return 0;
}

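/*
 * dm_irq_state() - Enable/disable the per-CRTC IRQ source of the given type.
 *
 * Per-CRTC sources of one type are assumed to be numbered consecutively, so
 * the source for a given CRTC is the base irq type plus the CRTC's OTG
 * instance.
 */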
static inline int dm_irq_state(struct amdgpu_device *adev,
                               struct amdgpu_irq_src *source,
                               unsigned crtc_id,
                               enum amdgpu_interrupt_state state,
                               const enum irq_type dal_irq_type,
                               const char *func)
{
        bool st;
        enum dc_irq_source irq_source;

        struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc_id];

        if (!acrtc) {
                DRM_ERROR(
                        "%s: crtc is NULL at id :%d\n",
                        func,
                        crtc_id);
                return 0;
        }

        irq_source = dal_irq_type + acrtc->otg_inst;

        st = (state == AMDGPU_IRQ_STATE_ENABLE);

        dc_interrupt_set(adev->dm.dc, irq_source, st);
        return 0;
}

static int amdgpu_dm_set_pflip_irq_state(struct amdgpu_device *adev,
                                         struct amdgpu_irq_src *source,
                                         unsigned crtc_id,
                                         enum amdgpu_interrupt_state state)
{
        return dm_irq_state(
                adev,
                source,
                crtc_id,
                state,
                IRQ_TYPE_PFLIP,
                __func__);
}

static int amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *source,
                                        unsigned crtc_id,
                                        enum amdgpu_interrupt_state state)
{
        return dm_irq_state(
                adev,
                source,
                crtc_id,
                state,
                IRQ_TYPE_VBLANK,
                __func__);
}

static const struct amdgpu_irq_src_funcs dm_crtc_irq_funcs = {
        .set = amdgpu_dm_set_crtc_irq_state,
        .process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_pageflip_irq_funcs = {
        .set = amdgpu_dm_set_pflip_irq_state,
        .process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_hpd_irq_funcs = {
        .set = amdgpu_dm_set_hpd_irq_state,
        .process = amdgpu_dm_irq_handler,
};

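/*
 * amdgpu_dm_set_irq_funcs() - Hook up the DM IRQ source callbacks.
 *
 * The number of IRQ types is sized from the real number of CRTCs and HPD
 * pins reported in adev->mode_info rather than from fixed maximums.
 */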
void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
{
        if (adev->mode_info.num_crtc > 0)
                adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
        else
                adev->crtc_irq.num_types = 0;
        adev->crtc_irq.funcs = &dm_crtc_irq_funcs;

        adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
        adev->pageflip_irq.funcs = &dm_pageflip_irq_funcs;

        adev->hpd_irq.num_types = adev->mode_info.num_hpd;
        adev->hpd_irq.funcs = &dm_hpd_irq_funcs;
}

/*
 * amdgpu_dm_hpd_init - hpd setup callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Setup the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
 */
void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
{
        struct drm_device *dev = adev->ddev;
        struct drm_connector *connector;

        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                struct amdgpu_dm_connector *amdgpu_dm_connector =
                                to_amdgpu_dm_connector(connector);

                const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;

                if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
                        dc_interrupt_set(adev->dm.dc,
                                        dc_link->irq_source_hpd,
                                        true);
                }

                if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
                        dc_interrupt_set(adev->dm.dc,
                                        dc_link->irq_source_hpd_rx,
                                        true);
                }
        }
}

/**
 * amdgpu_dm_hpd_fini - hpd tear down callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the hpd pins used by the card (evergreen+).
 * Disable the hpd interrupts.
 */
void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
{
        struct drm_device *dev = adev->ddev;
        struct drm_connector *connector;

        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                struct amdgpu_dm_connector *amdgpu_dm_connector =
                                to_amdgpu_dm_connector(connector);
                const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;

                dc_interrupt_set(adev->dm.dc, dc_link->irq_source_hpd, false);

                if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
                        dc_interrupt_set(adev->dm.dc,
                                        dc_link->irq_source_hpd_rx,
                                        false);
                }
        }
}