/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/sysrq.h>

#include <drm/drm_drv.h>

#include "display/intel_display_irq.h"
#include "display/intel_hotplug.h"
#include "display/intel_hotplug_irq.h"
#include "display/intel_lpe_audio.h"
#include "display/intel_psr_regs.h"

#include "gt/intel_breadcrumbs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_rps.h"

#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */
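
/*
 * Quick orientation for the gen2_* helpers below (an editorial summary of
 * how this file uses the register triad, not authoritative bspec text):
 *
 *	IER - interrupt enable: which sources may generate an interrupt
 *	IMR - interrupt mask: which sources are masked off from the CPU
 *	IIR - interrupt identity: which sources are currently asserted
 *
 * A reset therefore masks everything (IMR = ~0), disables everything
 * (IER = 0), and flushes IIR twice, since it can queue up two events.
 */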

/*
 * Interrupt statistic for PMU. Increments the counter only if the
 * interrupt originated from the GPU so interrupts from a device which
 * shares the interrupt line are not accounted.
 */
static inline void pmu_irq_stats(struct drm_i915_private *i915,
				 irqreturn_t res)
{
	if (unlikely(res != IRQ_HANDLED))
		return;

	/*
	 * A clever compiler translates that into INC. A not so clever one
	 * should at least prevent store tearing.
	 */
	WRITE_ONCE(i915->pmu.irq_count, i915->pmu.irq_count + 1);
}

void gen2_irq_reset(struct intel_uncore *uncore, struct i915_irq_regs regs)
{
	intel_uncore_write(uncore, regs.imr, 0xffffffff);
	intel_uncore_posting_read(uncore, regs.imr);

	intel_uncore_write(uncore, regs.ier, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write(uncore, regs.iir, 0xffffffff);
	intel_uncore_posting_read(uncore, regs.iir);
	intel_uncore_write(uncore, regs.iir, 0xffffffff);
	intel_uncore_posting_read(uncore, regs.iir);
}

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
void gen2_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
	u32 val = intel_uncore_read(uncore, reg);

	if (val == 0)
		return;

	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(reg), val);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
}

void gen2_irq_init(struct intel_uncore *uncore, struct i915_irq_regs regs,
		   u32 imr_val, u32 ier_val)
{
	gen2_assert_iir_is_zero(uncore, regs.iir);

	intel_uncore_write(uncore, regs.ier, ier_val);
	intel_uncore_write(uncore, regs.imr, imr_val);
	intel_uncore_posting_read(uncore, regs.imr);
}
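
/*
 * Typical gen2_irq_init() usage from a postinstall hook, mirroring the
 * GU_MISC call further down in this file (an illustrative sketch only):
 *
 *	u32 mask = GEN11_GU_MISC_GSE;
 *
 *	gen2_irq_init(uncore, GEN11_GU_MISC_IRQ_REGS, ~mask, mask);
 *
 * i.e. unmask in IMR exactly the bits that are enabled in IER.
 */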

void gen2_error_reset(struct intel_uncore *uncore, struct i915_error_regs regs)
{
	intel_uncore_write(uncore, regs.emr, 0xffffffff);
	intel_uncore_posting_read(uncore, regs.emr);

	intel_uncore_write(uncore, regs.eir, 0xffffffff);
	intel_uncore_posting_read(uncore, regs.eir);
	intel_uncore_write(uncore, regs.eir, 0xffffffff);
	intel_uncore_posting_read(uncore, regs.eir);
}

void gen2_error_init(struct intel_uncore *uncore, struct i915_error_regs regs,
		     u32 emr_val)
{
	intel_uncore_write(uncore, regs.eir, 0xffffffff);
	intel_uncore_posting_read(uncore, regs.eir);
	intel_uncore_write(uncore, regs.eir, 0xffffffff);
	intel_uncore_posting_read(uncore, regs.eir);

	intel_uncore_write(uncore, regs.emr, emr_val);
	intel_uncore_posting_read(uncore, regs.emr);
}

/**
 * ivb_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is more likely to go bad again.
 */
static void ivb_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
	struct intel_gt *gt = to_gt(dev_priv);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	u32 misccpctl;
	u8 slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = intel_uncore_rmw(&dev_priv->uncore, GEN7_MISCCPCTL,
				     GEN7_DOP_CLOCK_GATE_ENABLE, 0);
	intel_uncore_posting_read(&dev_priv->uncore, GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (drm_WARN_ON_ONCE(&dev_priv->drm,
				     slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = intel_uncore_read(&dev_priv->uncore, reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		intel_uncore_write(&dev_priv->uncore, reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		intel_uncore_posting_read(&dev_priv->uncore, reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		drm_dbg(&dev_priv->drm,
			"Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);

out:
	drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
	spin_lock_irq(gt->irq_lock);
	gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(gt->irq_lock);

	mutex_unlock(&dev_priv->drm.struct_mutex);
}
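
/*
 * For reference, the KOBJ_CHANGE notification above reaches userspace as a
 * uevent whose environment looks roughly like this (a sketch; the event
 * name is whatever I915_L3_PARITY_UEVENT expands to):
 *
 *	ACTION=change
 *	<I915_L3_PARITY_UEVENT>=1
 *	ROW=<row> BANK=<bank> SUBBANK=<subbank> SLICE=<slice>
 */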

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	struct intel_display *display = &dev_priv->display;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 iir, gt_iir, pm_iir;
		u32 eir = 0, dpinvgtt = 0;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		gt_iir = intel_uncore_read(&dev_priv->uncore, GTIIR);
		pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR);
		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
		 * bits this time around.
		 */
		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
		ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0);

		if (gt_iir)
			intel_uncore_write(&dev_priv->uncore, GTIIR, gt_iir);
		if (pm_iir)
			intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(display);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			vlv_display_error_irq_ack(display, &eir, &dpinvgtt);

		/* Call regardless, as some status bits might not be
		 * signalled in IIR */
		i9xx_pipestat_irq_ack(display, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT))
			intel_lpe_audio_irq_handler(display);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);

		intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

		if (gt_iir)
			gen6_gt_irq_handler(to_gt(dev_priv), gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(&to_gt(dev_priv)->rps, pm_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(display, hotplug_status);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			vlv_display_error_irq_handler(display, eir, dpinvgtt);

		valleyview_pipestat_irq_handler(display, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	struct intel_display *display = &dev_priv->display;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 master_ctl, iir;
		u32 eir = 0, dpinvgtt = 0;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		master_ctl = intel_uncore_read(&dev_priv->uncore, GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
		 * bits this time around.
		 */
		intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
		ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0);

		gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(display);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			vlv_display_error_irq_ack(display, &eir, &dpinvgtt);

		/* Call regardless, as some status bits might not be
		 * signalled in IIR */
		i9xx_pipestat_irq_ack(display, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT |
			   I915_LPE_PIPE_C_INTERRUPT))
			intel_lpe_audio_irq_handler(display);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);

		intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
		intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);

		if (hotplug_status)
			i9xx_hpd_irq_handler(display, hotplug_status);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			vlv_display_error_irq_handler(display, eir, dpinvgtt);

		valleyview_pipestat_irq_handler(display, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

/*
 * To handle irqs with the minimum potential for races with fresh interrupts,
 * we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
static irqreturn_t ilk_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_display *display = &i915->display;
	void __iomem * const regs = intel_uncore_regs(&i915->uncore);
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	if (unlikely(!intel_irqs_enabled(i915)))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&i915->runtime_pm);

	/* disable master interrupt before clearing iir */
	de_ier = raw_reg_read(regs, DEIER);
	raw_reg_write(regs, DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(i915)) {
		sde_ier = raw_reg_read(regs, SDEIER);
		raw_reg_write(regs, SDEIER, 0);
	}

	/* Find, clear, then process each source of interrupt */

	gt_iir = raw_reg_read(regs, GTIIR);
	if (gt_iir) {
		raw_reg_write(regs, GTIIR, gt_iir);
		if (GRAPHICS_VER(i915) >= 6)
			gen6_gt_irq_handler(to_gt(i915), gt_iir);
		else
			gen5_gt_irq_handler(to_gt(i915), gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = raw_reg_read(regs, DEIIR);
	if (de_iir) {
		raw_reg_write(regs, DEIIR, de_iir);
		if (DISPLAY_VER(i915) >= 7)
			ivb_display_irq_handler(display, de_iir);
		else
			ilk_display_irq_handler(display, de_iir);
		ret = IRQ_HANDLED;
	}

	if (GRAPHICS_VER(i915) >= 6) {
		u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR);

		if (pm_iir) {
			raw_reg_write(regs, GEN6_PMIIR, pm_iir);
			gen6_rps_irq_handler(&to_gt(i915)->rps, pm_iir);
			ret = IRQ_HANDLED;
		}
	}

	raw_reg_write(regs, DEIER, de_ier);
	if (sde_ier)
		raw_reg_write(regs, SDEIER, sde_ier);

	pmu_irq_stats(i915, ret);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	enable_rpm_wakeref_asserts(&i915->runtime_pm);

	return ret;
}

static inline u32 gen8_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN8_MASTER_IRQ);
}

static inline void gen8_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
}

static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	struct intel_display *display = &dev_priv->display;
	void __iomem * const regs = intel_uncore_regs(&dev_priv->uncore);
	u32 master_ctl;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	master_ctl = gen8_master_intr_disable(regs);
	if (!master_ctl) {
		gen8_master_intr_enable(regs);
		return IRQ_NONE;
	}

	/* Find, queue (onto bottom-halves), then clear each source */
	gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	if (master_ctl & ~GEN8_GT_IRQS) {
		disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
		gen8_de_irq_handler(display, master_ctl);
		enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
	}

	gen8_master_intr_enable(regs);

	pmu_irq_stats(dev_priv, IRQ_HANDLED);

	return IRQ_HANDLED;
}

static inline u32 gen11_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
}

static inline void gen11_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
}

static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_display *display = &i915->display;
	void __iomem * const regs = intel_uncore_regs(&i915->uncore);
	struct intel_gt *gt = to_gt(i915);
	u32 master_ctl;
	u32 gu_misc_iir;

	if (!intel_irqs_enabled(i915))
		return IRQ_NONE;

	master_ctl = gen11_master_intr_disable(regs);
	if (!master_ctl) {
		gen11_master_intr_enable(regs);
		return IRQ_NONE;
	}

	/* Find, queue (onto bottom-halves), then clear each source */
	gen11_gt_irq_handler(gt, master_ctl);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	if (master_ctl & GEN11_DISPLAY_IRQ)
		gen11_display_irq_handler(display);

	gu_misc_iir = gen11_gu_misc_irq_ack(display, master_ctl);

	gen11_master_intr_enable(regs);

	gen11_gu_misc_irq_handler(display, gu_misc_iir);

	pmu_irq_stats(i915, IRQ_HANDLED);

	return IRQ_HANDLED;
}

static inline u32 dg1_master_intr_disable(void __iomem * const regs)
{
	u32 val;

	/* First disable interrupts */
	raw_reg_write(regs, DG1_MSTR_TILE_INTR, 0);

	/* Get the indication levels and ack the master unit */
	val = raw_reg_read(regs, DG1_MSTR_TILE_INTR);
	if (unlikely(!val))
		return 0;

	raw_reg_write(regs, DG1_MSTR_TILE_INTR, val);

	return val;
}

static inline void dg1_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ);
}

static irqreturn_t dg1_irq_handler(int irq, void *arg)
{
	struct drm_i915_private * const i915 = arg;
	struct intel_display *display = &i915->display;
	struct intel_gt *gt = to_gt(i915);
	void __iomem * const regs = intel_uncore_regs(gt->uncore);
	u32 master_tile_ctl, master_ctl;
	u32 gu_misc_iir;

	if (!intel_irqs_enabled(i915))
		return IRQ_NONE;

	master_tile_ctl = dg1_master_intr_disable(regs);
	if (!master_tile_ctl) {
		dg1_master_intr_enable(regs);
		return IRQ_NONE;
	}

	/* FIXME: we only support tile 0 for now. */
	if (master_tile_ctl & DG1_MSTR_TILE(0)) {
		master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
		raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, master_ctl);
	} else {
		drm_err(&i915->drm, "Tile not supported: 0x%08x\n",
			master_tile_ctl);
		dg1_master_intr_enable(regs);
		return IRQ_NONE;
	}

	gen11_gt_irq_handler(gt, master_ctl);

	if (master_ctl & GEN11_DISPLAY_IRQ)
		gen11_display_irq_handler(display);

	gu_misc_iir = gen11_gu_misc_irq_ack(display, master_ctl);

	dg1_master_intr_enable(regs);

	gen11_gu_misc_irq_handler(display, gu_misc_iir);

	pmu_irq_stats(i915, IRQ_HANDLED);

	return IRQ_HANDLED;
}
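
/*
 * Compared to gen11, DG1 adds one more level to the interrupt hierarchy:
 * DG1_MSTR_TILE_INTR first reports which tile raised the interrupt, and
 * the per-tile GEN11_GFX_MSTR_IRQ is then read and acked much like on
 * gen11 (with only tile 0 handled here, per the FIXME above).
 */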

static void ilk_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = &dev_priv->display;
	struct intel_uncore *uncore = &dev_priv->uncore;

	gen2_irq_reset(uncore, DE_IRQ_REGS);
	dev_priv->irq_mask = ~0u;

	if (GRAPHICS_VER(dev_priv) == 7)
		intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);

	if (IS_HASWELL(dev_priv)) {
		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
	}

	gen5_gt_irq_reset(to_gt(dev_priv));

	ibx_display_irq_reset(display);
}

static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = &dev_priv->display;

	intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
	intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);

	gen5_gt_irq_reset(to_gt(dev_priv));

	vlv_display_irq_reset(display);
}

static void gen8_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = &dev_priv->display;
	struct intel_uncore *uncore = &dev_priv->uncore;

	gen8_master_intr_disable(intel_uncore_regs(uncore));

	gen8_gt_irq_reset(to_gt(dev_priv));
	gen8_display_irq_reset(display);
	gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);
}

static void gen11_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = &dev_priv->display;
	struct intel_gt *gt = to_gt(dev_priv);
	struct intel_uncore *uncore = gt->uncore;

	gen11_master_intr_disable(intel_uncore_regs(&dev_priv->uncore));

	gen11_gt_irq_reset(gt);
	gen11_display_irq_reset(display);

	gen2_irq_reset(uncore, GEN11_GU_MISC_IRQ_REGS);
	gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);
}

static void dg1_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = &dev_priv->display;
	struct intel_uncore *uncore = &dev_priv->uncore;
	struct intel_gt *gt;
	unsigned int i;

	dg1_master_intr_disable(intel_uncore_regs(&dev_priv->uncore));

	for_each_gt(gt, dev_priv, i)
		gen11_gt_irq_reset(gt);

	gen11_display_irq_reset(display);

	gen2_irq_reset(uncore, GEN11_GU_MISC_IRQ_REGS);
	gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);

	intel_uncore_write(uncore, GEN11_GFX_MSTR_IRQ, ~0);
}

static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = &dev_priv->display;
	struct intel_uncore *uncore = &dev_priv->uncore;

	intel_uncore_write(uncore, GEN8_MASTER_IRQ, 0);
	intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(to_gt(dev_priv));

	gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);

	vlv_display_irq_reset(display);
}

static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = &dev_priv->display;

	gen5_gt_irq_postinstall(to_gt(dev_priv));

	ilk_de_irq_postinstall(display);
}

static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = &dev_priv->display;

	gen5_gt_irq_postinstall(to_gt(dev_priv));

	vlv_display_irq_postinstall(display);

	intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
	intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
}

static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = &dev_priv->display;

	gen8_gt_irq_postinstall(to_gt(dev_priv));
	gen8_de_irq_postinstall(display);

	gen8_master_intr_enable(intel_uncore_regs(&dev_priv->uncore));
}

static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = &dev_priv->display;
	struct intel_gt *gt = to_gt(dev_priv);
	struct intel_uncore *uncore = gt->uncore;
	u32 gu_misc_masked = GEN11_GU_MISC_GSE;

	gen11_gt_irq_postinstall(gt);
	gen11_de_irq_postinstall(display);

	gen2_irq_init(uncore, GEN11_GU_MISC_IRQ_REGS, ~gu_misc_masked, gu_misc_masked);

	gen11_master_intr_enable(intel_uncore_regs(uncore));
	intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ);
}

static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = &dev_priv->display;
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 gu_misc_masked = GEN11_GU_MISC_GSE;
	struct intel_gt *gt;
	unsigned int i;

	for_each_gt(gt, dev_priv, i)
		gen11_gt_irq_postinstall(gt);

	gen2_irq_init(uncore, GEN11_GU_MISC_IRQ_REGS, ~gu_misc_masked, gu_misc_masked);

	dg1_de_irq_postinstall(display);

	dg1_master_intr_enable(intel_uncore_regs(uncore));
	intel_uncore_posting_read(uncore, DG1_MSTR_TILE_INTR);
}

static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = &dev_priv->display;

	gen8_gt_irq_postinstall(to_gt(dev_priv));

	vlv_display_irq_postinstall(display);

	intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
}

static u32 i9xx_error_mask(struct drm_i915_private *i915)
{
	/*
	 * On gen2/3 FBC generates (seemingly spurious)
	 * display INVALID_GTT/INVALID_GTT_PTE table errors.
	 *
	 * Also gen3 bspec has this to say:
	 * "DISPA_INVALID_GTT_PTE
	 *  [DevNapa] : Reserved. This bit does not reflect the page
	 *              table error for the display plane A."
	 *
	 * Unfortunately we can't mask off individual PGTBL_ER bits,
	 * so we just have to mask off all page table errors via EMR.
	 */
	if (HAS_FBC(i915))
		return I915_ERROR_MEMORY_REFRESH;
	else
		return I915_ERROR_PAGE_TABLE |
			I915_ERROR_MEMORY_REFRESH;
}

static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
			       u32 *eir, u32 *eir_stuck)
{
	u32 emr;

	*eir = intel_uncore_read(&dev_priv->uncore, EIR);
	intel_uncore_write(&dev_priv->uncore, EIR, *eir);

	*eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = intel_uncore_read(&dev_priv->uncore, EMR);
	intel_uncore_write(&dev_priv->uncore, EMR, 0xffffffff);
	intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck);
}

static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u32 eir, u32 eir_stuck)
{
	drm_dbg(&dev_priv->drm, "Master Error, EIR 0x%08x\n", eir);

	if (eir_stuck)
		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
			eir_stuck);

	drm_dbg(&dev_priv->drm, "PGTBL_ER: 0x%08x\n",
		intel_uncore_read(&dev_priv->uncore, PGTBL_ER));
}

static void i915_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = &dev_priv->display;
	struct intel_uncore *uncore = &dev_priv->uncore;

	i9xx_display_irq_reset(display);

	gen2_error_reset(uncore, GEN2_ERROR_REGS);
	gen2_irq_reset(uncore, GEN2_IRQ_REGS);
	dev_priv->irq_mask = ~0u;
}

static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = &dev_priv->display;
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;

	gen2_error_init(uncore, GEN2_ERROR_REGS, ~i9xx_error_mask(dev_priv));

	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (DISPLAY_VER(dev_priv) >= 3) {
		dev_priv->irq_mask &= ~I915_ASLE_INTERRUPT;
		enable_mask |= I915_ASLE_INTERRUPT;
	}

	if (HAS_HOTPLUG(dev_priv)) {
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
	}

	gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->irq_mask, enable_mask);

	i915_display_irq_postinstall(display);
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	struct intel_display *display = &dev_priv->display;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (HAS_HOTPLUG(dev_priv) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(display);

		/* Call regardless, as some status bits might not be
		 * signalled in IIR */
		i9xx_pipestat_irq_ack(display, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(display, hotplug_status);

		i915_pipestat_irq_handler(display, iir, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static void i965_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = &dev_priv->display;
	struct intel_uncore *uncore = &dev_priv->uncore;

	i9xx_display_irq_reset(display);

	gen2_error_reset(uncore, GEN2_ERROR_REGS);
	gen2_irq_reset(uncore, GEN2_IRQ_REGS);
	dev_priv->irq_mask = ~0u;
}

static u32 i965_error_mask(struct drm_i915_private *i915)
{
	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 *
	 * i965 FBC no longer generates spurious GTT errors,
	 * so we can always enable the page table errors.
	 */
	if (IS_G4X(i915))
		return GM45_ERROR_PAGE_TABLE |
			GM45_ERROR_MEM_PRIV |
			GM45_ERROR_CP_PRIV |
			I915_ERROR_MEMORY_REFRESH;
	else
		return I915_ERROR_PAGE_TABLE |
			I915_ERROR_MEMORY_REFRESH;
}

static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = &dev_priv->display;
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;

	gen2_error_init(uncore, GEN2_ERROR_REGS, ~i965_error_mask(dev_priv));

	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PORT_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->irq_mask, enable_mask);

	i965_display_irq_postinstall(display);
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	struct intel_display *display = &dev_priv->display;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(display);

		/* Call regardless, as some status bits might not be
		 * signalled in IIR */
		i9xx_pipestat_irq_ack(display, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0],
					    iir);

		if (iir & I915_BSD_USER_INTERRUPT)
			intel_engine_cs_irq(to_gt(dev_priv)->engine[VCS0],
					    iir >> 25);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(display, hotplug_status);

		i965_pipestat_irq_handler(display, iir, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not set up the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	int i;

	INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
	for (i = 0; i < MAX_L3_SLICES; ++i)
		dev_priv->l3_parity.remap_info[i] = NULL;

	/* pre-gen11 the GuC irq bits are in the upper 16 bits of the pm reg */
	if (HAS_GT_UC(dev_priv) && GRAPHICS_VER(dev_priv) < 11)
		to_gt(dev_priv)->pm_guc_events = GUC_INTR_GUC2HOST << 16;
}

/**
 * intel_irq_fini - deinitializes IRQ support
 * @i915: i915 device instance
 *
 * This function deinitializes all the IRQ support.
 */
void intel_irq_fini(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < MAX_L3_SLICES; ++i)
		kfree(i915->l3_parity.remap_info[i]);
}

static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			return cherryview_irq_handler;
		else if (IS_VALLEYVIEW(dev_priv))
			return valleyview_irq_handler;
		else if (GRAPHICS_VER(dev_priv) == 4)
			return i965_irq_handler;
		else
			return i915_irq_handler;
	} else {
		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
			return dg1_irq_handler;
		else if (GRAPHICS_VER(dev_priv) >= 11)
			return gen11_irq_handler;
		else if (GRAPHICS_VER(dev_priv) >= 8)
			return gen8_irq_handler;
		else
			return ilk_irq_handler;
	}
}
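
/*
 * Note that the platform checks in intel_irq_handler() above mirror those
 * in intel_irq_reset() and intel_irq_postinstall() below; the three
 * dispatchers must agree on which generation bucket a device falls into.
 */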

static void intel_irq_reset(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_irq_reset(dev_priv);
		else if (IS_VALLEYVIEW(dev_priv))
			valleyview_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) == 4)
			i965_irq_reset(dev_priv);
		else
			i915_irq_reset(dev_priv);
	} else {
		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
			dg1_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 11)
			gen11_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 8)
			gen8_irq_reset(dev_priv);
		else
			ilk_irq_reset(dev_priv);
	}
}

static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_irq_postinstall(dev_priv);
		else if (IS_VALLEYVIEW(dev_priv))
			valleyview_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) == 4)
			i965_irq_postinstall(dev_priv);
		else
			i915_irq_postinstall(dev_priv);
	} else {
		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
			dg1_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 11)
			gen11_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 8)
			gen8_irq_postinstall(dev_priv);
		else
			ilk_irq_postinstall(dev_priv);
	}
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	int irq = to_pci_dev(dev_priv->drm.dev)->irq;
	int ret;

	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->irqs_enabled = true;

	intel_irq_reset(dev_priv);

	ret = request_irq(irq, intel_irq_handler(dev_priv),
			  IRQF_SHARED, DRIVER_NAME, dev_priv);
	if (ret < 0) {
		dev_priv->irqs_enabled = false;
		return ret;
	}

	intel_irq_postinstall(dev_priv);

	return ret;
}
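
/*
 * Expected call ordering over a device's lifetime, per the kerneldoc in
 * this file (a sketch of the intent, not a verbatim copy of the probe path):
 *
 *	intel_irq_init(i915);		- work items and vtables, no hw access
 *	intel_irq_install(i915);	- reset, request_irq(), postinstall
 *	intel_irq_suspend(i915);	- runtime PM: quiesce interrupts
 *	intel_irq_resume(i915);		- runtime PM: reset + postinstall again
 *	intel_irq_uninstall(i915);	- reset and free_irq() on unload
 */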

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = &dev_priv->display;
	int irq = to_pci_dev(dev_priv->drm.dev)->irq;

	if (drm_WARN_ON(&dev_priv->drm, !dev_priv->irqs_enabled))
		return;

	intel_irq_reset(dev_priv);

	free_irq(irq, dev_priv);

	intel_hpd_cancel_work(display);
	dev_priv->irqs_enabled = false;
}

/**
 * intel_irq_suspend - Suspend interrupts
 * @i915: i915 device instance
 *
 * This function is used to disable interrupts at runtime.
 */
void intel_irq_suspend(struct drm_i915_private *i915)
{
	intel_irq_reset(i915);
	i915->irqs_enabled = false;
	intel_synchronize_irq(i915);
}

/**
 * intel_irq_resume - Resume interrupts
 * @i915: i915 device instance
 *
 * This function is used to enable interrupts at runtime.
 */
void intel_irq_resume(struct drm_i915_private *i915)
{
	i915->irqs_enabled = true;
	intel_irq_reset(i915);
	intel_irq_postinstall(i915);
}

bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
{
	return dev_priv->irqs_enabled;
}

void intel_synchronize_irq(struct drm_i915_private *i915)
{
	synchronize_irq(to_pci_dev(i915->drm.dev)->irq);
}

void intel_synchronize_hardirq(struct drm_i915_private *i915)
{
	synchronize_hardirq(to_pci_dev(i915->drm.dev)->irq);
}