/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/sysrq.h>

#include <drm/drm_drv.h>

#include "display/icl_dsi_regs.h"
#include "display/intel_de.h"
#include "display/intel_display_trace.h"
#include "display/intel_display_types.h"
#include "display/intel_fdi_regs.h"
#include "display/intel_fifo_underrun.h"
#include "display/intel_hotplug.h"
#include "display/intel_lpe_audio.h"
#include "display/intel_psr.h"
#include "display/intel_psr_regs.h"

#include "gt/intel_breadcrumbs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_rps.h"

#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_irq.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */
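
/*
 * A brief orientation for the helpers below (an editorial sketch, not a
 * literal call sequence from this file): each interrupt domain is
 * programmed through three registers. IER enables a source, IMR masks it
 * from raising a CPU interrupt, and IIR latches which sources have fired
 * and is cleared by writing the set bits back. Reset and bring-up are
 * therefore paired roughly like so:
 *
 *	gen3_irq_reset(uncore, DEIMR, DEIIR, DEIER);	// mask and clear all
 *	// ...configure per-source hardware state...
 *	gen3_irq_init(uncore, DEIMR, ~enable_mask,	// assert IIR is zero,
 *		      DEIER, enable_mask, DEIIR);	// then unmask sources
 *
 * where enable_mask stands in for whatever DE_* bits the platform wants
 * enabled.
 */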

/*
 * Interrupt statistic for PMU. Increments the counter only if the
 * interrupt originated from the GPU so interrupts from a device which
 * shares the interrupt line are not accounted.
 */
static inline void pmu_irq_stats(struct drm_i915_private *i915,
                                 irqreturn_t res)
{
        if (unlikely(res != IRQ_HANDLED))
                return;

        /*
         * A clever compiler translates that into INC. A not so clever one
         * should at least prevent store tearing.
         */
        WRITE_ONCE(i915->pmu.irq_count, i915->pmu.irq_count + 1);
}
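
/*
 * Usage note (an assumption about the consumer, not code from this file):
 * the counter bumped above backs the i915 PMU's "interrupts" event
 * (I915_PMU_INTERRUPTS), so the interrupt rate can be observed from
 * userspace with something like:
 *
 *	perf stat -e i915/interrupts/ -a sleep 1
 */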

typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);
typedef u32 (*hotplug_enables_func)(struct intel_encoder *encoder);

static const u32 hpd_ilk[HPD_NUM_PINS] = {
        [HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
        [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
        [HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
        [HPD_CRT] = SDE_CRT_HOTPLUG,
        [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG,
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
        [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
        [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
        [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
        [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT,
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_EN,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN,
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};

static const u32 hpd_bxt[HPD_NUM_PINS] = {
        [HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
        [HPD_PORT_B] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_B),
        [HPD_PORT_C] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_C),
};

static const u32 hpd_gen11[HPD_NUM_PINS] = {
        [HPD_PORT_TC1] = GEN11_TC_HOTPLUG(HPD_PORT_TC1) | GEN11_TBT_HOTPLUG(HPD_PORT_TC1),
        [HPD_PORT_TC2] = GEN11_TC_HOTPLUG(HPD_PORT_TC2) | GEN11_TBT_HOTPLUG(HPD_PORT_TC2),
        [HPD_PORT_TC3] = GEN11_TC_HOTPLUG(HPD_PORT_TC3) | GEN11_TBT_HOTPLUG(HPD_PORT_TC3),
        [HPD_PORT_TC4] = GEN11_TC_HOTPLUG(HPD_PORT_TC4) | GEN11_TBT_HOTPLUG(HPD_PORT_TC4),
        [HPD_PORT_TC5] = GEN11_TC_HOTPLUG(HPD_PORT_TC5) | GEN11_TBT_HOTPLUG(HPD_PORT_TC5),
        [HPD_PORT_TC6] = GEN11_TC_HOTPLUG(HPD_PORT_TC6) | GEN11_TBT_HOTPLUG(HPD_PORT_TC6),
};

static const u32 hpd_xelpdp[HPD_NUM_PINS] = {
        [HPD_PORT_TC1] = XELPDP_TBT_HOTPLUG(HPD_PORT_TC1) | XELPDP_DP_ALT_HOTPLUG(HPD_PORT_TC1),
        [HPD_PORT_TC2] = XELPDP_TBT_HOTPLUG(HPD_PORT_TC2) | XELPDP_DP_ALT_HOTPLUG(HPD_PORT_TC2),
        [HPD_PORT_TC3] = XELPDP_TBT_HOTPLUG(HPD_PORT_TC3) | XELPDP_DP_ALT_HOTPLUG(HPD_PORT_TC3),
        [HPD_PORT_TC4] = XELPDP_TBT_HOTPLUG(HPD_PORT_TC4) | XELPDP_DP_ALT_HOTPLUG(HPD_PORT_TC4),
};

static const u32 hpd_icp[HPD_NUM_PINS] = {
        [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
        [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
        [HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
        [HPD_PORT_TC1] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1),
        [HPD_PORT_TC2] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC2),
        [HPD_PORT_TC3] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC3),
        [HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4),
        [HPD_PORT_TC5] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC5),
        [HPD_PORT_TC6] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC6),
};

static const u32 hpd_sde_dg1[HPD_NUM_PINS] = {
        [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
        [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
        [HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
        [HPD_PORT_D] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_D),
        [HPD_PORT_TC1] = SDE_TC_HOTPLUG_DG2(HPD_PORT_TC1),
};

static const u32 hpd_mtp[HPD_NUM_PINS] = {
        [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
        [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
        [HPD_PORT_TC1] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1),
        [HPD_PORT_TC2] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC2),
        [HPD_PORT_TC3] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC3),
        [HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4),
};

static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
{
        struct intel_hotplug *hpd = &dev_priv->display.hotplug;

        if (HAS_GMCH(dev_priv)) {
                if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
                    IS_CHERRYVIEW(dev_priv))
                        hpd->hpd = hpd_status_g4x;
                else
                        hpd->hpd = hpd_status_i915;
                return;
        }

        if (DISPLAY_VER(dev_priv) >= 14)
                hpd->hpd = hpd_xelpdp;
        else if (DISPLAY_VER(dev_priv) >= 11)
                hpd->hpd = hpd_gen11;
        else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
                hpd->hpd = hpd_bxt;
        else if (DISPLAY_VER(dev_priv) == 9)
                hpd->hpd = NULL; /* no north HPD on SKL */
        else if (DISPLAY_VER(dev_priv) >= 8)
                hpd->hpd = hpd_bdw;
        else if (DISPLAY_VER(dev_priv) >= 7)
                hpd->hpd = hpd_ivb;
        else
                hpd->hpd = hpd_ilk;

        if ((INTEL_PCH_TYPE(dev_priv) < PCH_DG1) &&
            (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv)))
                return;

        if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
                hpd->pch_hpd = hpd_sde_dg1;
        else if (INTEL_PCH_TYPE(dev_priv) >= PCH_MTP)
                hpd->pch_hpd = hpd_mtp;
        else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
                hpd->pch_hpd = hpd_icp;
        else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv))
                hpd->pch_hpd = hpd_spt;
        else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_CPT(dev_priv))
                hpd->pch_hpd = hpd_cpt;
        else if (HAS_PCH_IBX(dev_priv))
                hpd->pch_hpd = hpd_ibx;
        else
                MISSING_CASE(INTEL_PCH_TYPE(dev_priv));
}

static void
intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);

        drm_crtc_handle_vblank(&crtc->base);
}

void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
                    i915_reg_t iir, i915_reg_t ier)
{
        intel_uncore_write(uncore, imr, 0xffffffff);
        intel_uncore_posting_read(uncore, imr);

        intel_uncore_write(uncore, ier, 0);

        /* IIR can theoretically queue up two events. Be paranoid. */
        intel_uncore_write(uncore, iir, 0xffffffff);
        intel_uncore_posting_read(uncore, iir);
        intel_uncore_write(uncore, iir, 0xffffffff);
        intel_uncore_posting_read(uncore, iir);
}

static void gen2_irq_reset(struct intel_uncore *uncore)
{
        intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
        intel_uncore_posting_read16(uncore, GEN2_IMR);

        intel_uncore_write16(uncore, GEN2_IER, 0);

        /* IIR can theoretically queue up two events. Be paranoid. */
        intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
        intel_uncore_posting_read16(uncore, GEN2_IIR);
        intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
        intel_uncore_posting_read16(uncore, GEN2_IIR);
}

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
        u32 val = intel_uncore_read(uncore, reg);

        if (val == 0)
                return;

        drm_WARN(&uncore->i915->drm, 1,
                 "Interrupt register 0x%x is not zero: 0x%08x\n",
                 i915_mmio_reg_offset(reg), val);
        intel_uncore_write(uncore, reg, 0xffffffff);
        intel_uncore_posting_read(uncore, reg);
        intel_uncore_write(uncore, reg, 0xffffffff);
        intel_uncore_posting_read(uncore, reg);
}

static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
{
        u16 val = intel_uncore_read16(uncore, GEN2_IIR);

        if (val == 0)
                return;

        drm_WARN(&uncore->i915->drm, 1,
                 "Interrupt register 0x%x is not zero: 0x%08x\n",
                 i915_mmio_reg_offset(GEN2_IIR), val);
        intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
        intel_uncore_posting_read16(uncore, GEN2_IIR);
        intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
        intel_uncore_posting_read16(uncore, GEN2_IIR);
}

void gen3_irq_init(struct intel_uncore *uncore,
                   i915_reg_t imr, u32 imr_val,
                   i915_reg_t ier, u32 ier_val,
                   i915_reg_t iir)
{
        gen3_assert_iir_is_zero(uncore, iir);

        intel_uncore_write(uncore, ier, ier_val);
        intel_uncore_write(uncore, imr, imr_val);
        intel_uncore_posting_read(uncore, imr);
}

static void gen2_irq_init(struct intel_uncore *uncore,
                          u32 imr_val, u32 ier_val)
{
        gen2_assert_iir_is_zero(uncore);

        intel_uncore_write16(uncore, GEN2_IER, ier_val);
        intel_uncore_write16(uncore, GEN2_IMR, imr_val);
        intel_uncore_posting_read16(uncore, GEN2_IMR);
}

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
                                     u32 mask,
                                     u32 bits)
{
        lockdep_assert_held(&dev_priv->irq_lock);
        drm_WARN_ON(&dev_priv->drm, bits & ~mask);

        intel_uncore_rmw(&dev_priv->uncore, PORT_HOTPLUG_EN, mask, bits);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To avoid that read-modify-write cycles
 * interfere, these bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * already held, it acquires the lock itself. A non-locking
 * version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
                                   u32 mask,
                                   u32 bits)
{
        spin_lock_irq(&dev_priv->irq_lock);
        i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
        spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_display_irq(struct drm_i915_private *dev_priv,
                                   u32 interrupt_mask, u32 enabled_irq_mask)
{
        u32 new_val;

        lockdep_assert_held(&dev_priv->irq_lock);
        drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

        new_val = dev_priv->irq_mask;
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != dev_priv->irq_mask &&
            !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) {
                dev_priv->irq_mask = new_val;
                intel_uncore_write(&dev_priv->uncore, DEIMR, dev_priv->irq_mask);
                intel_uncore_posting_read(&dev_priv->uncore, DEIMR);
        }
}

void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits)
{
        ilk_update_display_irq(i915, bits, bits);
}

void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits)
{
        ilk_update_display_irq(i915, bits, 0);
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
                                u32 interrupt_mask,
                                u32 enabled_irq_mask)
{
        u32 new_val;
        u32 old_val;

        lockdep_assert_held(&dev_priv->irq_lock);

        drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

        if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
                return;

        old_val = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);

        new_val = old_val;
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != old_val) {
                intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IMR, new_val);
                intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
        }
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
                                enum pipe pipe, u32 interrupt_mask,
                                u32 enabled_irq_mask)
{
        u32 new_val;

        lockdep_assert_held(&dev_priv->irq_lock);

        drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

        if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
                return;

        new_val = dev_priv->de_irq_mask[pipe];
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != dev_priv->de_irq_mask[pipe]) {
                dev_priv->de_irq_mask[pipe] = new_val;
                intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
                intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe));
        }
}

void bdw_enable_pipe_irq(struct drm_i915_private *i915,
                         enum pipe pipe, u32 bits)
{
        bdw_update_pipe_irq(i915, pipe, bits, bits);
}

void bdw_disable_pipe_irq(struct drm_i915_private *i915,
                          enum pipe pipe, u32 bits)
{
        bdw_update_pipe_irq(i915, pipe, bits, 0);
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
                                         u32 interrupt_mask,
                                         u32 enabled_irq_mask)
{
        u32 sdeimr = intel_uncore_read(&dev_priv->uncore, SDEIMR);
        sdeimr &= ~interrupt_mask;
        sdeimr |= (~enabled_irq_mask & interrupt_mask);

        drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

        lockdep_assert_held(&dev_priv->irq_lock);

        if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
                return;

        intel_uncore_write(&dev_priv->uncore, SDEIMR, sdeimr);
        intel_uncore_posting_read(&dev_priv->uncore, SDEIMR);
}

void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits)
{
        ibx_display_interrupt_update(i915, bits, bits);
}

void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits)
{
        ibx_display_interrupt_update(i915, bits, 0);
}

u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
                              enum pipe pipe)
{
        u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
        u32 enable_mask = status_mask << 16;

        lockdep_assert_held(&dev_priv->irq_lock);

        if (DISPLAY_VER(dev_priv) < 5)
                goto out;

        /*
         * On pipe A we don't support the PSR interrupt yet,
         * on pipe B and C the same bit MBZ.
         */
        if (drm_WARN_ON_ONCE(&dev_priv->drm,
                             status_mask & PIPE_A_PSR_STATUS_VLV))
                return 0;
        /*
         * On pipe B and C we don't support the PSR interrupt yet, on pipe
         * A the same bit is for perf counters which we don't use either.
         */
        if (drm_WARN_ON_ONCE(&dev_priv->drm,
                             status_mask & PIPE_B_PSR_STATUS_VLV))
                return 0;

        enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
                         SPRITE0_FLIP_DONE_INT_EN_VLV |
                         SPRITE1_FLIP_DONE_INT_EN_VLV);
        if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
                enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
        if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
                enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
        drm_WARN_ONCE(&dev_priv->drm,
                      enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
                      status_mask & ~PIPESTAT_INT_STATUS_MASK,
                      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
                      pipe_name(pipe), enable_mask, status_mask);

        return enable_mask;
}
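
/*
 * Worked example for the mask translation above: the low 16 bits of
 * PIPESTAT are latched status bits and the high 16 bits are the matching
 * enable bits, so for most sources the enable bit is simply the status
 * bit shifted up by 16:
 *
 *	status_mask = PIPE_VBLANK_INTERRUPT_STATUS;	// bit 1
 *	enable_mask = status_mask << 16;		// bit 17, the enable
 *
 * The VLV/CHV sprite flip-done and PSR bits handled above are the
 * exceptions that need explicit translation.
 */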

void i915_enable_pipestat(struct drm_i915_private *dev_priv,
                          enum pipe pipe, u32 status_mask)
{
        i915_reg_t reg = PIPESTAT(pipe);
        u32 enable_mask;

        drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
                      "pipe %c: status_mask=0x%x\n",
                      pipe_name(pipe), status_mask);

        lockdep_assert_held(&dev_priv->irq_lock);
        drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

        if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
                return;

        dev_priv->pipestat_irq_mask[pipe] |= status_mask;
        enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

        intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
        intel_uncore_posting_read(&dev_priv->uncore, reg);
}

void i915_disable_pipestat(struct drm_i915_private *dev_priv,
                           enum pipe pipe, u32 status_mask)
{
        i915_reg_t reg = PIPESTAT(pipe);
        u32 enable_mask;

        drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
                      "pipe %c: status_mask=0x%x\n",
                      pipe_name(pipe), status_mask);

        lockdep_assert_held(&dev_priv->irq_lock);
        drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

        if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
                return;

        dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
        enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

        intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
        intel_uncore_posting_read(&dev_priv->uncore, reg);
}

static bool i915_has_asle(struct drm_i915_private *dev_priv)
{
        if (!dev_priv->display.opregion.asle)
                return false;

        return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
        if (!i915_has_asle(dev_priv))
                return;

        spin_lock_irq(&dev_priv->irq_lock);

        i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
        if (DISPLAY_VER(dev_priv) >= 4)
                i915_enable_pipestat(dev_priv, PIPE_A,
                                     PIPE_LEGACY_BLC_EVENT_STATUS);

        spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ivb_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is likely to go bad again.
 */
static void ivb_parity_work(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, typeof(*dev_priv), l3_parity.error_work);
        struct intel_gt *gt = to_gt(dev_priv);
        u32 error_status, row, bank, subbank;
        char *parity_event[6];
        u32 misccpctl;
        u8 slice = 0;

        /* We must turn off DOP level clock gating to access the L3 registers.
         * In order to prevent a get/put style interface, acquire struct mutex
         * any time we access those registers.
         */
        mutex_lock(&dev_priv->drm.struct_mutex);

        /* If we've screwed up tracking, just let the interrupt fire again */
        if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
                goto out;

        misccpctl = intel_uncore_rmw(&dev_priv->uncore, GEN7_MISCCPCTL,
                                     GEN7_DOP_CLOCK_GATE_ENABLE, 0);
        intel_uncore_posting_read(&dev_priv->uncore, GEN7_MISCCPCTL);

        while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
                i915_reg_t reg;

                slice--;
                if (drm_WARN_ON_ONCE(&dev_priv->drm,
                                     slice >= NUM_L3_SLICES(dev_priv)))
                        break;

                dev_priv->l3_parity.which_slice &= ~(1<<slice);

                reg = GEN7_L3CDERRST1(slice);

                error_status = intel_uncore_read(&dev_priv->uncore, reg);
                row = GEN7_PARITY_ERROR_ROW(error_status);
                bank = GEN7_PARITY_ERROR_BANK(error_status);
                subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

                intel_uncore_write(&dev_priv->uncore, reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
                intel_uncore_posting_read(&dev_priv->uncore, reg);

                parity_event[0] = I915_L3_PARITY_UEVENT "=1";
                parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
                parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
                parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
                parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
                parity_event[5] = NULL;

                kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
                                   KOBJ_CHANGE, parity_event);

                drm_dbg(&dev_priv->drm,
                        "Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
                        slice, row, bank, subbank);

                kfree(parity_event[4]);
                kfree(parity_event[3]);
                kfree(parity_event[2]);
                kfree(parity_event[1]);
        }

        intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);

out:
        drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
        spin_lock_irq(gt->irq_lock);
        gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
        spin_unlock_irq(gt->irq_lock);

        mutex_unlock(&dev_priv->drm.struct_mutex);
}
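
/*
 * For reference, a sketch of the uevent the parity worker above emits
 * (the leading variable comes from whatever I915_L3_PARITY_UEVENT
 * expands to; the rest mirrors the kasprintf() calls):
 *
 *	ACTION=change
 *	<I915_L3_PARITY_UEVENT>=1
 *	ROW=<n>
 *	BANK=<n>
 *	SUBBANK=<n>
 *	SLICE=<n>
 *
 * A userspace listener is expected to react by remapping the reported
 * row, e.g. through the driver's l3_parity sysfs interface.
 */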

static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
        switch (pin) {
        case HPD_PORT_TC1:
        case HPD_PORT_TC2:
        case HPD_PORT_TC3:
        case HPD_PORT_TC4:
        case HPD_PORT_TC5:
        case HPD_PORT_TC6:
                return val & GEN11_HOTPLUG_CTL_LONG_DETECT(pin);
        default:
                return false;
        }
}

static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
        switch (pin) {
        case HPD_PORT_A:
                return val & PORTA_HOTPLUG_LONG_DETECT;
        case HPD_PORT_B:
                return val & PORTB_HOTPLUG_LONG_DETECT;
        case HPD_PORT_C:
                return val & PORTC_HOTPLUG_LONG_DETECT;
        default:
                return false;
        }
}

static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
        switch (pin) {
        case HPD_PORT_A:
        case HPD_PORT_B:
        case HPD_PORT_C:
        case HPD_PORT_D:
                return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(pin);
        default:
                return false;
        }
}

static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
        switch (pin) {
        case HPD_PORT_TC1:
        case HPD_PORT_TC2:
        case HPD_PORT_TC3:
        case HPD_PORT_TC4:
        case HPD_PORT_TC5:
        case HPD_PORT_TC6:
                return val & ICP_TC_HPD_LONG_DETECT(pin);
        default:
                return false;
        }
}

static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
{
        switch (pin) {
        case HPD_PORT_E:
                return val & PORTE_HOTPLUG_LONG_DETECT;
        default:
                return false;
        }
}

static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
        switch (pin) {
        case HPD_PORT_A:
                return val & PORTA_HOTPLUG_LONG_DETECT;
        case HPD_PORT_B:
                return val & PORTB_HOTPLUG_LONG_DETECT;
        case HPD_PORT_C:
                return val & PORTC_HOTPLUG_LONG_DETECT;
        case HPD_PORT_D:
                return val & PORTD_HOTPLUG_LONG_DETECT;
        default:
                return false;
        }
}

static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
        switch (pin) {
        case HPD_PORT_A:
                return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
        default:
                return false;
        }
}

static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
        switch (pin) {
        case HPD_PORT_B:
                return val & PORTB_HOTPLUG_LONG_DETECT;
        case HPD_PORT_C:
                return val & PORTC_HOTPLUG_LONG_DETECT;
        case HPD_PORT_D:
                return val & PORTD_HOTPLUG_LONG_DETECT;
        default:
                return false;
        }
}

static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
        switch (pin) {
        case HPD_PORT_B:
                return val & PORTB_HOTPLUG_INT_LONG_PULSE;
        case HPD_PORT_C:
                return val & PORTC_HOTPLUG_INT_LONG_PULSE;
        case HPD_PORT_D:
                return val & PORTD_HOTPLUG_INT_LONG_PULSE;
        default:
                return false;
        }
}

/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
                               u32 *pin_mask, u32 *long_mask,
                               u32 hotplug_trigger, u32 dig_hotplug_reg,
                               const u32 hpd[HPD_NUM_PINS],
                               bool long_pulse_detect(enum hpd_pin pin, u32 val))
{
        enum hpd_pin pin;

        BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS);

        for_each_hpd_pin(pin) {
                if ((hpd[pin] & hotplug_trigger) == 0)
                        continue;

                *pin_mask |= BIT(pin);

                if (long_pulse_detect(pin, dig_hotplug_reg))
                        *long_mask |= BIT(pin);
        }

        drm_dbg(&dev_priv->drm,
                "hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
                hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
}
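
/*
 * Typical calling pattern for intel_get_hpd_pins(), mirroring the irq
 * handlers later in this file (a sketch; trigger, dig_reg and long_detect
 * are placeholders for the platform-specific values):
 *
 *	u32 pin_mask = 0, long_mask = 0;
 *
 *	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
 *			   trigger, dig_reg,
 *			   dev_priv->display.hotplug.hpd,
 *			   long_detect);
 *	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
 */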

static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
                                  const u32 hpd[HPD_NUM_PINS])
{
        struct intel_encoder *encoder;
        u32 enabled_irqs = 0;

        for_each_intel_encoder(&dev_priv->drm, encoder)
                if (dev_priv->display.hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
                        enabled_irqs |= hpd[encoder->hpd_pin];

        return enabled_irqs;
}

static u32 intel_hpd_hotplug_irqs(struct drm_i915_private *dev_priv,
                                  const u32 hpd[HPD_NUM_PINS])
{
        struct intel_encoder *encoder;
        u32 hotplug_irqs = 0;

        for_each_intel_encoder(&dev_priv->drm, encoder)
                hotplug_irqs |= hpd[encoder->hpd_pin];

        return hotplug_irqs;
}

static u32 intel_hpd_hotplug_enables(struct drm_i915_private *i915,
                                     hotplug_enables_func hotplug_enables)
{
        struct intel_encoder *encoder;
        u32 hotplug = 0;

        for_each_intel_encoder(&i915->drm, encoder)
                hotplug |= hotplug_enables(encoder);

        return hotplug;
}

static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
        wake_up_all(&dev_priv->display.gmbus.wait_queue);
}

static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
        wake_up_all(&dev_priv->display.gmbus.wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
                                         enum pipe pipe,
                                         u32 crc0, u32 crc1,
                                         u32 crc2, u32 crc3,
                                         u32 crc4)
{
        struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
        struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
        u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };

        trace_intel_pipe_crc(crtc, crcs);

        spin_lock(&pipe_crc->lock);
        /*
         * For some not yet identified reason, the first CRC is
         * bonkers. So let's just wait for the next vblank and read
         * out the buggy result.
         *
         * On GEN8+ sometimes the second CRC is bonkers as well, so
         * don't trust that one either.
         */
        if (pipe_crc->skipped <= 0 ||
            (DISPLAY_VER(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
                pipe_crc->skipped++;
                spin_unlock(&pipe_crc->lock);
                return;
        }
        spin_unlock(&pipe_crc->lock);

        drm_crtc_add_crc_entry(&crtc->base, true,
                                drm_crtc_accurate_vblank_count(&crtc->base),
                                crcs);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
                             enum pipe pipe,
                             u32 crc0, u32 crc1,
                             u32 crc2, u32 crc3,
                             u32 crc4) {}
#endif

static void flip_done_handler(struct drm_i915_private *i915,
                              enum pipe pipe)
{
        struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe);
        struct drm_crtc_state *crtc_state = crtc->base.state;
        struct drm_pending_vblank_event *e = crtc_state->event;
        struct drm_device *dev = &i915->drm;
        unsigned long irqflags;

        spin_lock_irqsave(&dev->event_lock, irqflags);

        crtc_state->event = NULL;

        drm_crtc_send_vblank_event(&crtc->base, e);

        spin_unlock_irqrestore(&dev->event_lock, irqflags);
}

static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
                                     enum pipe pipe)
{
        display_pipe_crc_irq_handler(dev_priv, pipe,
                                     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
                                     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
                                     enum pipe pipe)
{
        display_pipe_crc_irq_handler(dev_priv, pipe,
                                     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
                                     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_2_IVB(pipe)),
                                     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_3_IVB(pipe)),
                                     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_4_IVB(pipe)),
                                     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
                                      enum pipe pipe)
{
        u32 res1, res2;

        if (DISPLAY_VER(dev_priv) >= 3)
                res1 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES1_I915(pipe));
        else
                res1 = 0;

        if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
                res2 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES2_G4X(pipe));
        else
                res2 = 0;

        display_pipe_crc_irq_handler(dev_priv, pipe,
                                     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RED(pipe)),
                                     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_GREEN(pipe)),
                                     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_BLUE(pipe)),
                                     res1, res2);
}

static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
        enum pipe pipe;

        for_each_pipe(dev_priv, pipe) {
                intel_uncore_write(&dev_priv->uncore, PIPESTAT(pipe),
                           PIPESTAT_INT_STATUS_MASK |
                           PIPE_FIFO_UNDERRUN_STATUS);

                dev_priv->pipestat_irq_mask[pipe] = 0;
        }
}

static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
                                  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
        enum pipe pipe;

        spin_lock(&dev_priv->irq_lock);

        if (!dev_priv->display_irqs_enabled) {
                spin_unlock(&dev_priv->irq_lock);
                return;
        }

        for_each_pipe(dev_priv, pipe) {
                i915_reg_t reg;
                u32 status_mask, enable_mask, iir_bit = 0;

                /*
                 * PIPESTAT bits get signalled even when the interrupt is
                 * disabled with the mask bits, and some of the status bits do
                 * not generate interrupts at all (like the underrun bit). Hence
                 * we need to be careful that we only handle what we want to
                 * handle.
                 */

                /* fifo underruns are filtered in the underrun handler. */
1064                 status_mask = PIPE_FIFO_UNDERRUN_STATUS;
1065
1066                 switch (pipe) {
1067                 default:
1068                 case PIPE_A:
1069                         iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1070                         break;
1071                 case PIPE_B:
1072                         iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1073                         break;
1074                 case PIPE_C:
1075                         iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1076                         break;
1077                 }
1078                 if (iir & iir_bit)
1079                         status_mask |= dev_priv->pipestat_irq_mask[pipe];
1080
1081                 if (!status_mask)
1082                         continue;
1083
1084                 reg = PIPESTAT(pipe);
1085                 pipe_stats[pipe] = intel_uncore_read(&dev_priv->uncore, reg) & status_mask;
1086                 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
1087
1088                 /*
1089                  * Clear the PIPE*STAT regs before the IIR
1090                  *
1091                  * Toggle the enable bits to make sure we get an
1092                  * edge in the ISR pipe event bit if we don't clear
1093                  * all the enabled status bits. Otherwise the edge
1094                  * triggered IIR on i965/g4x wouldn't notice that
1095                  * an interrupt is still pending.
1096                  */
1097                 if (pipe_stats[pipe]) {
1098                         intel_uncore_write(&dev_priv->uncore, reg, pipe_stats[pipe]);
1099                         intel_uncore_write(&dev_priv->uncore, reg, enable_mask);
1100                 }
1101         }
1102         spin_unlock(&dev_priv->irq_lock);
1103 }
1104
1105 static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1106                                       u16 iir, u32 pipe_stats[I915_MAX_PIPES])
1107 {
1108         enum pipe pipe;
1109
1110         for_each_pipe(dev_priv, pipe) {
1111                 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1112                         intel_handle_vblank(dev_priv, pipe);
1113
1114                 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1115                         i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1116
1117                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1118                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1119         }
1120 }
1121
1122 static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1123                                       u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1124 {
1125         bool blc_event = false;
1126         enum pipe pipe;
1127
1128         for_each_pipe(dev_priv, pipe) {
1129                 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1130                         intel_handle_vblank(dev_priv, pipe);
1131
1132                 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1133                         blc_event = true;
1134
1135                 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1136                         i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1137
1138                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1139                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1140         }
1141
1142         if (blc_event || (iir & I915_ASLE_INTERRUPT))
1143                 intel_opregion_asle_intr(dev_priv);
1144 }
1145
1146 static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1147                                       u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1148 {
1149         bool blc_event = false;
1150         enum pipe pipe;
1151
1152         for_each_pipe(dev_priv, pipe) {
1153                 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1154                         intel_handle_vblank(dev_priv, pipe);
1155
1156                 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1157                         blc_event = true;
1158
1159                 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1160                         i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1161
1162                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1163                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1164         }
1165
1166         if (blc_event || (iir & I915_ASLE_INTERRUPT))
1167                 intel_opregion_asle_intr(dev_priv);
1168
1169         if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1170                 gmbus_irq_handler(dev_priv);
1171 }
1172
1173 static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1174                                             u32 pipe_stats[I915_MAX_PIPES])
1175 {
1176         enum pipe pipe;
1177
1178         for_each_pipe(dev_priv, pipe) {
1179                 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1180                         intel_handle_vblank(dev_priv, pipe);
1181
1182                 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
1183                         flip_done_handler(dev_priv, pipe);
1184
1185                 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1186                         i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1187
1188                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1189                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1190         }
1191
1192         if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1193                 gmbus_irq_handler(dev_priv);
1194 }
1195
1196 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
1197 {
1198         u32 hotplug_status = 0, hotplug_status_mask;
1199         int i;
1200
1201         if (IS_G4X(dev_priv) ||
1202             IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1203                 hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
1204                         DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
1205         else
1206                 hotplug_status_mask = HOTPLUG_INT_STATUS_I915;
1207
1208         /*
1209          * We absolutely have to clear all the pending interrupt
1210          * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
1211          * interrupt bit won't have an edge, and the i965/g4x
1212          * edge triggered IIR will not notice that an interrupt
1213          * is still pending. We can't use PORT_HOTPLUG_EN to
1214          * guarantee the edge as the act of toggling the enable
1215          * bits can itself generate a new hotplug interrupt :(
1216          */
1217         for (i = 0; i < 10; i++) {
1218                 u32 tmp = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT) & hotplug_status_mask;
1219
1220                 if (tmp == 0)
1221                         return hotplug_status;
1222
1223                 hotplug_status |= tmp;
1224                 intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, hotplug_status);
1225         }
1226
1227         drm_WARN_ONCE(&dev_priv->drm, 1,
1228                       "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
1229                       intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
1230
1231         return hotplug_status;
1232 }
1233
1234 static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1235                                  u32 hotplug_status)
1236 {
1237         u32 pin_mask = 0, long_mask = 0;
1238         u32 hotplug_trigger;
1239
1240         if (IS_G4X(dev_priv) ||
1241             IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1242                 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1243         else
1244                 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1245
1246         if (hotplug_trigger) {
1247                 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1248                                    hotplug_trigger, hotplug_trigger,
1249                                    dev_priv->display.hotplug.hpd,
1250                                    i9xx_port_hotplug_long_detect);
1251
1252                 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1253         }
1254
1255         if ((IS_G4X(dev_priv) ||
1256              IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
1257             hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1258                 dp_aux_irq_handler(dev_priv);
1259 }
1260
1261 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1262 {
1263         struct drm_i915_private *dev_priv = arg;
1264         irqreturn_t ret = IRQ_NONE;
1265
1266         if (!intel_irqs_enabled(dev_priv))
1267                 return IRQ_NONE;
1268
1269         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
1270         disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1271
1272         do {
1273                 u32 iir, gt_iir, pm_iir;
1274                 u32 pipe_stats[I915_MAX_PIPES] = {};
1275                 u32 hotplug_status = 0;
1276                 u32 ier = 0;
1277
1278                 gt_iir = intel_uncore_read(&dev_priv->uncore, GTIIR);
1279                 pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR);
1280                 iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);
1281
1282                 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1283                         break;
1284
1285                 ret = IRQ_HANDLED;
1286
1287                 /*
1288                  * Theory on interrupt generation, based on empirical evidence:
1289                  *
1290                  * x = ((VLV_IIR & VLV_IER) ||
1291                  *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
1292                  *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
1293                  *
1294                  * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1295                  * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
1296                  * guarantee the CPU interrupt will be raised again even if we
1297                  * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
1298                  * bits this time around.
1299                  */
1300                 intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
1301                 ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0);
1302
1303                 if (gt_iir)
1304                         intel_uncore_write(&dev_priv->uncore, GTIIR, gt_iir);
1305                 if (pm_iir)
1306                         intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir);
1307
1308                 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1309                         hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1310
1311                 /* Call regardless, as some status bits might not be
1312                  * signalled in iir */
1313                 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1314
1315                 if (iir & (I915_LPE_PIPE_A_INTERRUPT |
1316                            I915_LPE_PIPE_B_INTERRUPT))
1317                         intel_lpe_audio_irq_handler(dev_priv);
1318
1319                 /*
1320                  * VLV_IIR is single buffered, and reflects the level
1321                  * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1322                  */
1323                 if (iir)
1324                         intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);
1325
1326                 intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
1327                 intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
1328
1329                 if (gt_iir)
1330                         gen6_gt_irq_handler(to_gt(dev_priv), gt_iir);
1331                 if (pm_iir)
1332                         gen6_rps_irq_handler(&to_gt(dev_priv)->rps, pm_iir);
1333
1334                 if (hotplug_status)
1335                         i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1336
1337                 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1338         } while (0);
1339
1340         pmu_irq_stats(dev_priv, ret);
1341
1342         enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1343
1344         return ret;
1345 }
1346
1347 static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1348 {
1349         struct drm_i915_private *dev_priv = arg;
1350         irqreturn_t ret = IRQ_NONE;
1351
1352         if (!intel_irqs_enabled(dev_priv))
1353                 return IRQ_NONE;
1354
1355         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
1356         disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1357
1358         do {
1359                 u32 master_ctl, iir;
1360                 u32 pipe_stats[I915_MAX_PIPES] = {};
1361                 u32 hotplug_status = 0;
1362                 u32 ier = 0;
1363
1364                 master_ctl = intel_uncore_read(&dev_priv->uncore, GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1365                 iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);
1366
1367                 if (master_ctl == 0 && iir == 0)
1368                         break;
1369
1370                 ret = IRQ_HANDLED;
1371
1372                 /*
1373                  * Theory on interrupt generation, based on empirical evidence:
1374                  *
1375                  * x = ((VLV_IIR & VLV_IER) ||
1376                  *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
1377                  *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
1378                  *
1379                  * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1380                  * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
1381                  * guarantee the CPU interrupt will be raised again even if we
1382                  * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
1383                  * bits this time around.
1384                  */
1385                 intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
1386                 ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0);
1387
1388                 gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);
1389
1390                 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1391                         hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1392
1393                 /* Call regardless, as some status bits might not be
1394                  * signalled in iir */
1395                 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1396
1397                 if (iir & (I915_LPE_PIPE_A_INTERRUPT |
1398                            I915_LPE_PIPE_B_INTERRUPT |
1399                            I915_LPE_PIPE_C_INTERRUPT))
1400                         intel_lpe_audio_irq_handler(dev_priv);
1401
1402                 /*
1403                  * VLV_IIR is single buffered, and reflects the level
1404                  * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1405                  */
1406                 if (iir)
1407                         intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);
1408
1409                 intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
1410                 intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
1411
1412                 if (hotplug_status)
1413                         i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1414
1415                 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1416         } while (0);
1417
1418         pmu_irq_stats(dev_priv, ret);
1419
1420         enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1421
1422         return ret;
1423 }
1424
1425 static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1426                                 u32 hotplug_trigger)
1427 {
1428         u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1429
1430         /*
1431          * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
1432          * unless we touch the hotplug register, even if hotplug_trigger is
1433          * zero. Not acking leads to "The master control interrupt lied (SDE)!"
1434          * errors.
1435          */
1436         dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
1437         if (!hotplug_trigger) {
1438                 u32 mask = PORTA_HOTPLUG_STATUS_MASK |
1439                         PORTD_HOTPLUG_STATUS_MASK |
1440                         PORTC_HOTPLUG_STATUS_MASK |
1441                         PORTB_HOTPLUG_STATUS_MASK;
1442                 dig_hotplug_reg &= ~mask;
1443         }
1444
1445         intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
1446         if (!hotplug_trigger)
1447                 return;
1448
1449         intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1450                            hotplug_trigger, dig_hotplug_reg,
1451                            dev_priv->display.hotplug.pch_hpd,
1452                            pch_port_hotplug_long_detect);
1453
1454         intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1455 }
1456
1457 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1458 {
1459         enum pipe pipe;
1460         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1461
1462         ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
1463
1464         if (pch_iir & SDE_AUDIO_POWER_MASK) {
1465                 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1466                                SDE_AUDIO_POWER_SHIFT);
1467                 drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
1468                         port_name(port));
1469         }
1470
1471         if (pch_iir & SDE_AUX_MASK)
1472                 dp_aux_irq_handler(dev_priv);
1473
1474         if (pch_iir & SDE_GMBUS)
1475                 gmbus_irq_handler(dev_priv);
1476
1477         if (pch_iir & SDE_AUDIO_HDCP_MASK)
1478                 drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n");
1479
1480         if (pch_iir & SDE_AUDIO_TRANS_MASK)
1481                 drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n");
1482
1483         if (pch_iir & SDE_POISON)
1484                 drm_err(&dev_priv->drm, "PCH poison interrupt\n");
1485
1486         if (pch_iir & SDE_FDI_MASK) {
1487                 for_each_pipe(dev_priv, pipe)
1488                         drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
1489                                 pipe_name(pipe),
1490                                 intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
1491         }
1492
1493         if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1494                 drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n");
1495
1496         if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1497                 drm_dbg(&dev_priv->drm,
1498                         "PCH transcoder CRC error interrupt\n");
1499
1500         if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1501                 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
1502
1503         if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1504                 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
1505 }
1506
1507 static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
1508 {
1509         u32 err_int = intel_uncore_read(&dev_priv->uncore, GEN7_ERR_INT);
1510         enum pipe pipe;
1511
1512         if (err_int & ERR_INT_POISON)
1513                 drm_err(&dev_priv->drm, "Poison interrupt\n");
1514
1515         for_each_pipe(dev_priv, pipe) {
1516                 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
1517                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1518
1519                 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1520                         if (IS_IVYBRIDGE(dev_priv))
1521                                 ivb_pipe_crc_irq_handler(dev_priv, pipe);
1522                         else
1523                                 hsw_pipe_crc_irq_handler(dev_priv, pipe);
1524                 }
1525         }
1526
1527         intel_uncore_write(&dev_priv->uncore, GEN7_ERR_INT, err_int);
1528 }
1529
1530 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
1531 {
1532         u32 serr_int = intel_uncore_read(&dev_priv->uncore, SERR_INT);
1533         enum pipe pipe;
1534
1535         if (serr_int & SERR_INT_POISON)
1536                 drm_err(&dev_priv->drm, "PCH poison interrupt\n");
1537
1538         for_each_pipe(dev_priv, pipe)
1539                 if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
1540                         intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
1541
1542         intel_uncore_write(&dev_priv->uncore, SERR_INT, serr_int);
1543 }
1544
1545 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1546 {
1547         enum pipe pipe;
1548         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1549
1550         ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
1551
1552         if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1553                 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1554                                SDE_AUDIO_POWER_SHIFT_CPT);
1555                 drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
1556                         port_name(port));
1557         }
1558
1559         if (pch_iir & SDE_AUX_MASK_CPT)
1560                 dp_aux_irq_handler(dev_priv);
1561
1562         if (pch_iir & SDE_GMBUS_CPT)
1563                 gmbus_irq_handler(dev_priv);
1564
1565         if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
1566                 drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n");
1567
1568         if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
1569                 drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n");
1570
1571         if (pch_iir & SDE_FDI_MASK_CPT) {
1572                 for_each_pipe(dev_priv, pipe)
1573                         drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
1574                                 pipe_name(pipe),
1575                                 intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
1576         }
1577
1578         if (pch_iir & SDE_ERROR_CPT)
1579                 cpt_serr_int_handler(dev_priv);
1580 }
1581
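/*
 * Handle hotplug/AUX interrupts reported through the PICA unit
 * (XELPDP). Each TC pin has its own XELPDP_PORT_HOTPLUG_CTL register,
 * which is written back to ack the event; its *_HPD_LONG_DETECT bits
 * distinguish long from short pulses.
 */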
1582 static void xelpdp_pica_irq_handler(struct drm_i915_private *i915, u32 iir)
1583 {
1584         enum hpd_pin pin;
1585         u32 hotplug_trigger = iir & (XELPDP_DP_ALT_HOTPLUG_MASK | XELPDP_TBT_HOTPLUG_MASK);
1586         u32 trigger_aux = iir & XELPDP_AUX_TC_MASK;
1587         u32 pin_mask = 0, long_mask = 0;
1588
1589         for (pin = HPD_PORT_TC1; pin <= HPD_PORT_TC4; pin++) {
1590                 u32 val;
1591
1592                 if (!(i915->display.hotplug.hpd[pin] & hotplug_trigger))
1593                         continue;
1594
1595                 pin_mask |= BIT(pin);
1596
1597                 val = intel_de_read(i915, XELPDP_PORT_HOTPLUG_CTL(pin));
1598                 intel_de_write(i915, XELPDP_PORT_HOTPLUG_CTL(pin), val);
1599
1600                 if (val & (XELPDP_DP_ALT_HPD_LONG_DETECT | XELPDP_TBT_HPD_LONG_DETECT))
1601                         long_mask |= BIT(pin);
1602         }
1603
1604         if (pin_mask) {
1605                 drm_dbg(&i915->drm,
1606                         "pica hotplug event received, stat 0x%08x, pins 0x%08x, long 0x%08x\n",
1607                         hotplug_trigger, pin_mask, long_mask);
1608
1609                 intel_hpd_irq_handler(i915, pin_mask, long_mask);
1610         }
1611
1612         if (trigger_aux)
1613                 dp_aux_irq_handler(i915);
1614
1615         if (!pin_mask && !trigger_aux)
1616                 drm_err(&i915->drm,
1617                         "Unexpected DE HPD/AUX interrupt 0x%08x\n", iir);
1618 }
1619
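/*
 * ICP+ south (PCH) interrupts: the DDI and Type-C hotplug triggers are
 * latched in separate SHOTPLUG_CTL registers and decoded with their own
 * long-pulse helpers before being fed to the common HPD handler.
 */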
1620 static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1621 {
1622         u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_HOTPLUG_MASK_ICP;
1623         u32 tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_MASK_ICP;
1624         u32 pin_mask = 0, long_mask = 0;
1625
1626         if (ddi_hotplug_trigger) {
1627                 u32 dig_hotplug_reg;
1628
1629                 /* Locking due to DSI native GPIO sequences */
1630                 spin_lock(&dev_priv->irq_lock);
1631                 dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_DDI, 0, 0);
1632                 spin_unlock(&dev_priv->irq_lock);
1633
1634                 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1635                                    ddi_hotplug_trigger, dig_hotplug_reg,
1636                                    dev_priv->display.hotplug.pch_hpd,
1637                                    icp_ddi_port_hotplug_long_detect);
1638         }
1639
1640         if (tc_hotplug_trigger) {
1641                 u32 dig_hotplug_reg;
1642
1643                 dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_TC, 0, 0);
1644
1645                 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1646                                    tc_hotplug_trigger, dig_hotplug_reg,
1647                                    dev_priv->display.hotplug.pch_hpd,
1648                                    icp_tc_port_hotplug_long_detect);
1649         }
1650
1651         if (pin_mask)
1652                 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1653
1654         if (pch_iir & SDE_GMBUS_ICP)
1655                 gmbus_irq_handler(dev_priv);
1656 }
1657
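/*
 * SPT south interrupts: port E hotplug status lives in the separate
 * PCH_PORT_HOTPLUG2 register, hence the second trigger/decode pass.
 */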
1658 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1659 {
1660         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
1661                 ~SDE_PORTE_HOTPLUG_SPT;
1662         u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
1663         u32 pin_mask = 0, long_mask = 0;
1664
1665         if (hotplug_trigger) {
1666                 u32 dig_hotplug_reg;
1667
1668                 dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 0, 0);
1669
1670                 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1671                                    hotplug_trigger, dig_hotplug_reg,
1672                                    dev_priv->display.hotplug.pch_hpd,
1673                                    spt_port_hotplug_long_detect);
1674         }
1675
1676         if (hotplug2_trigger) {
1677                 u32 dig_hotplug_reg;
1678
1679                 dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG2, 0, 0);
1680
1681                 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1682                                    hotplug2_trigger, dig_hotplug_reg,
1683                                    dev_priv->display.hotplug.pch_hpd,
1684                                    spt_port_hotplug2_long_detect);
1685         }
1686
1687         if (pin_mask)
1688                 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1689
1690         if (pch_iir & SDE_GMBUS_CPT)
1691                 gmbus_irq_handler(dev_priv);
1692 }
1693
1694 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
1695                                 u32 hotplug_trigger)
1696 {
1697         u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1698
1699         dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, 0, 0);
1700
1701         intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1702                            hotplug_trigger, dig_hotplug_reg,
1703                            dev_priv->display.hotplug.hpd,
1704                            ilk_port_hotplug_long_detect);
1705
1706         intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1707 }
1708
1709 static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
1710                                     u32 de_iir)
1711 {
1712         enum pipe pipe;
1713         u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
1714
1715         if (hotplug_trigger)
1716                 ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
1717
1718         if (de_iir & DE_AUX_CHANNEL_A)
1719                 dp_aux_irq_handler(dev_priv);
1720
1721         if (de_iir & DE_GSE)
1722                 intel_opregion_asle_intr(dev_priv);
1723
1724         if (de_iir & DE_POISON)
1725                 drm_err(&dev_priv->drm, "Poison interrupt\n");
1726
1727         for_each_pipe(dev_priv, pipe) {
1728                 if (de_iir & DE_PIPE_VBLANK(pipe))
1729                         intel_handle_vblank(dev_priv, pipe);
1730
1731                 if (de_iir & DE_PLANE_FLIP_DONE(pipe))
1732                         flip_done_handler(dev_priv, pipe);
1733
1734                 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
1735                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1736
1737                 if (de_iir & DE_PIPE_CRC_DONE(pipe))
1738                         i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1739         }
1740
1741         /* check event from PCH */
1742         if (de_iir & DE_PCH_EVENT) {
1743                 u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
1744
1745                 if (HAS_PCH_CPT(dev_priv))
1746                         cpt_irq_handler(dev_priv, pch_iir);
1747                 else
1748                         ibx_irq_handler(dev_priv, pch_iir);
1749
1750                 /* clear the PCH hotplug event before clearing the CPU irq */
1751                 intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
1752         }
1753
1754         if (DISPLAY_VER(dev_priv) == 5 && de_iir & DE_PCU_EVENT)
1755                 gen5_rps_irq_handler(&to_gt(dev_priv)->rps);
1756 }
1757
1758 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
1759                                     u32 de_iir)
1760 {
1761         enum pipe pipe;
1762         u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
1763
1764         if (hotplug_trigger)
1765                 ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
1766
1767         if (de_iir & DE_ERR_INT_IVB)
1768                 ivb_err_int_handler(dev_priv);
1769
1770         if (de_iir & DE_AUX_CHANNEL_A_IVB)
1771                 dp_aux_irq_handler(dev_priv);
1772
1773         if (de_iir & DE_GSE_IVB)
1774                 intel_opregion_asle_intr(dev_priv);
1775
1776         for_each_pipe(dev_priv, pipe) {
1777                 if (de_iir & DE_PIPE_VBLANK_IVB(pipe))
1778                         intel_handle_vblank(dev_priv, pipe);
1779
1780                 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
1781                         flip_done_handler(dev_priv, pipe);
1782         }
1783
1784         /* check event from PCH */
1785         if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
1786                 u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
1787
1788                 cpt_irq_handler(dev_priv, pch_iir);
1789
1790                 /* clear the PCH hotplug event before clearing the CPU irq */
1791                 intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
1792         }
1793 }
1794
1795 /*
1796  * To handle irqs with the minimum potential races with fresh interrupts, we:
1797  * 1 - Disable Master Interrupt Control.
1798  * 2 - Find the source(s) of the interrupt.
1799  * 3 - Clear the Interrupt Identity bits (IIR).
1800  * 4 - Process the interrupt(s) that had bits set in the IIRs.
1801  * 5 - Re-enable Master Interrupt Control.
1802  */
1803 static irqreturn_t ilk_irq_handler(int irq, void *arg)
1804 {
1805         struct drm_i915_private *i915 = arg;
1806         void __iomem * const regs = i915->uncore.regs;
1807         u32 de_iir, gt_iir, de_ier, sde_ier = 0;
1808         irqreturn_t ret = IRQ_NONE;
1809
1810         if (unlikely(!intel_irqs_enabled(i915)))
1811                 return IRQ_NONE;
1812
1813         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
1814         disable_rpm_wakeref_asserts(&i915->runtime_pm);
1815
1816         /* disable master interrupt before clearing iir  */
1817         de_ier = raw_reg_read(regs, DEIER);
1818         raw_reg_write(regs, DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
1819
1820         /* Disable south interrupts. We'll only write to SDEIIR once, so further
1821          * interrupts will be stored on its back queue, and then we'll be
1822          * able to process them after we restore SDEIER (as soon as we restore
1823          * it, we'll get an interrupt if SDEIIR still has something to process
1824          * due to its back queue). */
1825         if (!HAS_PCH_NOP(i915)) {
1826                 sde_ier = raw_reg_read(regs, SDEIER);
1827                 raw_reg_write(regs, SDEIER, 0);
1828         }
1829
1830         /* Find, clear, then process each source of interrupt */
1831
1832         gt_iir = raw_reg_read(regs, GTIIR);
1833         if (gt_iir) {
1834                 raw_reg_write(regs, GTIIR, gt_iir);
1835                 if (GRAPHICS_VER(i915) >= 6)
1836                         gen6_gt_irq_handler(to_gt(i915), gt_iir);
1837                 else
1838                         gen5_gt_irq_handler(to_gt(i915), gt_iir);
1839                 ret = IRQ_HANDLED;
1840         }
1841
1842         de_iir = raw_reg_read(regs, DEIIR);
1843         if (de_iir) {
1844                 raw_reg_write(regs, DEIIR, de_iir);
1845                 if (DISPLAY_VER(i915) >= 7)
1846                         ivb_display_irq_handler(i915, de_iir);
1847                 else
1848                         ilk_display_irq_handler(i915, de_iir);
1849                 ret = IRQ_HANDLED;
1850         }
1851
1852         if (GRAPHICS_VER(i915) >= 6) {
1853                 u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR);
1854                 if (pm_iir) {
1855                         raw_reg_write(regs, GEN6_PMIIR, pm_iir);
1856                         gen6_rps_irq_handler(&to_gt(i915)->rps, pm_iir);
1857                         ret = IRQ_HANDLED;
1858                 }
1859         }
1860
1861         raw_reg_write(regs, DEIER, de_ier);
1862         if (sde_ier)
1863                 raw_reg_write(regs, SDEIER, sde_ier);
1864
1865         pmu_irq_stats(i915, ret);
1866
1867         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
1868         enable_rpm_wakeref_asserts(&i915->runtime_pm);
1869
1870         return ret;
1871 }
1872
1873 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
1874                                 u32 hotplug_trigger)
1875 {
1876         u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1877
1878         dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 0, 0);
1879
1880         intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1881                            hotplug_trigger, dig_hotplug_reg,
1882                            dev_priv->display.hotplug.hpd,
1883                            bxt_port_hotplug_long_detect);
1884
1885         intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1886 }
1887
1888 static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
1889 {
1890         u32 pin_mask = 0, long_mask = 0;
1891         u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
1892         u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
1893
1894         if (trigger_tc) {
1895                 u32 dig_hotplug_reg;
1896
1897                 dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, 0, 0);
1898
1899                 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1900                                    trigger_tc, dig_hotplug_reg,
1901                                    dev_priv->display.hotplug.hpd,
1902                                    gen11_port_hotplug_long_detect);
1903         }
1904
1905         if (trigger_tbt) {
1906                 u32 dig_hotplug_reg;
1907
1908                 dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, 0, 0);
1909
1910                 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1911                                    trigger_tbt, dig_hotplug_reg,
1912                                    dev_priv->display.hotplug.hpd,
1913                                    gen11_port_hotplug_long_detect);
1914         }
1915
1916         if (pin_mask)
1917                 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1918         else
1919                 drm_err(&dev_priv->drm,
1920                         "Unexpected DE HPD interrupt 0x%08x\n", iir);
1921 }
1922
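/* Mask of the AUX channel bits in GEN8_DE_PORT_IIR for this platform. */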
1923 static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
1924 {
1925         u32 mask;
1926
1927         if (DISPLAY_VER(dev_priv) >= 13)
1928                 return TGL_DE_PORT_AUX_DDIA |
1929                         TGL_DE_PORT_AUX_DDIB |
1930                         TGL_DE_PORT_AUX_DDIC |
1931                         XELPD_DE_PORT_AUX_DDID |
1932                         XELPD_DE_PORT_AUX_DDIE |
1933                         TGL_DE_PORT_AUX_USBC1 |
1934                         TGL_DE_PORT_AUX_USBC2 |
1935                         TGL_DE_PORT_AUX_USBC3 |
1936                         TGL_DE_PORT_AUX_USBC4;
1937         else if (DISPLAY_VER(dev_priv) >= 12)
1938                 return TGL_DE_PORT_AUX_DDIA |
1939                         TGL_DE_PORT_AUX_DDIB |
1940                         TGL_DE_PORT_AUX_DDIC |
1941                         TGL_DE_PORT_AUX_USBC1 |
1942                         TGL_DE_PORT_AUX_USBC2 |
1943                         TGL_DE_PORT_AUX_USBC3 |
1944                         TGL_DE_PORT_AUX_USBC4 |
1945                         TGL_DE_PORT_AUX_USBC5 |
1946                         TGL_DE_PORT_AUX_USBC6;
1947
1949         mask = GEN8_AUX_CHANNEL_A;
1950         if (DISPLAY_VER(dev_priv) >= 9)
1951                 mask |= GEN9_AUX_CHANNEL_B |
1952                         GEN9_AUX_CHANNEL_C |
1953                         GEN9_AUX_CHANNEL_D;
1954
1955         if (DISPLAY_VER(dev_priv) == 11) {
1956                 mask |= ICL_AUX_CHANNEL_F;
1957                 mask |= ICL_AUX_CHANNEL_E;
1958         }
1959
1960         return mask;
1961 }
1962
1963 static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
1964 {
1965         if (DISPLAY_VER(dev_priv) >= 13 || HAS_D12_PLANE_MINIMIZATION(dev_priv))
1966                 return RKL_DE_PIPE_IRQ_FAULT_ERRORS;
1967         else if (DISPLAY_VER(dev_priv) >= 11)
1968                 return GEN11_DE_PIPE_IRQ_FAULT_ERRORS;
1969         else if (DISPLAY_VER(dev_priv) >= 9)
1970                 return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
1971         else
1972                 return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
1973 }
1974
1975 static void
1976 gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
1977 {
1978         bool found = false;
1979
1980         if (iir & GEN8_DE_MISC_GSE) {
1981                 intel_opregion_asle_intr(dev_priv);
1982                 found = true;
1983         }
1984
1985         if (iir & GEN8_DE_EDP_PSR) {
1986                 struct intel_encoder *encoder;
1987                 u32 psr_iir;
1988                 i915_reg_t iir_reg;
1989
1990                 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
1991                         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1992
1993                         if (DISPLAY_VER(dev_priv) >= 12)
1994                                 iir_reg = TRANS_PSR_IIR(intel_dp->psr.transcoder);
1995                         else
1996                                 iir_reg = EDP_PSR_IIR;
1997
1998                         psr_iir = intel_uncore_rmw(&dev_priv->uncore, iir_reg, 0, 0);
1999
2000                         if (psr_iir)
2001                                 found = true;
2002
2003                         intel_psr_irq_handler(intel_dp, psr_iir);
2004
2005                         /* prior to GEN12 there is only one EDP PSR */
2006                         if (DISPLAY_VER(dev_priv) < 12)
2007                                 break;
2008                 }
2009         }
2010
2011         if (!found)
2012                 drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n");
2013 }
2014
2015 static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
2016                                            u32 te_trigger)
2017 {
2018         enum pipe pipe = INVALID_PIPE;
2019         enum transcoder dsi_trans;
2020         enum port port;
2021         u32 val, tmp;
2022
2023         /*
2024          * In case of dual link, TE comes from DSI_1;
2025          * this is to check if dual link is enabled.
2026          */
2027         val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL2(TRANSCODER_DSI_0));
2028         val &= PORT_SYNC_MODE_ENABLE;
2029
2030         /*
2031          * If dual link is enabled, then read the DSI_0
2032          * transcoder registers.
2033          */
2034         port = ((te_trigger & DSI1_TE && val) || (te_trigger & DSI0_TE)) ?
2035                                                   PORT_A : PORT_B;
2036         dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1;
2037
2038         /* Check if DSI is configured in command mode */
2039         val = intel_uncore_read(&dev_priv->uncore, DSI_TRANS_FUNC_CONF(dsi_trans));
2040         val = val & OP_MODE_MASK;
2041
2042         if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
2043                 drm_err(&dev_priv->drm, "DSI transcoder not configured in command mode\n");
2044                 return;
2045         }
2046
2047         /* Get PIPE for handling VBLANK event */
2048         val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL(dsi_trans));
2049         switch (val & TRANS_DDI_EDP_INPUT_MASK) {
2050         case TRANS_DDI_EDP_INPUT_A_ON:
2051                 pipe = PIPE_A;
2052                 break;
2053         case TRANS_DDI_EDP_INPUT_B_ONOFF:
2054                 pipe = PIPE_B;
2055                 break;
2056         case TRANS_DDI_EDP_INPUT_C_ONOFF:
2057                 pipe = PIPE_C;
2058                 break;
2059         default:
2060                 drm_err(&dev_priv->drm, "Invalid PIPE\n");
2061                 return;
2062         }
2063
2064         intel_handle_vblank(dev_priv, pipe);
2065
2066         /* clear TE in dsi IIR */
2067         port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A;
2068         tmp = intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0);
2069 }
2070
2071 static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
2072 {
2073         if (DISPLAY_VER(i915) >= 9)
2074                 return GEN9_PIPE_PLANE1_FLIP_DONE;
2075         else
2076                 return GEN8_PIPE_PRIMARY_FLIP_DONE;
2077 }
2078
2079 u32 gen8_de_pipe_underrun_mask(struct drm_i915_private *dev_priv)
2080 {
2081         u32 mask = GEN8_PIPE_FIFO_UNDERRUN;
2082
2083         if (DISPLAY_VER(dev_priv) >= 13)
2084                 mask |= XELPD_PIPE_SOFT_UNDERRUN |
2085                         XELPD_PIPE_HARD_UNDERRUN;
2086
2087         return mask;
2088 }
2089
2090 static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_iir, u32 *pica_iir)
2091 {
2092         u32 pica_ier = 0;
2093
2094         *pica_iir = 0;
2095         *pch_iir = intel_de_read(i915, SDEIIR);
2096         if (!*pch_iir)
2097                 return;
2098
2099         /*
2100          * PICA IER must be disabled/re-enabled around clearing PICA IIR and
2101          * SDEIIR, to avoid losing PICA IRQs and to ensure that such IRQs set
2102          * their flags both in the PICA and SDE IIR.
2103          */
2104         if (*pch_iir & SDE_PICAINTERRUPT) {
2105                 drm_WARN_ON(&i915->drm, INTEL_PCH_TYPE(i915) < PCH_MTP);
2106
2107                 pica_ier = intel_de_rmw(i915, PICAINTERRUPT_IER, ~0, 0);
2108                 *pica_iir = intel_de_read(i915, PICAINTERRUPT_IIR);
2109                 intel_de_write(i915, PICAINTERRUPT_IIR, *pica_iir);
2110         }
2111
2112         intel_de_write(i915, SDEIIR, *pch_iir);
2113
2114         if (pica_ier)
2115                 intel_de_write(i915, PICAINTERRUPT_IER, pica_ier);
2116 }
2117
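/*
 * Display engine interrupts for gen8+. master_ctl indicates which DE
 * IIRs need servicing; each one is read, cleared, and only then
 * processed, so that new events latched during processing are not lost.
 */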
2118 static irqreturn_t
2119 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2120 {
2121         irqreturn_t ret = IRQ_NONE;
2122         u32 iir;
2123         enum pipe pipe;
2124
2125         drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DISPLAY(dev_priv));
2126
2127         if (master_ctl & GEN8_DE_MISC_IRQ) {
2128                 iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_MISC_IIR);
2129                 if (iir) {
2130                         intel_uncore_write(&dev_priv->uncore, GEN8_DE_MISC_IIR, iir);
2131                         ret = IRQ_HANDLED;
2132                         gen8_de_misc_irq_handler(dev_priv, iir);
2133                 } else {
2134                         drm_err_ratelimited(&dev_priv->drm,
2135                                             "The master control interrupt lied (DE MISC)!\n");
2136                 }
2137         }
2138
2139         if (DISPLAY_VER(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
2140                 iir = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IIR);
2141                 if (iir) {
2142                         intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IIR, iir);
2143                         ret = IRQ_HANDLED;
2144                         gen11_hpd_irq_handler(dev_priv, iir);
2145                 } else {
2146                         drm_err_ratelimited(&dev_priv->drm,
2147                                             "The master control interrupt lied (DE HPD)!\n");
2148                 }
2149         }
2150
2151         if (master_ctl & GEN8_DE_PORT_IRQ) {
2152                 iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IIR);
2153                 if (iir) {
2154                         bool found = false;
2155
2156                         intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IIR, iir);
2157                         ret = IRQ_HANDLED;
2158
2159                         if (iir & gen8_de_port_aux_mask(dev_priv)) {
2160                                 dp_aux_irq_handler(dev_priv);
2161                                 found = true;
2162                         }
2163
2164                         if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
2165                                 u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK;
2166
2167                                 if (hotplug_trigger) {
2168                                         bxt_hpd_irq_handler(dev_priv, hotplug_trigger);
2169                                         found = true;
2170                                 }
2171                         } else if (IS_BROADWELL(dev_priv)) {
2172                                 u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK;
2173
2174                                 if (hotplug_trigger) {
2175                                         ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
2176                                         found = true;
2177                                 }
2178                         }
2179
2180                         if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
2181                             (iir & BXT_DE_PORT_GMBUS)) {
2182                                 gmbus_irq_handler(dev_priv);
2183                                 found = true;
2184                         }
2185
2186                         if (DISPLAY_VER(dev_priv) >= 11) {
2187                                 u32 te_trigger = iir & (DSI0_TE | DSI1_TE);
2188
2189                                 if (te_trigger) {
2190                                         gen11_dsi_te_interrupt_handler(dev_priv, te_trigger);
2191                                         found = true;
2192                                 }
2193                         }
2194
2195                         if (!found)
2196                                 drm_err_ratelimited(&dev_priv->drm,
2197                                                     "Unexpected DE Port interrupt\n");
2198                 } else {
2199                         drm_err_ratelimited(&dev_priv->drm,
2200                                             "The master control interrupt lied (DE PORT)!\n");
2201                 }
2202         }
2203
2204         for_each_pipe(dev_priv, pipe) {
2205                 u32 fault_errors;
2206
2207                 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2208                         continue;
2209
2210                 iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe));
2211                 if (!iir) {
2212                         drm_err_ratelimited(&dev_priv->drm,
2213                                             "The master control interrupt lied (DE PIPE)!\n");
2214                         continue;
2215                 }
2216
2217                 ret = IRQ_HANDLED;
2218                 intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe), iir);
2219
2220                 if (iir & GEN8_PIPE_VBLANK)
2221                         intel_handle_vblank(dev_priv, pipe);
2222
2223                 if (iir & gen8_de_pipe_flip_done_mask(dev_priv))
2224                         flip_done_handler(dev_priv, pipe);
2225
2226                 if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
2227                         hsw_pipe_crc_irq_handler(dev_priv, pipe);
2228
2229                 if (iir & gen8_de_pipe_underrun_mask(dev_priv))
2230                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2231
2232                 fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
2233                 if (fault_errors)
2234                         drm_err_ratelimited(&dev_priv->drm,
2235                                             "Fault errors on pipe %c: 0x%08x\n",
2236                                             pipe_name(pipe),
2237                                             fault_errors);
2238         }
2239
2240         if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
2241             master_ctl & GEN8_DE_PCH_IRQ) {
2242                 u32 pica_iir;
2243
2244                 /*
2245                  * FIXME(BDW): Assume for now that the new interrupt handling
2246                  * scheme also closed the SDE interrupt handling race we've seen
2247                  * on older pch-split platforms. But this needs testing.
2248                  */
2249                 gen8_read_and_ack_pch_irqs(dev_priv, &iir, &pica_iir);
2250                 if (iir) {
2251                         ret = IRQ_HANDLED;
2252
2253                         if (pica_iir)
2254                                 xelpdp_pica_irq_handler(dev_priv, pica_iir);
2255
2256                         if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
2257                                 icp_irq_handler(dev_priv, iir);
2258                         else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
2259                                 spt_irq_handler(dev_priv, iir);
2260                         else
2261                                 cpt_irq_handler(dev_priv, iir);
2262                 } else {
2263                         /*
2264                          * Like on previous PCH there seems to be something
2265                          * fishy going on with forwarding PCH interrupts.
2266                          */
2267                         drm_dbg(&dev_priv->drm,
2268                                 "The master control interrupt lied (SDE)!\n");
2269                 }
2270         }
2271
2272         return ret;
2273 }
2274
2275 static inline u32 gen8_master_intr_disable(void __iomem * const regs)
2276 {
2277         raw_reg_write(regs, GEN8_MASTER_IRQ, 0);
2278
2279         /*
2280          * Now with master disabled, get a sample of level indications
2281          * for this interrupt. Indications will be cleared on related acks.
2282          * New indications can and will light up during processing,
2283          * and will generate a new interrupt after enabling master.
2284          */
2285         return raw_reg_read(regs, GEN8_MASTER_IRQ);
2286 }
2287
2288 static inline void gen8_master_intr_enable(void __iomem * const regs)
2289 {
2290         raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2291 }
2292
2293 static irqreturn_t gen8_irq_handler(int irq, void *arg)
2294 {
2295         struct drm_i915_private *dev_priv = arg;
2296         void __iomem * const regs = dev_priv->uncore.regs;
2297         u32 master_ctl;
2298
2299         if (!intel_irqs_enabled(dev_priv))
2300                 return IRQ_NONE;
2301
2302         master_ctl = gen8_master_intr_disable(regs);
2303         if (!master_ctl) {
2304                 gen8_master_intr_enable(regs);
2305                 return IRQ_NONE;
2306         }
2307
2308         /* Find, queue (onto bottom-halves), then clear each source */
2309         gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);
2310
2311         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2312         if (master_ctl & ~GEN8_GT_IRQS) {
2313                 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2314                 gen8_de_irq_handler(dev_priv, master_ctl);
2315                 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2316         }
2317
2318         gen8_master_intr_enable(regs);
2319
2320         pmu_irq_stats(dev_priv, IRQ_HANDLED);
2321
2322         return IRQ_HANDLED;
2323 }
2324
2325 static u32
2326 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl)
2327 {
2328         void __iomem * const regs = i915->uncore.regs;
2329         u32 iir;
2330
2331         if (!(master_ctl & GEN11_GU_MISC_IRQ))
2332                 return 0;
2333
2334         iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
2335         if (likely(iir))
2336                 raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
2337
2338         return iir;
2339 }
2340
2341 static void
2342 gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir)
2343 {
2344         if (iir & GEN11_GU_MISC_GSE)
2345                 intel_opregion_asle_intr(i915);
2346 }
2347
2348 static inline u32 gen11_master_intr_disable(void __iomem * const regs)
2349 {
2350         raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
2351
2352         /*
2353          * Now with master disabled, get a sample of level indications
2354          * for this interrupt. Indications will be cleared on related acks.
2355          * New indications can and will light up during processing,
2356          * and will generate a new interrupt after enabling master.
2357          */
2358         return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
2359 }
2360
2361 static inline void gen11_master_intr_enable(void __iomem * const regs)
2362 {
2363         raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
2364 }
2365
2366 static void
2367 gen11_display_irq_handler(struct drm_i915_private *i915)
2368 {
2369         void __iomem * const regs = i915->uncore.regs;
2370         const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
2371
2372         disable_rpm_wakeref_asserts(&i915->runtime_pm);
2373         /*
2374          * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
2375          * for the display related bits.
2376          */
2377         raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0);
2378         gen8_de_irq_handler(i915, disp_ctl);
2379         raw_reg_write(regs, GEN11_DISPLAY_INT_CTL,
2380                       GEN11_DISPLAY_IRQ_ENABLE);
2381
2382         enable_rpm_wakeref_asserts(&i915->runtime_pm);
2383 }
2384
2385 static irqreturn_t gen11_irq_handler(int irq, void *arg)
2386 {
2387         struct drm_i915_private *i915 = arg;
2388         void __iomem * const regs = i915->uncore.regs;
2389         struct intel_gt *gt = to_gt(i915);
2390         u32 master_ctl;
2391         u32 gu_misc_iir;
2392
2393         if (!intel_irqs_enabled(i915))
2394                 return IRQ_NONE;
2395
2396         master_ctl = gen11_master_intr_disable(regs);
2397         if (!master_ctl) {
2398                 gen11_master_intr_enable(regs);
2399                 return IRQ_NONE;
2400         }
2401
2402         /* Find, queue (onto bottom-halves), then clear each source */
2403         gen11_gt_irq_handler(gt, master_ctl);
2404
2405         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2406         if (master_ctl & GEN11_DISPLAY_IRQ)
2407                 gen11_display_irq_handler(i915);
2408
2409         gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
2410
2411         gen11_master_intr_enable(regs);
2412
2413         gen11_gu_misc_irq_handler(i915, gu_misc_iir);
2414
2415         pmu_irq_stats(i915, IRQ_HANDLED);
2416
2417         return IRQ_HANDLED;
2418 }
2419
2420 static inline u32 dg1_master_intr_disable(void __iomem * const regs)
2421 {
2422         u32 val;
2423
2424         /* First disable interrupts */
2425         raw_reg_write(regs, DG1_MSTR_TILE_INTR, 0);
2426
2427         /* Get the indication levels and ack the master unit */
2428         val = raw_reg_read(regs, DG1_MSTR_TILE_INTR);
2429         if (unlikely(!val))
2430                 return 0;
2431
2432         raw_reg_write(regs, DG1_MSTR_TILE_INTR, val);
2433
2434         return val;
2435 }
2436
2437 static inline void dg1_master_intr_enable(void __iomem * const regs)
2438 {
2439         raw_reg_write(regs, DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ);
2440 }
2441
2442 static irqreturn_t dg1_irq_handler(int irq, void *arg)
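/*
 * DG1+ adds a per-tile master register (DG1_MSTR_TILE_INTR) on top of
 * the gen11 scheme; only tile 0 is handled for now (see the FIXME
 * below).
 */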
2443 {
2444         struct drm_i915_private * const i915 = arg;
2445         struct intel_gt *gt = to_gt(i915);
2446         void __iomem * const regs = gt->uncore->regs;
2447         u32 master_tile_ctl, master_ctl;
2448         u32 gu_misc_iir;
2449
2450         if (!intel_irqs_enabled(i915))
2451                 return IRQ_NONE;
2452
2453         master_tile_ctl = dg1_master_intr_disable(regs);
2454         if (!master_tile_ctl) {
2455                 dg1_master_intr_enable(regs);
2456                 return IRQ_NONE;
2457         }
2458
2459         /* FIXME: we only support tile 0 for now. */
2460         if (master_tile_ctl & DG1_MSTR_TILE(0)) {
2461                 master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
2462                 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, master_ctl);
2463         } else {
2464                 drm_err(&i915->drm, "Tile not supported: 0x%08x\n",
2465                         master_tile_ctl);
2466                 dg1_master_intr_enable(regs);
2467                 return IRQ_NONE;
2468         }
2469
2470         gen11_gt_irq_handler(gt, master_ctl);
2471
2472         if (master_ctl & GEN11_DISPLAY_IRQ)
2473                 gen11_display_irq_handler(i915);
2474
2475         gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
2476
2477         dg1_master_intr_enable(regs);
2478
2479         gen11_gu_misc_irq_handler(i915, gu_misc_iir);
2480
2481         pmu_irq_stats(i915, IRQ_HANDLED);
2482
2483         return IRQ_HANDLED;
2484 }
2485
2486 /* Called from drm generic code, passed 'crtc' which
2487  * we use as a pipe index
2488  */
2489 int i8xx_enable_vblank(struct drm_crtc *crtc)
2490 {
2491         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2492         enum pipe pipe = to_intel_crtc(crtc)->pipe;
2493         unsigned long irqflags;
2494
2495         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2496         i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2497         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2498
2499         return 0;
2500 }
2501
2502 int i915gm_enable_vblank(struct drm_crtc *crtc)
2503 {
2504         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2505
2506         /*
2507          * Vblank interrupts fail to wake the device up from C2+.
2508          * Disabling render clock gating during C-states avoids
2509          * the problem. There is a small power cost so we do this
2510          * only when vblank interrupts are actually enabled.
2511          */
2512         if (dev_priv->vblank_enabled++ == 0)
2513                 intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
2514
2515         return i8xx_enable_vblank(crtc);
2516 }
2517
2518 int i965_enable_vblank(struct drm_crtc *crtc)
2519 {
2520         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2521         enum pipe pipe = to_intel_crtc(crtc)->pipe;
2522         unsigned long irqflags;
2523
2524         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2525         i915_enable_pipestat(dev_priv, pipe,
2526                              PIPE_START_VBLANK_INTERRUPT_STATUS);
2527         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2528
2529         return 0;
2530 }
2531
2532 int ilk_enable_vblank(struct drm_crtc *crtc)
2533 {
2534         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2535         enum pipe pipe = to_intel_crtc(crtc)->pipe;
2536         unsigned long irqflags;
2537         u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
2538                 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2539
2540         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2541         ilk_enable_display_irq(dev_priv, bit);
2542         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2543
2544         /* Even though there is no DMC, the frame counter can get stuck when
2545          * PSR is active, as no frames are generated.
2546          */
2547         if (HAS_PSR(dev_priv))
2548                 drm_crtc_vblank_restore(crtc);
2549
2550         return 0;
2551 }
2552
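/*
 * For DSI panels running in command mode, vblank is paced by the TE
 * interrupt rather than the pipe vblank, so (un)mask DSI_TE_EVENT
 * instead. Returns true if the crtc uses TE, in which case the callers
 * skip the regular vblank interrupt setup.
 */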
2553 static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
2554                                    bool enable)
2555 {
2556         struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
2557         enum port port;
2558
2559         if (!(intel_crtc->mode_flags &
2560             (I915_MODE_FLAG_DSI_USE_TE1 | I915_MODE_FLAG_DSI_USE_TE0)))
2561                 return false;
2562
2563         /* for dual link cases we consider TE from slave */
2564         if (intel_crtc->mode_flags & I915_MODE_FLAG_DSI_USE_TE1)
2565                 port = PORT_B;
2566         else
2567                 port = PORT_A;
2568
2569         intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_MASK_REG(port), DSI_TE_EVENT,
2570                          enable ? 0 : DSI_TE_EVENT);
2571
2572         intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0);
2573
2574         return true;
2575 }
2576
2577 int bdw_enable_vblank(struct drm_crtc *_crtc)
2578 {
2579         struct intel_crtc *crtc = to_intel_crtc(_crtc);
2580         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2581         enum pipe pipe = crtc->pipe;
2582         unsigned long irqflags;
2583
2584         if (gen11_dsi_configure_te(crtc, true))
2585                 return 0;
2586
2587         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2588         bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2589         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2590
2591         /* Even if there is no DMC, the frame counter can get stuck when
2592          * PSR is active, as no frames are generated, so check only for PSR.
2593          */
2594         if (HAS_PSR(dev_priv))
2595                 drm_crtc_vblank_restore(&crtc->base);
2596
2597         return 0;
2598 }
2599
2600 /* Called from drm generic code, passed 'crtc' which
2601  * we use as a pipe index
2602  */
2603 void i8xx_disable_vblank(struct drm_crtc *crtc)
2604 {
2605         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2606         enum pipe pipe = to_intel_crtc(crtc)->pipe;
2607         unsigned long irqflags;
2608
2609         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2610         i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2611         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2612 }
2613
2614 void i915gm_disable_vblank(struct drm_crtc *crtc)
2615 {
2616         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2617
2618         i8xx_disable_vblank(crtc);
2619
2620         if (--dev_priv->vblank_enabled == 0)
2621                 intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
2622 }
2623
2624 void i965_disable_vblank(struct drm_crtc *crtc)
2625 {
2626         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2627         enum pipe pipe = to_intel_crtc(crtc)->pipe;
2628         unsigned long irqflags;
2629
2630         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2631         i915_disable_pipestat(dev_priv, pipe,
2632                               PIPE_START_VBLANK_INTERRUPT_STATUS);
2633         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2634 }
2635
2636 void ilk_disable_vblank(struct drm_crtc *crtc)
2637 {
2638         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2639         enum pipe pipe = to_intel_crtc(crtc)->pipe;
2640         unsigned long irqflags;
2641         u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
2642                 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2643
2644         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2645         ilk_disable_display_irq(dev_priv, bit);
2646         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2647 }
2648
2649 void bdw_disable_vblank(struct drm_crtc *_crtc)
2650 {
2651         struct intel_crtc *crtc = to_intel_crtc(_crtc);
2652         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2653         enum pipe pipe = crtc->pipe;
2654         unsigned long irqflags;
2655
2656         if (gen11_dsi_configure_te(crtc, false))
2657                 return;
2658
2659         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2660         bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2661         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2662 }
2663
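/*
 * Reset the south display engine interrupts. SERR_INT is not covered
 * by GEN3_IRQ_RESET() and must be cleared separately on CPT/LPT.
 */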
2664 static void ibx_irq_reset(struct drm_i915_private *dev_priv)
2665 {
2666         struct intel_uncore *uncore = &dev_priv->uncore;
2667
2668         if (HAS_PCH_NOP(dev_priv))
2669                 return;
2670
2671         GEN3_IRQ_RESET(uncore, SDE);
2672
2673         if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
2674                 intel_uncore_write(&dev_priv->uncore, SERR_INT, 0xffffffff);
2675 }
2676
2677 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
2678 {
2679         struct intel_uncore *uncore = &dev_priv->uncore;
2680
2681         if (IS_CHERRYVIEW(dev_priv))
2682                 intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
2683         else
2684                 intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_VLV);
2685
2686         i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
2687         intel_uncore_rmw(uncore, PORT_HOTPLUG_STAT, 0, 0);
2688
2689         i9xx_pipestat_irq_reset(dev_priv);
2690
2691         GEN3_IRQ_RESET(uncore, VLV_);
2692         dev_priv->irq_mask = ~0u;
2693 }
2694
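/*
 * (Re)enable the VLV/CHV display interrupts: CRC done pipestats on all
 * pipes, GMBUS (reported via pipe A's pipestat), and the pipe event /
 * LPE audio / hotplug bits in VLV_IER.
 */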
2695 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
2696 {
2697         struct intel_uncore *uncore = &dev_priv->uncore;
2699         u32 pipestat_mask;
2700         u32 enable_mask;
2701         enum pipe pipe;
2702
2703         pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
2704
2705         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
2706         for_each_pipe(dev_priv, pipe)
2707                 i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
2708
2709         enable_mask = I915_DISPLAY_PORT_INTERRUPT |
2710                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2711                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2712                 I915_LPE_PIPE_A_INTERRUPT |
2713                 I915_LPE_PIPE_B_INTERRUPT;
2714
2715         if (IS_CHERRYVIEW(dev_priv))
2716                 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
2717                         I915_LPE_PIPE_C_INTERRUPT;
2718
2719         drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u);
2720
2721         dev_priv->irq_mask = ~enable_mask;
2722
2723         GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
2724 }
2725
2726 /* drm_dma.h hooks */
2728 static void ilk_irq_reset(struct drm_i915_private *dev_priv)
2729 {
2730         struct intel_uncore *uncore = &dev_priv->uncore;
2731
2732         GEN3_IRQ_RESET(uncore, DE);
2733         dev_priv->irq_mask = ~0u;
2734
2735         if (GRAPHICS_VER(dev_priv) == 7)
2736                 intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);
2737
2738         if (IS_HASWELL(dev_priv)) {
2739                 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
2740                 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
2741         }
2742
2743         gen5_gt_irq_reset(to_gt(dev_priv));
2744
2745         ibx_irq_reset(dev_priv);
2746 }
2747
2748 static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
2749 {
2750         intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
2751         intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
2752
2753         gen5_gt_irq_reset(to_gt(dev_priv));
2754
2755         spin_lock_irq(&dev_priv->irq_lock);
2756         if (dev_priv->display_irqs_enabled)
2757                 vlv_display_irq_reset(dev_priv);
2758         spin_unlock_irq(&dev_priv->irq_lock);
2759 }
2760
2761 static void gen8_display_irq_reset(struct drm_i915_private *dev_priv)
2762 {
2763         struct intel_uncore *uncore = &dev_priv->uncore;
2764         enum pipe pipe;
2765
2766         if (!HAS_DISPLAY(dev_priv))
2767                 return;
2768
2769         intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
2770         intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
2771
2772         for_each_pipe(dev_priv, pipe)
2773                 if (intel_display_power_is_enabled(dev_priv,
2774                                                    POWER_DOMAIN_PIPE(pipe)))
2775                         GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
2776
2777         GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
2778         GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
2779 }
2780
2781 static void gen8_irq_reset(struct drm_i915_private *dev_priv)
2782 {
2783         struct intel_uncore *uncore = &dev_priv->uncore;
2784
2785         gen8_master_intr_disable(uncore->regs);
2786
2787         gen8_gt_irq_reset(to_gt(dev_priv));
2788         gen8_display_irq_reset(dev_priv);
2789         GEN3_IRQ_RESET(uncore, GEN8_PCU_);
2790
2791         if (HAS_PCH_SPLIT(dev_priv))
2792                 ibx_irq_reset(dev_priv);
2794 }
2795
2796 static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
2797 {
2798         struct intel_uncore *uncore = &dev_priv->uncore;
2799         enum pipe pipe;
2800         u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
2801                 BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
2802
2803         if (!HAS_DISPLAY(dev_priv))
2804                 return;
2805
2806         intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);
2807
2808         if (DISPLAY_VER(dev_priv) >= 12) {
2809                 enum transcoder trans;
2810
2811                 for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
2812                         enum intel_display_power_domain domain;
2813
2814                         domain = POWER_DOMAIN_TRANSCODER(trans);
2815                         if (!intel_display_power_is_enabled(dev_priv, domain))
2816                                 continue;
2817
2818                         intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff);
2819                         intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff);
2820                 }
2821         } else {
2822                 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
2823                 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
2824         }
2825
2826         for_each_pipe(dev_priv, pipe)
2827                 if (intel_display_power_is_enabled(dev_priv,
2828                                                    POWER_DOMAIN_PIPE(pipe)))
2829                         GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
2830
2831         GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
2832         GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
2833
2834         if (DISPLAY_VER(dev_priv) >= 14)
2835                 GEN3_IRQ_RESET(uncore, PICAINTERRUPT_);
2836         else
2837                 GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);
2838
2839         if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
2840                 GEN3_IRQ_RESET(uncore, SDE);
2841 }
2842
2843 static void gen11_irq_reset(struct drm_i915_private *dev_priv)
2844 {
2845         struct intel_gt *gt = to_gt(dev_priv);
2846         struct intel_uncore *uncore = gt->uncore;
2847
2848         gen11_master_intr_disable(dev_priv->uncore.regs);
2849
2850         gen11_gt_irq_reset(gt);
2851         gen11_display_irq_reset(dev_priv);
2852
2853         GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
2854         GEN3_IRQ_RESET(uncore, GEN8_PCU_);
2855 }
2856
2857 static void dg1_irq_reset(struct drm_i915_private *dev_priv)
2858 {
2859         struct intel_gt *gt = to_gt(dev_priv);
2860         struct intel_uncore *uncore = gt->uncore;
2861
2862         dg1_master_intr_disable(dev_priv->uncore.regs);
2863
2864         gen11_gt_irq_reset(gt);
2865         gen11_display_irq_reset(dev_priv);
2866
2867         GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
2868         GEN3_IRQ_RESET(uncore, GEN8_PCU_);
2869 }
2870
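/**
 * gen8_irq_power_well_post_enable - re-enable pipe interrupts after a power well is enabled
 * @dev_priv: i915 device instance
 * @pipe_mask: mask of pipes whose interrupt registers to reinitialize
 *
 * Reprogram the IMR/IER/IIR registers of the pipes in @pipe_mask from the
 * saved interrupt masks after their power well has been turned on. Does
 * nothing if driver interrupts are not enabled.
 */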
2871 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
2872                                      u8 pipe_mask)
2873 {
2874         struct intel_uncore *uncore = &dev_priv->uncore;
2875         u32 extra_ier = GEN8_PIPE_VBLANK |
2876                 gen8_de_pipe_underrun_mask(dev_priv) |
2877                 gen8_de_pipe_flip_done_mask(dev_priv);
2878         enum pipe pipe;
2879
2880         spin_lock_irq(&dev_priv->irq_lock);
2881
2882         if (!intel_irqs_enabled(dev_priv)) {
2883                 spin_unlock_irq(&dev_priv->irq_lock);
2884                 return;
2885         }
2886
2887         for_each_pipe_masked(dev_priv, pipe, pipe_mask)
2888                 GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
2889                                   dev_priv->de_irq_mask[pipe],
2890                                   ~dev_priv->de_irq_mask[pipe] | extra_ier);
2891
2892         spin_unlock_irq(&dev_priv->irq_lock);
2893 }
2894
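/**
 * gen8_irq_power_well_pre_disable - disable pipe interrupts before a power well is disabled
 * @dev_priv: i915 device instance
 * @pipe_mask: mask of pipes whose interrupt registers to reset
 *
 * Reset the IMR/IER/IIR registers of the pipes in @pipe_mask before their
 * power well is turned off, then synchronize with the interrupt handler to
 * make sure all pending display interrupts have been processed.
 */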
2895 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
2896                                      u8 pipe_mask)
2897 {
2898         struct intel_uncore *uncore = &dev_priv->uncore;
2899         enum pipe pipe;
2900
2901         spin_lock_irq(&dev_priv->irq_lock);
2902
2903         if (!intel_irqs_enabled(dev_priv)) {
2904                 spin_unlock_irq(&dev_priv->irq_lock);
2905                 return;
2906         }
2907
2908         for_each_pipe_masked(dev_priv, pipe, pipe_mask)
2909                 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
2910
2911         spin_unlock_irq(&dev_priv->irq_lock);
2912
2913         /* make sure we're done processing display irqs */
2914         intel_synchronize_irq(dev_priv);
2915 }
2916
2917 static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
2918 {
2919         struct intel_uncore *uncore = &dev_priv->uncore;
2920
2921         intel_uncore_write(uncore, GEN8_MASTER_IRQ, 0);
2922         intel_uncore_posting_read(uncore, GEN8_MASTER_IRQ);
2923
2924         gen8_gt_irq_reset(to_gt(dev_priv));
2925
2926         GEN3_IRQ_RESET(uncore, GEN8_PCU_);
2927
2928         spin_lock_irq(&dev_priv->irq_lock);
2929         if (dev_priv->display_irqs_enabled)
2930                 vlv_display_irq_reset(dev_priv);
2931         spin_unlock_irq(&dev_priv->irq_lock);
2932 }
2933
2934 static u32 ibx_hotplug_enables(struct intel_encoder *encoder)
2935 {
2936         struct drm_i915_private *i915 = to_i915(encoder->base.dev);
2937
2938         switch (encoder->hpd_pin) {
2939         case HPD_PORT_A:
2940                 /*
2941                  * When CPU and PCH are on the same package, port A
2942                  * HPD must be enabled in both north and south.
2943                  */
2944                 return HAS_PCH_LPT_LP(i915) ?
2945                         PORTA_HOTPLUG_ENABLE : 0;
2946         case HPD_PORT_B:
2947                 return PORTB_HOTPLUG_ENABLE |
2948                         PORTB_PULSE_DURATION_2ms;
2949         case HPD_PORT_C:
2950                 return PORTC_HOTPLUG_ENABLE |
2951                         PORTC_PULSE_DURATION_2ms;
2952         case HPD_PORT_D:
2953                 return PORTD_HOTPLUG_ENABLE |
2954                         PORTD_PULSE_DURATION_2ms;
2955         default:
2956                 return 0;
2957         }
2958 }
2959
2960 static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
2961 {
2962         /*
2963          * Enable digital hotplug on the PCH, and configure the DP short pulse
2964          * duration to 2ms (which is the minimum in the DisplayPort spec).
2965          * The pulse duration bits are reserved on LPT+.
2966          */
2967         intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG,
2968                          PORTA_HOTPLUG_ENABLE |
2969                          PORTB_HOTPLUG_ENABLE |
2970                          PORTC_HOTPLUG_ENABLE |
2971                          PORTD_HOTPLUG_ENABLE |
2972                          PORTB_PULSE_DURATION_MASK |
2973                          PORTC_PULSE_DURATION_MASK |
2974                          PORTD_PULSE_DURATION_MASK,
2975                          intel_hpd_hotplug_enables(dev_priv, ibx_hotplug_enables));
2976 }
2977
2978 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
2979 {
2980         u32 hotplug_irqs, enabled_irqs;
2981
2982         enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
2983         hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
2984
2985         ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
2986
2987         ibx_hpd_detection_setup(dev_priv);
2988 }
2989
2990 static u32 icp_ddi_hotplug_enables(struct intel_encoder *encoder)
2991 {
2992         switch (encoder->hpd_pin) {
2993         case HPD_PORT_A:
2994         case HPD_PORT_B:
2995         case HPD_PORT_C:
2996         case HPD_PORT_D:
2997                 return SHOTPLUG_CTL_DDI_HPD_ENABLE(encoder->hpd_pin);
2998         default:
2999                 return 0;
3000         }
3001 }
3002
3003 static u32 icp_tc_hotplug_enables(struct intel_encoder *encoder)
3004 {
3005         switch (encoder->hpd_pin) {
3006         case HPD_PORT_TC1:
3007         case HPD_PORT_TC2:
3008         case HPD_PORT_TC3:
3009         case HPD_PORT_TC4:
3010         case HPD_PORT_TC5:
3011         case HPD_PORT_TC6:
3012                 return ICP_TC_HPD_ENABLE(encoder->hpd_pin);
3013         default:
3014                 return 0;
3015         }
3016 }
3017
3018 static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv)
3019 {
3020         intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_DDI,
3021                          SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_A) |
3022                          SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_B) |
3023                          SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_C) |
3024                          SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_D),
3025                          intel_hpd_hotplug_enables(dev_priv, icp_ddi_hotplug_enables));
3026 }
3027
3028 static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
3029 {
3030         intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_TC,
3031                          ICP_TC_HPD_ENABLE(HPD_PORT_TC1) |
3032                          ICP_TC_HPD_ENABLE(HPD_PORT_TC2) |
3033                          ICP_TC_HPD_ENABLE(HPD_PORT_TC3) |
3034                          ICP_TC_HPD_ENABLE(HPD_PORT_TC4) |
3035                          ICP_TC_HPD_ENABLE(HPD_PORT_TC5) |
3036                          ICP_TC_HPD_ENABLE(HPD_PORT_TC6),
3037                          intel_hpd_hotplug_enables(dev_priv, icp_tc_hotplug_enables));
3038 }
3039
3040 static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
3041 {
3042         u32 hotplug_irqs, enabled_irqs;
3043
3044         enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
3045         hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
3046
3047         if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP)
3048                 intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
3049
3050         ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3051
3052         icp_ddi_hpd_detection_setup(dev_priv);
3053         icp_tc_hpd_detection_setup(dev_priv);
3054 }
3055
3056 static u32 gen11_hotplug_enables(struct intel_encoder *encoder)
3057 {
3058         switch (encoder->hpd_pin) {
3059         case HPD_PORT_TC1:
3060         case HPD_PORT_TC2:
3061         case HPD_PORT_TC3:
3062         case HPD_PORT_TC4:
3063         case HPD_PORT_TC5:
3064         case HPD_PORT_TC6:
3065                 return GEN11_HOTPLUG_CTL_ENABLE(encoder->hpd_pin);
3066         default:
3067                 return 0;
3068         }
3069 }
3070
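/*
 * Set the INVERT_DDIA..DDID_HPD bits in SOUTH_CHICKEN1 to invert the
 * sense of the DDI hotplug pins.
 */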
3071 static void dg1_hpd_invert(struct drm_i915_private *i915)
3072 {
3073         u32 val = (INVERT_DDIA_HPD |
3074                    INVERT_DDIB_HPD |
3075                    INVERT_DDIC_HPD |
3076                    INVERT_DDID_HPD);
3077         intel_uncore_rmw(&i915->uncore, SOUTH_CHICKEN1, 0, val);
3078 }
3079
3080 static void dg1_hpd_irq_setup(struct drm_i915_private *dev_priv)
3081 {
3082         dg1_hpd_invert(dev_priv);
3083         icp_hpd_irq_setup(dev_priv);
3084 }
3085
3086 static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
3087 {
3088         intel_uncore_rmw(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL,
3089                          GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
3090                          GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
3091                          GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
3092                          GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) |
3093                          GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
3094                          GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6),
3095                          intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables));
3096 }
3097
3098 static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3099 {
3100         intel_uncore_rmw(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL,
3101                          GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
3102                          GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
3103                          GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
3104                          GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) |
3105                          GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
3106                          GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6),
3107                          intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables));
3108 }
3109
3110 static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
3111 {
3112         u32 hotplug_irqs, enabled_irqs;
3113
3114         enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3115         hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3116
3117         intel_uncore_rmw(&dev_priv->uncore, GEN11_DE_HPD_IMR, hotplug_irqs,
3118                          ~enabled_irqs & hotplug_irqs);
3119         intel_uncore_posting_read(&dev_priv->uncore, GEN11_DE_HPD_IMR);
3120
3121         gen11_tc_hpd_detection_setup(dev_priv);
3122         gen11_tbt_hpd_detection_setup(dev_priv);
3123
3124         if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3125                 icp_hpd_irq_setup(dev_priv);
3126 }
3127
3128 static u32 mtp_ddi_hotplug_enables(struct intel_encoder *encoder)
3129 {
3130         switch (encoder->hpd_pin) {
3131         case HPD_PORT_A:
3132         case HPD_PORT_B:
3133                 return SHOTPLUG_CTL_DDI_HPD_ENABLE(encoder->hpd_pin);
3134         default:
3135                 return 0;
3136         }
3137 }
3138
3139 static u32 mtp_tc_hotplug_enables(struct intel_encoder *encoder)
3140 {
3141         switch (encoder->hpd_pin) {
3142         case HPD_PORT_TC1:
3143         case HPD_PORT_TC2:
3144         case HPD_PORT_TC3:
3145         case HPD_PORT_TC4:
3146                 return ICP_TC_HPD_ENABLE(encoder->hpd_pin);
3147         default:
3148                 return 0;
3149         }
3150 }
3151
3152 static void mtp_ddi_hpd_detection_setup(struct drm_i915_private *i915)
3153 {
3154         intel_de_rmw(i915, SHOTPLUG_CTL_DDI,
3155                      (SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_A) |
3156                       SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_B)),
3157                      intel_hpd_hotplug_enables(i915, mtp_ddi_hotplug_enables));
3158 }
3159
3160 static void mtp_tc_hpd_detection_setup(struct drm_i915_private *i915)
3161 {
3162         intel_de_rmw(i915, SHOTPLUG_CTL_TC,
3163                      (ICP_TC_HPD_ENABLE(HPD_PORT_TC1) |
3164                       ICP_TC_HPD_ENABLE(HPD_PORT_TC2) |
3165                       ICP_TC_HPD_ENABLE(HPD_PORT_TC3) |
3166                       ICP_TC_HPD_ENABLE(HPD_PORT_TC4)),
3167                      intel_hpd_hotplug_enables(i915, mtp_tc_hotplug_enables));
3168 }
3169
3170 static void mtp_hpd_invert(struct drm_i915_private *i915)
3171 {
3172         u32 val = (INVERT_DDIA_HPD |
3173                    INVERT_DDIB_HPD |
3174                    INVERT_DDIC_HPD |
3175                    INVERT_TC1_HPD |
3176                    INVERT_TC2_HPD |
3177                    INVERT_TC3_HPD |
3178                    INVERT_TC4_HPD |
3179                    INVERT_DDID_HPD_MTP |
3180                    INVERT_DDIE_HPD);
3181         intel_de_rmw(i915, SOUTH_CHICKEN1, 0, val);
3182 }
3183
3184 static void mtp_hpd_irq_setup(struct drm_i915_private *i915)
3185 {
3186         u32 hotplug_irqs, enabled_irqs;
3187
3188         enabled_irqs = intel_hpd_enabled_irqs(i915, i915->display.hotplug.pch_hpd);
3189         hotplug_irqs = intel_hpd_hotplug_irqs(i915, i915->display.hotplug.pch_hpd);
3190
3191         intel_de_write(i915, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
3192
3193         mtp_hpd_invert(i915);
3194         ibx_display_interrupt_update(i915, hotplug_irqs, enabled_irqs);
3195
3196         mtp_ddi_hpd_detection_setup(i915);
3197         mtp_tc_hpd_detection_setup(i915);
3198 }
3199
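/*
 * Enable TBT and DP-alt hotplug detection only on the type-C pins that
 * have an encoder attached; detection stays disabled on unused pins.
 */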
3200 static void xelpdp_pica_hpd_detection_setup(struct drm_i915_private *i915)
3201 {
3202         struct intel_encoder *encoder;
3203         enum hpd_pin pin;
3204         u32 available_pins = 0;
3205
3206         BUILD_BUG_ON(BITS_PER_TYPE(available_pins) < HPD_NUM_PINS);
3207
3208         for_each_intel_encoder(&i915->drm, encoder)
3209                 available_pins |= BIT(encoder->hpd_pin);
3210
3211         for (pin = HPD_PORT_TC1; pin <= HPD_PORT_TC4; pin++) {
3212                 u32 mask = XELPDP_TBT_HOTPLUG_ENABLE |
3213                            XELPDP_DP_ALT_HOTPLUG_ENABLE;
3214
3215                 intel_de_rmw(i915, XELPDP_PORT_HOTPLUG_CTL(pin),
3216                              mask,
3217                              available_pins & BIT(pin) ? mask : 0);
3218         }
3219 }
3220
3221 static void xelpdp_hpd_irq_setup(struct drm_i915_private *i915)
3222 {
3223         u32 hotplug_irqs, enabled_irqs;
3224
3225         enabled_irqs = intel_hpd_enabled_irqs(i915, i915->display.hotplug.hpd);
3226         hotplug_irqs = intel_hpd_hotplug_irqs(i915, i915->display.hotplug.hpd);
3227
3228         intel_de_rmw(i915, PICAINTERRUPT_IMR, hotplug_irqs,
3229                      ~enabled_irqs & hotplug_irqs);
3230         intel_de_posting_read(i915, PICAINTERRUPT_IMR);
3231
3232         xelpdp_pica_hpd_detection_setup(i915);
3233
3234         if (INTEL_PCH_TYPE(i915) >= PCH_MTP)
3235                 mtp_hpd_irq_setup(i915);
3236 }
3237
3238 static u32 spt_hotplug_enables(struct intel_encoder *encoder)
3239 {
3240         switch (encoder->hpd_pin) {
3241         case HPD_PORT_A:
3242                 return PORTA_HOTPLUG_ENABLE;
3243         case HPD_PORT_B:
3244                 return PORTB_HOTPLUG_ENABLE;
3245         case HPD_PORT_C:
3246                 return PORTC_HOTPLUG_ENABLE;
3247         case HPD_PORT_D:
3248                 return PORTD_HOTPLUG_ENABLE;
3249         default:
3250                 return 0;
3251         }
3252 }
3253
3254 static u32 spt_hotplug2_enables(struct intel_encoder *encoder)
3255 {
3256         switch (encoder->hpd_pin) {
3257         case HPD_PORT_E:
3258                 return PORTE_HOTPLUG_ENABLE;
3259         default:
3260                 return 0;
3261         }
3262 }
3263
3264 static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3265 {
3266         /* Display WA #1179 WaHardHangonHotPlug: cnp */
3267         if (HAS_PCH_CNP(dev_priv)) {
3268                 intel_uncore_rmw(&dev_priv->uncore, SOUTH_CHICKEN1, CHASSIS_CLK_REQ_DURATION_MASK,
3269                                  CHASSIS_CLK_REQ_DURATION(0xf));
3270         }
3271
3272         /* Enable digital hotplug on the PCH */
3273         intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG,
3274                          PORTA_HOTPLUG_ENABLE |
3275                          PORTB_HOTPLUG_ENABLE |
3276                          PORTC_HOTPLUG_ENABLE |
3277                          PORTD_HOTPLUG_ENABLE,
3278                          intel_hpd_hotplug_enables(dev_priv, spt_hotplug_enables));
3279
3280         intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG2, PORTE_HOTPLUG_ENABLE,
3281                          intel_hpd_hotplug_enables(dev_priv, spt_hotplug2_enables));
3282 }
3283
3284 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3285 {
3286         u32 hotplug_irqs, enabled_irqs;
3287
3288         if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
3289                 intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
3290
3291         enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
3292         hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
3293
3294         ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3295
3296         spt_hpd_detection_setup(dev_priv);
3297 }
3298
3299 static u32 ilk_hotplug_enables(struct intel_encoder *encoder)
3300 {
3301         switch (encoder->hpd_pin) {
3302         case HPD_PORT_A:
3303                 return DIGITAL_PORTA_HOTPLUG_ENABLE |
3304                         DIGITAL_PORTA_PULSE_DURATION_2ms;
3305         default:
3306                 return 0;
3307         }
3308 }
3309
3310 static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
3311 {
3312         /*
3313          * Enable digital hotplug on the CPU, and configure the DP short pulse
3314          * duration to 2ms (which is the minimum in the DisplayPort spec).
3315          * The pulse duration bits are reserved on HSW+.
3316          */
3317         intel_uncore_rmw(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL,
3318                          DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_MASK,
3319                          intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables));
3320 }
3321
3322 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
3323 {
3324         u32 hotplug_irqs, enabled_irqs;
3325
3326         enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3327         hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3328
3329         if (DISPLAY_VER(dev_priv) >= 8)
3330                 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3331         else
3332                 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3333
3334         ilk_hpd_detection_setup(dev_priv);
3335
3336         ibx_hpd_irq_setup(dev_priv);
3337 }
3338
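/*
 * On BXT/GLK the HPD pin sense may need to be inverted per DDI, as
 * described by the VBT (see intel_bios_encoder_hpd_invert()).
 */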
3339 static u32 bxt_hotplug_enables(struct intel_encoder *encoder)
3340 {
3341         u32 hotplug;
3342
3343         switch (encoder->hpd_pin) {
3344         case HPD_PORT_A:
3345                 hotplug = PORTA_HOTPLUG_ENABLE;
3346                 if (intel_bios_encoder_hpd_invert(encoder->devdata))
3347                         hotplug |= BXT_DDIA_HPD_INVERT;
3348                 return hotplug;
3349         case HPD_PORT_B:
3350                 hotplug = PORTB_HOTPLUG_ENABLE;
3351                 if (intel_bios_encoder_hpd_invert(encoder->devdata))
3352                         hotplug |= BXT_DDIB_HPD_INVERT;
3353                 return hotplug;
3354         case HPD_PORT_C:
3355                 hotplug = PORTC_HOTPLUG_ENABLE;
3356                 if (intel_bios_encoder_hpd_invert(encoder->devdata))
3357                         hotplug |= BXT_DDIC_HPD_INVERT;
3358                 return hotplug;
3359         default:
3360                 return 0;
3361         }
3362 }
3363
3364 static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3365 {
3366         intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG,
3367                          PORTA_HOTPLUG_ENABLE |
3368                          PORTB_HOTPLUG_ENABLE |
3369                          PORTC_HOTPLUG_ENABLE |
3370                          BXT_DDI_HPD_INVERT_MASK,
3371                          intel_hpd_hotplug_enables(dev_priv, bxt_hotplug_enables));
3372 }
3373
3374 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3375 {
3376         u32 hotplug_irqs, enabled_irqs;
3377
3378         enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3379         hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3380
3381         bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3382
3383         bxt_hpd_detection_setup(dev_priv);
3384 }
3385
3386 /*
3387  * SDEIER is also touched by the interrupt handler to work around missed PCH
3388  * interrupts. Hence we can't update it after the interrupt handler is enabled -
3389  * instead we unconditionally enable all PCH interrupt sources here, but then
3390  * only unmask them as needed with SDEIMR.
3391  *
3392  * Note that we currently do this after installing the interrupt handler,
3393  * but before we enable the master interrupt. That should be sufficient
3394  * to avoid races with the irq handler, assuming we have MSI. Shared legacy
3395  * interrupts could still race.
3396  */
3397 static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
3398 {
3399         struct intel_uncore *uncore = &dev_priv->uncore;
3400         u32 mask;
3401
3402         if (HAS_PCH_NOP(dev_priv))
3403                 return;
3404
3405         if (HAS_PCH_IBX(dev_priv))
3406                 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3407         else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
3408                 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3409         else
3410                 mask = SDE_GMBUS_CPT;
3411
3412         GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
3413 }
3414
3415 static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
3416 {
3417         struct intel_uncore *uncore = &dev_priv->uncore;
3418         u32 display_mask, extra_mask;
3419
3420         if (GRAPHICS_VER(dev_priv) >= 7) {
3421                 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3422                                 DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
3423                 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3424                               DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3425                               DE_PLANE_FLIP_DONE_IVB(PLANE_C) |
3426                               DE_PLANE_FLIP_DONE_IVB(PLANE_B) |
3427                               DE_PLANE_FLIP_DONE_IVB(PLANE_A) |
3428                               DE_DP_A_HOTPLUG_IVB);
3429         } else {
3430                 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3431                                 DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
3432                                 DE_PIPEA_CRC_DONE | DE_POISON);
3433                 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
3434                               DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3435                               DE_PLANE_FLIP_DONE(PLANE_A) |
3436                               DE_PLANE_FLIP_DONE(PLANE_B) |
3437                               DE_DP_A_HOTPLUG);
3438         }
3439
3440         if (IS_HASWELL(dev_priv)) {
3441                 gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
3442                 display_mask |= DE_EDP_PSR_INT_HSW;
3443         }
3444
3445         if (IS_IRONLAKE_M(dev_priv))
3446                 extra_mask |= DE_PCU_EVENT;
3447
3448         dev_priv->irq_mask = ~display_mask;
3449
3450         ibx_irq_postinstall(dev_priv);
3451
3452         gen5_gt_irq_postinstall(to_gt(dev_priv));
3453
3454         GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
3455                       display_mask | extra_mask);
3456 }
3457
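/**
 * valleyview_enable_display_irqs - enable the VLV/CHV display interrupts
 * @dev_priv: i915 device instance
 *
 * Mark the display interrupts as enabled and, if driver interrupts are
 * already installed, reset and reprogram the display interrupt registers.
 * Caller must hold dev_priv->irq_lock.
 */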
3458 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3459 {
3460         lockdep_assert_held(&dev_priv->irq_lock);
3461
3462         if (dev_priv->display_irqs_enabled)
3463                 return;
3464
3465         dev_priv->display_irqs_enabled = true;
3466
3467         if (intel_irqs_enabled(dev_priv)) {
3468                 vlv_display_irq_reset(dev_priv);
3469                 vlv_display_irq_postinstall(dev_priv);
3470         }
3471 }
3472
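/**
 * valleyview_disable_display_irqs - disable the VLV/CHV display interrupts
 * @dev_priv: i915 device instance
 *
 * Mark the display interrupts as disabled and, if driver interrupts are
 * installed, reset the display interrupt registers. Caller must hold
 * dev_priv->irq_lock.
 */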
3473 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3474 {
3475         lockdep_assert_held(&dev_priv->irq_lock);
3476
3477         if (!dev_priv->display_irqs_enabled)
3478                 return;
3479
3480         dev_priv->display_irqs_enabled = false;
3481
3482         if (intel_irqs_enabled(dev_priv))
3483                 vlv_display_irq_reset(dev_priv);
3484 }
3485
3487 static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
3488 {
3489         gen5_gt_irq_postinstall(to_gt(dev_priv));
3490
3491         spin_lock_irq(&dev_priv->irq_lock);
3492         if (dev_priv->display_irqs_enabled)
3493                 vlv_display_irq_postinstall(dev_priv);
3494         spin_unlock_irq(&dev_priv->irq_lock);
3495
3496         intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3497         intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
3498 }
3499
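/*
 * Program the display engine interrupt registers: per-pipe, port, misc
 * and PSR, plus the gen11-13 DE HPD block, skipping any pipe or
 * transcoder whose power domain is currently disabled.
 */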
3500 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3501 {
3502         struct intel_uncore *uncore = &dev_priv->uncore;
3504         u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
3505                 GEN8_PIPE_CDCLK_CRC_DONE;
3506         u32 de_pipe_enables;
3507         u32 de_port_masked = gen8_de_port_aux_mask(dev_priv);
3508         u32 de_port_enables;
3509         u32 de_misc_masked = GEN8_DE_EDP_PSR;
3510         u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
3511                 BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
3512         enum pipe pipe;
3513
3514         if (!HAS_DISPLAY(dev_priv))
3515                 return;
3516
3517         if (DISPLAY_VER(dev_priv) <= 10)
3518                 de_misc_masked |= GEN8_DE_MISC_GSE;
3519
3520         if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
3521                 de_port_masked |= BXT_DE_PORT_GMBUS;
3522
3523         if (DISPLAY_VER(dev_priv) >= 11) {
3524                 enum port port;
3525
3526                 if (intel_bios_is_dsi_present(dev_priv, &port))
3527                         de_port_masked |= DSI0_TE | DSI1_TE;
3528         }
3529
3530         de_pipe_enables = de_pipe_masked |
3531                 GEN8_PIPE_VBLANK |
3532                 gen8_de_pipe_underrun_mask(dev_priv) |
3533                 gen8_de_pipe_flip_done_mask(dev_priv);
3534
3535         de_port_enables = de_port_masked;
3536         if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
3537                 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
3538         else if (IS_BROADWELL(dev_priv))
3539                 de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK;
3540
3541         if (DISPLAY_VER(dev_priv) >= 12) {
3542                 enum transcoder trans;
3543
3544                 for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
3545                         enum intel_display_power_domain domain;
3546
3547                         domain = POWER_DOMAIN_TRANSCODER(trans);
3548                         if (!intel_display_power_is_enabled(dev_priv, domain))
3549                                 continue;
3550
3551                         gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans));
3552                 }
3553         } else {
3554                 gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
3555         }
3556
3557         for_each_pipe(dev_priv, pipe) {
3558                 dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
3559
3560                 if (intel_display_power_is_enabled(dev_priv,
3561                                 POWER_DOMAIN_PIPE(pipe)))
3562                         GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
3563                                           dev_priv->de_irq_mask[pipe],
3564                                           de_pipe_enables);
3565         }
3566
3567         GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
3568         GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
3569
3570         if (IS_DISPLAY_VER(dev_priv, 11, 13)) {
3571                 u32 de_hpd_masked = 0;
3572                 u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
3573                                      GEN11_DE_TBT_HOTPLUG_MASK;
3574
3575                 GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
3576                               de_hpd_enables);
3577         }
3578 }
3579
3580 static void mtp_irq_postinstall(struct drm_i915_private *i915)
3581 {
3582         struct intel_uncore *uncore = &i915->uncore;
3583         u32 sde_mask = SDE_GMBUS_ICP | SDE_PICAINTERRUPT;
3584         u32 de_hpd_mask = XELPDP_AUX_TC_MASK;
3585         u32 de_hpd_enables = de_hpd_mask | XELPDP_DP_ALT_HOTPLUG_MASK |
3586                              XELPDP_TBT_HOTPLUG_MASK;
3587
3588         GEN3_IRQ_INIT(uncore, PICAINTERRUPT_, ~de_hpd_mask,
3589                       de_hpd_enables);
3590
3591         GEN3_IRQ_INIT(uncore, SDE, ~sde_mask, 0xffffffff);
3592 }
3593
3594 static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
3595 {
3596         struct intel_uncore *uncore = &dev_priv->uncore;
3597         u32 mask = SDE_GMBUS_ICP;
3598
3599         GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
3600 }
3601
3602 static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
3603 {
3604         if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3605                 icp_irq_postinstall(dev_priv);
3606         else if (HAS_PCH_SPLIT(dev_priv))
3607                 ibx_irq_postinstall(dev_priv);
3608
3609         gen8_gt_irq_postinstall(to_gt(dev_priv));
3610         gen8_de_irq_postinstall(dev_priv);
3611
3612         gen8_master_intr_enable(dev_priv->uncore.regs);
3613 }
3614
3615 static void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv)
3616 {
3617         if (!HAS_DISPLAY(dev_priv))
3618                 return;
3619
3620         gen8_de_irq_postinstall(dev_priv);
3621
3622         intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
3623                            GEN11_DISPLAY_IRQ_ENABLE);
3624 }
3625
3626 static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
3627 {
3628         struct intel_gt *gt = to_gt(dev_priv);
3629         struct intel_uncore *uncore = gt->uncore;
3630         u32 gu_misc_masked = GEN11_GU_MISC_GSE;
3631
3632         if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3633                 icp_irq_postinstall(dev_priv);
3634
3635         gen11_gt_irq_postinstall(gt);
3636         gen11_de_irq_postinstall(dev_priv);
3637
3638         GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
3639
3640         gen11_master_intr_enable(uncore->regs);
3641         intel_uncore_posting_read(uncore, GEN11_GFX_MSTR_IRQ);
3642 }
3643
3644 static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
3645 {
3646         struct intel_gt *gt = to_gt(dev_priv);
3647         struct intel_uncore *uncore = gt->uncore;
3648         u32 gu_misc_masked = GEN11_GU_MISC_GSE;
3649
3650         gen11_gt_irq_postinstall(gt);
3651
3652         GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
3653
3654         if (HAS_DISPLAY(dev_priv)) {
3655                 if (DISPLAY_VER(dev_priv) >= 14)
3656                         mtp_irq_postinstall(dev_priv);
3657                 else
3658                         icp_irq_postinstall(dev_priv);
3659
3660                 gen8_de_irq_postinstall(dev_priv);
3661                 intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
3662                                    GEN11_DISPLAY_IRQ_ENABLE);
3663         }
3664
3665         dg1_master_intr_enable(uncore->regs);
3666         intel_uncore_posting_read(uncore, DG1_MSTR_TILE_INTR);
3667 }
3668
3669 static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
3670 {
3671         gen8_gt_irq_postinstall(to_gt(dev_priv));
3672
3673         spin_lock_irq(&dev_priv->irq_lock);
3674         if (dev_priv->display_irqs_enabled)
3675                 vlv_display_irq_postinstall(dev_priv);
3676         spin_unlock_irq(&dev_priv->irq_lock);
3677
3678         intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
3679         intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
3680 }
3681
3682 static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
3683 {
3684         struct intel_uncore *uncore = &dev_priv->uncore;
3685
3686         i9xx_pipestat_irq_reset(dev_priv);
3687
3688         gen2_irq_reset(uncore);
3689         dev_priv->irq_mask = ~0u;
3690 }
3691
3692 static u32 i9xx_error_mask(struct drm_i915_private *i915)
3693 {
3694         /*
3695          * On gen2/3 FBC generates (seemingly spurious)
3696          * display INVALID_GTT/INVALID_GTT_PTE table errors.
3697          *
3698          * Also gen3 bspec has this to say:
3699          * "DISPA_INVALID_GTT_PTE
3700          *  [DevNapa] : Reserved. This bit does not reflect the page
3701          *              table error for the display plane A."
3702          *
3703          * Unfortunately we can't mask off individual PGTBL_ER bits,
3704          * so we just have to mask off all page table errors via EMR.
3705          */
3706         if (HAS_FBC(i915))
3707                 return ~I915_ERROR_MEMORY_REFRESH;
3708         else
3709                 return ~(I915_ERROR_PAGE_TABLE |
3710                          I915_ERROR_MEMORY_REFRESH);
3711 }
3712
3713 static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
3714 {
3715         struct intel_uncore *uncore = &dev_priv->uncore;
3716         u16 enable_mask;
3717
3718         intel_uncore_write16(uncore, EMR, i9xx_error_mask(dev_priv));
3719
3720         /* Unmask the interrupts that we always want on. */
3721         dev_priv->irq_mask =
3722                 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3723                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3724                   I915_MASTER_ERROR_INTERRUPT);
3725
3726         enable_mask =
3727                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3728                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3729                 I915_MASTER_ERROR_INTERRUPT |
3730                 I915_USER_INTERRUPT;
3731
3732         gen2_irq_init(uncore, dev_priv->irq_mask, enable_mask);
3733
3734         /* Interrupt setup is already guaranteed to be single-threaded; this is
3735          * just to make the assert_spin_locked check happy. */
3736         spin_lock_irq(&dev_priv->irq_lock);
3737         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3738         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3739         spin_unlock_irq(&dev_priv->irq_lock);
3740 }
3741
3742 static void i8xx_error_irq_ack(struct drm_i915_private *i915,
3743                                u16 *eir, u16 *eir_stuck)
3744 {
3745         struct intel_uncore *uncore = &i915->uncore;
3746         u16 emr;
3747
3748         *eir = intel_uncore_read16(uncore, EIR);
3749         intel_uncore_write16(uncore, EIR, *eir);
3750
3751         *eir_stuck = intel_uncore_read16(uncore, EIR);
3752         if (*eir_stuck == 0)
3753                 return;
3754
3755         /*
3756          * Toggle all EMR bits to make sure we get an edge
3757          * in the ISR master error bit if we don't clear
3758          * all the EIR bits. Otherwise the edge triggered
3759          * IIR on i965/g4x wouldn't notice that an interrupt
3760          * is still pending. Also some EIR bits can't be
3761          * cleared except by handling the underlying error
3762          * (or by a GPU reset) so we mask any bit that
3763          * remains set.
3764          */
3765         emr = intel_uncore_read16(uncore, EMR);
3766         intel_uncore_write16(uncore, EMR, 0xffff);
3767         intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
3768 }
3769
3770 static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
3771                                    u16 eir, u16 eir_stuck)
3772 {
3773         drm_dbg(&dev_priv->drm, "Master Error: EIR 0x%04x\n", eir);
3774
3775         if (eir_stuck)
3776                 drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n",
3777                         eir_stuck);
3778
3779         drm_dbg(&dev_priv->drm, "PGTBL_ER: 0x%08x\n",
3780                 intel_uncore_read(&dev_priv->uncore, PGTBL_ER));
3781 }
3782
3783 static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
3784                                u32 *eir, u32 *eir_stuck)
3785 {
3786         u32 emr;
3787
3788         *eir = intel_uncore_read(&dev_priv->uncore, EIR);
3789         intel_uncore_write(&dev_priv->uncore, EIR, *eir);
3790
3791         *eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR);
3792         if (*eir_stuck == 0)
3793                 return;
3794
3795         /*
3796          * Toggle all EMR bits to make sure we get an edge
3797          * in the ISR master error bit if we don't clear
3798          * all the EIR bits. Otherwise the edge triggered
3799          * IIR on i965/g4x wouldn't notice that an interrupt
3800          * is still pending. Also some EIR bits can't be
3801          * cleared except by handling the underlying error
3802          * (or by a GPU reset) so we mask any bit that
3803          * remains set.
3804          */
3805         emr = intel_uncore_read(&dev_priv->uncore, EMR);
3806         intel_uncore_write(&dev_priv->uncore, EMR, 0xffffffff);
3807         intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck);
3808 }
3809
3810 static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
3811                                    u32 eir, u32 eir_stuck)
3812 {
3813         drm_dbg(&dev_priv->drm, "Master Error, EIR 0x%08x\n", eir);
3814
3815         if (eir_stuck)
3816                 drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
3817                         eir_stuck);
3818
3819         drm_dbg(&dev_priv->drm, "PGTBL_ER: 0x%08x\n",
3820                 intel_uncore_read(&dev_priv->uncore, PGTBL_ER));
3821 }
3822
3823 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3824 {
3825         struct drm_i915_private *dev_priv = arg;
3826         irqreturn_t ret = IRQ_NONE;
3827
3828         if (!intel_irqs_enabled(dev_priv))
3829                 return IRQ_NONE;
3830
3831         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
3832         /* IRQs are synced during runtime_suspend; we don't require a wakeref */
3833
3834         do {
3835                 u32 pipe_stats[I915_MAX_PIPES] = {};
3836                 u16 eir = 0, eir_stuck = 0;
3837                 u16 iir;
3838
3839                 iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
3840                 if (iir == 0)
3841                         break;
3842
3843                 ret = IRQ_HANDLED;
3844
3845                 /* Call regardless, as some status bits might not be
3846                  * signalled in iir */
3847                 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
3848
3849                 if (iir & I915_MASTER_ERROR_INTERRUPT)
3850                         i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
3851
3852                 intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);
3853
3854                 if (iir & I915_USER_INTERRUPT)
3855                         intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);
3856
3857                 if (iir & I915_MASTER_ERROR_INTERRUPT)
3858                         i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
3859
3860                 i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
3861         } while (0);
3862
3863         pmu_irq_stats(dev_priv, ret);
3864
3865         enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3866
3867         return ret;
3868 }
3869
3870 static void i915_irq_reset(struct drm_i915_private *dev_priv)
3871 {
3872         struct intel_uncore *uncore = &dev_priv->uncore;
3873
3874         if (I915_HAS_HOTPLUG(dev_priv)) {
3875                 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3876                 intel_uncore_rmw(&dev_priv->uncore, PORT_HOTPLUG_STAT, 0, 0);
3877         }
3878
3879         i9xx_pipestat_irq_reset(dev_priv);
3880
3881         GEN3_IRQ_RESET(uncore, GEN2_);
3882         dev_priv->irq_mask = ~0u;
3883 }
3884
3885 static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
3886 {
3887         struct intel_uncore *uncore = &dev_priv->uncore;
3888         u32 enable_mask;
3889
3890         intel_uncore_write(uncore, EMR, i9xx_error_mask(dev_priv));
3891
3892         /* Unmask the interrupts that we always want on. */
3893         dev_priv->irq_mask =
3894                 ~(I915_ASLE_INTERRUPT |
3895                   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3896                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3897                   I915_MASTER_ERROR_INTERRUPT);
3898
3899         enable_mask =
3900                 I915_ASLE_INTERRUPT |
3901                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3902                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3903                 I915_MASTER_ERROR_INTERRUPT |
3904                 I915_USER_INTERRUPT;
3905
3906         if (I915_HAS_HOTPLUG(dev_priv)) {
3907                 /* Enable in IER... */
3908                 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3909                 /* and unmask in IMR */
3910                 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3911         }
3912
3913         GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
3914
3915         /* Interrupt setup is already guaranteed to be single-threaded; this is
3916          * just to make the assert_spin_locked check happy. */
3917         spin_lock_irq(&dev_priv->irq_lock);
3918         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3919         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3920         spin_unlock_irq(&dev_priv->irq_lock);
3921
3922         i915_enable_asle_pipestat(dev_priv);
3923 }
3924
3925 static irqreturn_t i915_irq_handler(int irq, void *arg)
3926 {
3927         struct drm_i915_private *dev_priv = arg;
3928         irqreturn_t ret = IRQ_NONE;
3929
3930         if (!intel_irqs_enabled(dev_priv))
3931                 return IRQ_NONE;
3932
3933         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
3934         /* IRQs are synced during runtime_suspend; we don't require a wakeref */
3935
3936         do {
3937                 u32 pipe_stats[I915_MAX_PIPES] = {};
3938                 u32 eir = 0, eir_stuck = 0;
3939                 u32 hotplug_status = 0;
3940                 u32 iir;
3941
3942                 iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
3943                 if (iir == 0)
3944                         break;
3945
3946                 ret = IRQ_HANDLED;
3947
3948                 if (I915_HAS_HOTPLUG(dev_priv) &&
3949                     iir & I915_DISPLAY_PORT_INTERRUPT)
3950                         hotplug_status = i9xx_hpd_irq_ack(dev_priv);
3951
3952                 /* Call regardless, as some status bits might not be
3953                  * signalled in iir */
3954                 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
3955
3956                 if (iir & I915_MASTER_ERROR_INTERRUPT)
3957                         i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
3958
3959                 intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
3960
3961                 if (iir & I915_USER_INTERRUPT)
3962                         intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);
3963
3964                 if (iir & I915_MASTER_ERROR_INTERRUPT)
3965                         i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
3966
3967                 if (hotplug_status)
3968                         i9xx_hpd_irq_handler(dev_priv, hotplug_status);
3969
3970                 i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
3971         } while (0);
3972
3973         pmu_irq_stats(dev_priv, ret);
3974
3975         enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3976
3977         return ret;
3978 }
3979
3980 static void i965_irq_reset(struct drm_i915_private *dev_priv)
3981 {
3982         struct intel_uncore *uncore = &dev_priv->uncore;
3983
3984         i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3985         intel_uncore_rmw(uncore, PORT_HOTPLUG_STAT, 0, 0);
3986
3987         i9xx_pipestat_irq_reset(dev_priv);
3988
3989         GEN3_IRQ_RESET(uncore, GEN2_);
3990         dev_priv->irq_mask = ~0u;
3991 }
3992
3993 static u32 i965_error_mask(struct drm_i915_private *i915)
3994 {
3995         /*
3996          * Enable some error detection; note that the instruction error mask
3997          * bit is reserved, so we leave it masked.
3998          *
3999          * i965 FBC no longer generates spurious GTT errors,
4000          * so we can always enable the page table errors.
4001          */
4002         if (IS_G4X(i915))
4003                 return ~(GM45_ERROR_PAGE_TABLE |
4004                          GM45_ERROR_MEM_PRIV |
4005                          GM45_ERROR_CP_PRIV |
4006                          I915_ERROR_MEMORY_REFRESH);
4007         else
4008                 return ~(I915_ERROR_PAGE_TABLE |
4009                          I915_ERROR_MEMORY_REFRESH);
4010 }
4011
4012 static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
4013 {
4014         struct intel_uncore *uncore = &dev_priv->uncore;
4015         u32 enable_mask;
4016
4017         intel_uncore_write(uncore, EMR, i965_error_mask(dev_priv));
4018
4019         /* Unmask the interrupts that we always want on. */
4020         dev_priv->irq_mask =
4021                 ~(I915_ASLE_INTERRUPT |
4022                   I915_DISPLAY_PORT_INTERRUPT |
4023                   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4024                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4025                   I915_MASTER_ERROR_INTERRUPT);
4026
4027         enable_mask =
4028                 I915_ASLE_INTERRUPT |
4029                 I915_DISPLAY_PORT_INTERRUPT |
4030                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4031                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4032                 I915_MASTER_ERROR_INTERRUPT |
4033                 I915_USER_INTERRUPT;
4034
4035         if (IS_G4X(dev_priv))
4036                 enable_mask |= I915_BSD_USER_INTERRUPT;
4037
4038         GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
4039
4040         /* Interrupt setup is already guaranteed to be single-threaded; this is
4041          * just to make the assert_spin_locked check happy. */
4042         spin_lock_irq(&dev_priv->irq_lock);
4043         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4044         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4045         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4046         spin_unlock_irq(&dev_priv->irq_lock);
4047
4048         i915_enable_asle_pipestat(dev_priv);
4049 }
4050
4051 static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
4052 {
4053         u32 hotplug_en;
4054
4055         lockdep_assert_held(&dev_priv->irq_lock);
4056
4057         /* Note HDMI and DP share hotplug bits. */
4058         /* Enable bits are the same for all generations. */
4059         hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
4060         /*
4061          * Programming the CRT detection parameters tends to generate a
4062          * spurious hotplug event about three seconds later. So just do it once.
4063          */
4064         if (IS_G4X(dev_priv))
4065                 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4066         hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4067
4068         /* Ignore TV since it's buggy */
4069         i915_hotplug_interrupt_update_locked(dev_priv,
4070                                              HOTPLUG_INT_EN_MASK |
4071                                              CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
4072                                              CRT_HOTPLUG_ACTIVATION_PERIOD_64,
4073                                              hotplug_en);
4074 }
4075
4076 static irqreturn_t i965_irq_handler(int irq, void *arg)
4077 {
4078         struct drm_i915_private *dev_priv = arg;
4079         irqreturn_t ret = IRQ_NONE;
4080
4081         if (!intel_irqs_enabled(dev_priv))
4082                 return IRQ_NONE;
4083
4084         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
4085         /* IRQs are synced during runtime_suspend; we don't require a wakeref */
4086
4087         do {
4088                 u32 pipe_stats[I915_MAX_PIPES] = {};
4089                 u32 eir = 0, eir_stuck = 0;
4090                 u32 hotplug_status = 0;
4091                 u32 iir;
4092
4093                 iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
4094                 if (iir == 0)
4095                         break;
4096
4097                 ret = IRQ_HANDLED;
4098
4099                 if (iir & I915_DISPLAY_PORT_INTERRUPT)
4100                         hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4101
4102                 /* Call regardless, as some status bits might not be
4103                  * signalled in iir */
4104                 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4105
4106                 if (iir & I915_MASTER_ERROR_INTERRUPT)
4107                         i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
4108
4109                 intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
4110
4111                 if (iir & I915_USER_INTERRUPT)
4112                         intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0],
4113                                             iir);
4114
4115                 if (iir & I915_BSD_USER_INTERRUPT)
4116                         intel_engine_cs_irq(to_gt(dev_priv)->engine[VCS0],
4117                                             iir >> 25);
4118
4119                 if (iir & I915_MASTER_ERROR_INTERRUPT)
4120                         i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
4121
4122                 if (hotplug_status)
4123                         i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4124
4125                 i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4126         } while (0);
4127
4128         pmu_irq_stats(dev_priv, ret);
4129
4130         enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4131
4132         return ret;
4133 }
4134
4135 struct intel_hotplug_funcs {
4136         void (*hpd_irq_setup)(struct drm_i915_private *i915);
4137 };
4138
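/*
 * Generate the per-platform hotplug irq vtables. For example,
 * "HPD_FUNCS(ilk);" below expands to:
 *
 *	static const struct intel_hotplug_funcs ilk_hpd_funcs = {
 *		.hpd_irq_setup = ilk_hpd_irq_setup,
 *	};
 */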
4139 #define HPD_FUNCS(platform)                                      \
4140 static const struct intel_hotplug_funcs platform##_hpd_funcs = { \
4141         .hpd_irq_setup = platform##_hpd_irq_setup,               \
4142 }
4143
4144 HPD_FUNCS(i915);
4145 HPD_FUNCS(xelpdp);
4146 HPD_FUNCS(dg1);
4147 HPD_FUNCS(gen11);
4148 HPD_FUNCS(bxt);
4149 HPD_FUNCS(icp);
4150 HPD_FUNCS(spt);
4151 HPD_FUNCS(ilk);
4152 #undef HPD_FUNCS
4153
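/**
 * intel_hpd_irq_setup - program the hotplug detection hardware
 * @i915: i915 device instance
 *
 * Invoke the platform specific hook that enables hotplug detection and
 * unmasks the hotplug interrupts, provided display interrupts are enabled
 * and the platform has a hotplug vtable.
 */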
4154 void intel_hpd_irq_setup(struct drm_i915_private *i915)
4155 {
4156         if (i915->display_irqs_enabled && i915->display.funcs.hotplug)
4157                 i915->display.funcs.hotplug->hpd_irq_setup(i915);
4158 }
4159
4160 /**
4161  * intel_irq_init - initializes irq support
4162  * @dev_priv: i915 device instance
4163  *
4164  * This function initializes all the irq support including work items, timers
4165  * and all the vtables. It does not set up the interrupt itself, though.
4166  */
4167 void intel_irq_init(struct drm_i915_private *dev_priv)
4168 {
4169         int i;
4170
4171         INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
4172         for (i = 0; i < MAX_L3_SLICES; ++i)
4173                 dev_priv->l3_parity.remap_info[i] = NULL;
4174
4175         /* pre-gen11 the guc irq bits are in the upper 16 bits of the pm reg */
4176         if (HAS_GT_UC(dev_priv) && GRAPHICS_VER(dev_priv) < 11)
4177                 to_gt(dev_priv)->pm_guc_events = GUC_INTR_GUC2HOST << 16;
4178
4179         if (!HAS_DISPLAY(dev_priv))
4180                 return;
4181
4182         intel_hpd_init_pins(dev_priv);
4183
4184         intel_hpd_init_early(dev_priv);
4185
4186         dev_priv->drm.vblank_disable_immediate = true;
4187
4188         /* Most platforms treat the display irq block as an always-on
4189          * power domain. vlv/chv can disable it at runtime and need
4190          * special care to avoid writing any of the display block registers
4191          * outside of the power domain. We defer setting up the display irqs
4192          * in this case to the runtime pm.
4193          */
4194         dev_priv->display_irqs_enabled = true;
4195         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4196                 dev_priv->display_irqs_enabled = false;
4197
4198         if (HAS_GMCH(dev_priv)) {
4199                 if (I915_HAS_HOTPLUG(dev_priv))
4200                         dev_priv->display.funcs.hotplug = &i915_hpd_funcs;
4201         } else {
4202                 if (HAS_PCH_DG2(dev_priv))
4203                         dev_priv->display.funcs.hotplug = &icp_hpd_funcs;
4204                 else if (HAS_PCH_DG1(dev_priv))
4205                         dev_priv->display.funcs.hotplug = &dg1_hpd_funcs;
4206                 else if (DISPLAY_VER(dev_priv) >= 14)
4207                         dev_priv->display.funcs.hotplug = &xelpdp_hpd_funcs;
4208                 else if (DISPLAY_VER(dev_priv) >= 11)
4209                         dev_priv->display.funcs.hotplug = &gen11_hpd_funcs;
4210                 else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
4211                         dev_priv->display.funcs.hotplug = &bxt_hpd_funcs;
4212                 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
4213                         dev_priv->display.funcs.hotplug = &icp_hpd_funcs;
4214                 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
4215                         dev_priv->display.funcs.hotplug = &spt_hpd_funcs;
4216                 else
4217                         dev_priv->display.funcs.hotplug = &ilk_hpd_funcs;
4218         }
4219 }
4220
4221 /**
4222  * intel_irq_fini - deinitializes IRQ support
4223  * @i915: i915 device instance
4224  *
4225  * This function deinitializes all the IRQ support.
4226  */
4227 void intel_irq_fini(struct drm_i915_private *i915)
4228 {
4229         int i;
4230
4231         for (i = 0; i < MAX_L3_SLICES; ++i)
4232                 kfree(i915->l3_parity.remap_info[i]);
4233 }
4234
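/* Select the top-level interrupt handler matching the platform's irq layout. */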
4235 static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
4236 {
4237         if (HAS_GMCH(dev_priv)) {
4238                 if (IS_CHERRYVIEW(dev_priv))
4239                         return cherryview_irq_handler;
4240                 else if (IS_VALLEYVIEW(dev_priv))
4241                         return valleyview_irq_handler;
4242                 else if (GRAPHICS_VER(dev_priv) == 4)
4243                         return i965_irq_handler;
4244                 else if (GRAPHICS_VER(dev_priv) == 3)
4245                         return i915_irq_handler;
4246                 else
4247                         return i8xx_irq_handler;
4248         } else {
4249                 if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
4250                         return dg1_irq_handler;
4251                 else if (GRAPHICS_VER(dev_priv) >= 11)
4252                         return gen11_irq_handler;
4253                 else if (GRAPHICS_VER(dev_priv) >= 8)
4254                         return gen8_irq_handler;
4255                 else
4256                         return ilk_irq_handler;
4257         }
4258 }
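
/*
 * A hedged sketch of the contract every handler returned above fulfills
 * (simplified; not any real platform handler):
 *
 *	static irqreturn_t example_irq_handler(int irq, void *arg)
 *	{
 *		struct drm_i915_private *i915 = arg;
 *
 *		if (!intel_irqs_enabled(i915))
 *			return IRQ_NONE; // device down, or another device on the line
 *
 *		// read, ack and service the master interrupt register(s)
 *
 *		return IRQ_HANDLED; // counted by the PMU interrupt statistics
 *	}
 *
 * On a shared line, returning IRQ_NONE tells the IRQ core the interrupt
 * originated from another device.
 */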
4259
4260 static void intel_irq_reset(struct drm_i915_private *dev_priv)
4261 {
4262         if (HAS_GMCH(dev_priv)) {
4263                 if (IS_CHERRYVIEW(dev_priv))
4264                         cherryview_irq_reset(dev_priv);
4265                 else if (IS_VALLEYVIEW(dev_priv))
4266                         valleyview_irq_reset(dev_priv);
4267                 else if (GRAPHICS_VER(dev_priv) == 4)
4268                         i965_irq_reset(dev_priv);
4269                 else if (GRAPHICS_VER(dev_priv) == 3)
4270                         i915_irq_reset(dev_priv);
4271                 else
4272                         i8xx_irq_reset(dev_priv);
4273         } else {
4274                 if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
4275                         dg1_irq_reset(dev_priv);
4276                 else if (GRAPHICS_VER(dev_priv) >= 11)
4277                         gen11_irq_reset(dev_priv);
4278                 else if (GRAPHICS_VER(dev_priv) >= 8)
4279                         gen8_irq_reset(dev_priv);
4280                 else
4281                         ilk_irq_reset(dev_priv);
4282         }
4283 }
4284
4285 static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
4286 {
4287         if (HAS_GMCH(dev_priv)) {
4288                 if (IS_CHERRYVIEW(dev_priv))
4289                         cherryview_irq_postinstall(dev_priv);
4290                 else if (IS_VALLEYVIEW(dev_priv))
4291                         valleyview_irq_postinstall(dev_priv);
4292                 else if (GRAPHICS_VER(dev_priv) == 4)
4293                         i965_irq_postinstall(dev_priv);
4294                 else if (GRAPHICS_VER(dev_priv) == 3)
4295                         i915_irq_postinstall(dev_priv);
4296                 else
4297                         i8xx_irq_postinstall(dev_priv);
4298         } else {
4299                 if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
4300                         dg1_irq_postinstall(dev_priv);
4301                 else if (GRAPHICS_VER(dev_priv) >= 11)
4302                         gen11_irq_postinstall(dev_priv);
4303                 else if (GRAPHICS_VER(dev_priv) >= 8)
4304                         gen8_irq_postinstall(dev_priv);
4305                 else
4306                         ilk_irq_postinstall(dev_priv);
4307         }
4308 }
4309
4310 /**
4311  * intel_irq_install - enables the hardware interrupt
4312  * @dev_priv: i915 device instance
4313  *
4314  * This function enables the hardware interrupt handling, but leaves hotplug
4315  * handling disabled. It is called after intel_irq_init().
4316  *
4317  * In the driver load and resume code we need working interrupts in a few places
4318  * but don't want to deal with the hassle of concurrent probe and hotplug
4319  * workers. Hence the split into a two-stage approach.
4320  */
4321 int intel_irq_install(struct drm_i915_private *dev_priv)
4322 {
4323         int irq = to_pci_dev(dev_priv->drm.dev)->irq;
4324         int ret;
4325
4326         /*
4327          * We enable some interrupt sources in our postinstall hooks, so mark
4328          * interrupts as enabled _before_ actually enabling them to avoid
4329          * special cases in our ordering checks.
4330          */
4331         dev_priv->runtime_pm.irqs_enabled = true;
4332
4333         dev_priv->irq_enabled = true;
4334
4335         intel_irq_reset(dev_priv);
4336
4337         ret = request_irq(irq, intel_irq_handler(dev_priv),
4338                           IRQF_SHARED, DRIVER_NAME, dev_priv);
4339         if (ret < 0) {
4340                 dev_priv->irq_enabled = false;
4341                 return ret;
4342         }
4343
4344         intel_irq_postinstall(dev_priv);
4345
4346         return ret;
4347 }
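
/*
 * A hedged sketch of the two-stage bring-up described above, as it might
 * look from a probe path (the error label is illustrative only):
 *
 *	intel_irq_init(i915);		// pick handlers, init hotplug pins
 *
 *	ret = intel_irq_install(i915);	// reset, request_irq(), postinstall
 *	if (ret)
 *		goto err_irq;
 *
 *	// ... probe outputs without racing concurrent hotplug workers ...
 *
 * Hotplug processing proper is only enabled later, once the probe code
 * can tolerate concurrent detection.
 */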
4348
4349 /**
4350  * intel_irq_uninstall - finalizes all irq handling
4351  * @dev_priv: i915 device instance
4352  *
4353  * This stops interrupt and hotplug handling, and unregisters and frees all
4354  * resources acquired in the init functions.
4355  */
4356 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4357 {
4358         int irq = to_pci_dev(dev_priv->drm.dev)->irq;
4359
4360         /*
4361          * FIXME we can get called twice during driver probe
4362          * error handling as well as during driver remove due to
4363          * intel_display_driver_remove() calling us out of sequence.
4364          * Would be nice if it didn't do that...
4365          */
4366         if (!dev_priv->irq_enabled)
4367                 return;
4368
4369         dev_priv->irq_enabled = false;
4370
4371         intel_irq_reset(dev_priv);
4372
4373         free_irq(irq, dev_priv);
4374
4375         intel_hpd_cancel_work(dev_priv);
4376         dev_priv->runtime_pm.irqs_enabled = false;
4377 }
4378
4379 /**
4380  * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4381  * @dev_priv: i915 device instance
4382  *
4383  * This function is used to disable interrupts at runtime, both in the runtime
4384  * pm and the system suspend/resume code.
4385  */
4386 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4387 {
4388         intel_irq_reset(dev_priv);
4389         dev_priv->runtime_pm.irqs_enabled = false;
4390         intel_synchronize_irq(dev_priv);
4391 }
4392
4393 /**
4394  * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4395  * @dev_priv: i915 device instance
4396  *
4397  * This function is used to enable interrupts at runtime, both in the runtime
4398  * pm and the system suspend/resume code.
4399  */
4400 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4401 {
4402         dev_priv->runtime_pm.irqs_enabled = true;
4403         intel_irq_reset(dev_priv);
4404         intel_irq_postinstall(dev_priv);
4405 }
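
/*
 * A minimal sketch of how the two helpers above pair up across a
 * suspend/resume cycle (simplified; the real paths do more):
 *
 *	// suspend path
 *	intel_runtime_pm_disable_interrupts(i915);	// reset + synchronize
 *
 *	// ... power down, later power back up ...
 *
 *	// resume path
 *	intel_runtime_pm_enable_interrupts(i915);	// reset + postinstall
 *
 * The intel_irq_reset() on the enable side brings the freshly powered-up
 * hardware to a known masked state before anything is unmasked.
 */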
4406
4407 bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
4408 {
4409         return dev_priv->runtime_pm.irqs_enabled;
4410 }
4411
4412 void intel_synchronize_irq(struct drm_i915_private *i915)
4413 {
4414         synchronize_irq(to_pci_dev(i915->drm.dev)->irq);
4415 }
4416
4417 void intel_synchronize_hardirq(struct drm_i915_private *i915)
4418 {
4419         synchronize_hardirq(to_pci_dev(i915->drm.dev)->irq);
4420 }
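
/*
 * A hedged usage sketch for the two synchronization helpers above: code
 * tearing down state that an interrupt handler may touch typically masks
 * the source first, then flushes any handler still running elsewhere:
 *
 *	// mask/disable the interrupt source in hardware ...
 *	intel_synchronize_irq(i915);	// waits for hardirq and threaded parts
 *	// ... now freeing the data the handler used is safe
 *
 * intel_synchronize_hardirq() waits only for the hard IRQ context, which
 * is cheaper when no threaded handler can be in flight.
 */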