drm/i915: Use czclk_freq in vlv c0 residency calculations
drivers/gpu/drm/i915/i915_irq.c
1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2  */
3 /*
4  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  */
28
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31 #include <linux/sysrq.h>
32 #include <linux/slab.h>
33 #include <linux/circ_buf.h>
34 #include <drm/drmP.h>
35 #include <drm/i915_drm.h>
36 #include "i915_drv.h"
37 #include "i915_trace.h"
38 #include "intel_drv.h"
39
40 /**
41  * DOC: interrupt handling
42  *
43  * These functions provide the basic support for enabling and disabling
44  * interrupt handling. There's a lot more functionality in i915_irq.c
45  * and related files, but that will be described in separate chapters.
46  */
47
48 static const u32 hpd_ilk[HPD_NUM_PINS] = {
49         [HPD_PORT_A] = DE_DP_A_HOTPLUG,
50 };
51
52 static const u32 hpd_ivb[HPD_NUM_PINS] = {
53         [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
54 };
55
56 static const u32 hpd_bdw[HPD_NUM_PINS] = {
57         [HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
58 };
59
60 static const u32 hpd_ibx[HPD_NUM_PINS] = {
61         [HPD_CRT] = SDE_CRT_HOTPLUG,
62         [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
63         [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
64         [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
65         [HPD_PORT_D] = SDE_PORTD_HOTPLUG
66 };
67
68 static const u32 hpd_cpt[HPD_NUM_PINS] = {
69         [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
70         [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
71         [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
72         [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
73         [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
74 };
75
76 static const u32 hpd_spt[HPD_NUM_PINS] = {
77         [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
78         [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
79         [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
80         [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
81         [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
82 };
83
84 static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
85         [HPD_CRT] = CRT_HOTPLUG_INT_EN,
86         [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
87         [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
88         [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
89         [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
90         [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
91 };
92
93 static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
94         [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
95         [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
96         [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
97         [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
98         [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
99         [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
100 };
101
102 static const u32 hpd_status_i915[HPD_NUM_PINS] = {
103         [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
104         [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
105         [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
106         [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
107         [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
108         [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
109 };
110
111 /* BXT hpd list */
112 static const u32 hpd_bxt[HPD_NUM_PINS] = {
113         [HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
114         [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
115         [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
116 };
117
118 /* IIR can theoretically queue up two events. Be paranoid. */
119 #define GEN8_IRQ_RESET_NDX(type, which) do { \
120         I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
121         POSTING_READ(GEN8_##type##_IMR(which)); \
122         I915_WRITE(GEN8_##type##_IER(which), 0); \
123         I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
124         POSTING_READ(GEN8_##type##_IIR(which)); \
125         I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
126         POSTING_READ(GEN8_##type##_IIR(which)); \
127 } while (0)
128
129 #define GEN5_IRQ_RESET(type) do { \
130         I915_WRITE(type##IMR, 0xffffffff); \
131         POSTING_READ(type##IMR); \
132         I915_WRITE(type##IER, 0); \
133         I915_WRITE(type##IIR, 0xffffffff); \
134         POSTING_READ(type##IIR); \
135         I915_WRITE(type##IIR, 0xffffffff); \
136         POSTING_READ(type##IIR); \
137 } while (0)
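
/*
 * For illustration only: GEN5_IRQ_RESET(GT) token-pastes onto the register
 * names, i.e. it masks everything in GTIMR, zeroes GTIER and clears GTIIR
 * twice, per the two-queued-events note above.  GEN8_IRQ_RESET_NDX does the
 * same for the per-instance GEN8_*_IMR/IER/IIR(which) registers.
 */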
138
139 /*
140  * We should clear IMR at preinstall/uninstall, and just check at postinstall.
141  */
142 #define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
143         u32 val = I915_READ(reg); \
144         if (val) { \
145                 WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
146                      (reg), val); \
147                 I915_WRITE((reg), 0xffffffff); \
148                 POSTING_READ(reg); \
149                 I915_WRITE((reg), 0xffffffff); \
150                 POSTING_READ(reg); \
151         } \
152 } while (0)
153
154 #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
155         GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
156         I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
157         I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
158         POSTING_READ(GEN8_##type##_IMR(which)); \
159 } while (0)
160
161 #define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
162         GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
163         I915_WRITE(type##IER, (ier_val)); \
164         I915_WRITE(type##IMR, (imr_val)); \
165         POSTING_READ(type##IMR); \
166 } while (0)
167
168 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
169
170 /* For display hotplug interrupt */
171 static inline void
172 i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
173                                      uint32_t mask,
174                                      uint32_t bits)
175 {
176         uint32_t val;
177
178         assert_spin_locked(&dev_priv->irq_lock);
179         WARN_ON(bits & ~mask);
180
181         val = I915_READ(PORT_HOTPLUG_EN);
182         val &= ~mask;
183         val |= bits;
184         I915_WRITE(PORT_HOTPLUG_EN, val);
185 }
186
187 /**
188  * i915_hotplug_interrupt_update - update hotplug interrupt enable
189  * @dev_priv: driver private
190  * @mask: bits to update
191  * @bits: bits to enable
192  * NOTE: the HPD enable bits are modified both inside and outside
193  * of an interrupt context. To keep those read-modify-write cycles
194  * from interfering, these bits are protected by a spinlock. Since this
195  * function is usually not called from a context where the lock is
196  * held already, this function acquires the lock itself. A non-locking
197  * version is also available.
198  */
199 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
200                                    uint32_t mask,
201                                    uint32_t bits)
202 {
203         spin_lock_irq(&dev_priv->irq_lock);
204         i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
205         spin_unlock_irq(&dev_priv->irq_lock);
206 }
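
/*
 * Illustrative use only (CRT_HOTPLUG_INT_EN as an example bit): a caller
 * that wants to turn off just the CRT hotplug detect enable would do
 *
 *	i915_hotplug_interrupt_update(dev_priv, CRT_HOTPLUG_INT_EN, 0);
 *
 * i.e. @mask selects which bits are touched and @bits supplies their new
 * values.
 */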
207
208 /**
209  * ilk_update_display_irq - update DEIMR
210  * @dev_priv: driver private
211  * @interrupt_mask: mask of interrupt bits to update
212  * @enabled_irq_mask: mask of interrupt bits to enable
213  */
214 static void ilk_update_display_irq(struct drm_i915_private *dev_priv,
215                                    uint32_t interrupt_mask,
216                                    uint32_t enabled_irq_mask)
217 {
218         uint32_t new_val;
219
220         assert_spin_locked(&dev_priv->irq_lock);
221
222         WARN_ON(enabled_irq_mask & ~interrupt_mask);
223
224         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
225                 return;
226
227         new_val = dev_priv->irq_mask;
228         new_val &= ~interrupt_mask;
229         new_val |= (~enabled_irq_mask & interrupt_mask);
230
231         if (new_val != dev_priv->irq_mask) {
232                 dev_priv->irq_mask = new_val;
233                 I915_WRITE(DEIMR, dev_priv->irq_mask);
234                 POSTING_READ(DEIMR);
235         }
236 }
237
238 void
239 ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
240 {
241         ilk_update_display_irq(dev_priv, mask, mask);
242 }
243
244 void
245 ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
246 {
247         ilk_update_display_irq(dev_priv, mask, 0);
248 }
249
250 /**
251  * ilk_update_gt_irq - update GTIMR
252  * @dev_priv: driver private
253  * @interrupt_mask: mask of interrupt bits to update
254  * @enabled_irq_mask: mask of interrupt bits to enable
255  */
256 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
257                               uint32_t interrupt_mask,
258                               uint32_t enabled_irq_mask)
259 {
260         assert_spin_locked(&dev_priv->irq_lock);
261
262         WARN_ON(enabled_irq_mask & ~interrupt_mask);
263
264         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
265                 return;
266
267         dev_priv->gt_irq_mask &= ~interrupt_mask;
268         dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
269         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
270         POSTING_READ(GTIMR);
271 }
272
273 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
274 {
275         ilk_update_gt_irq(dev_priv, mask, mask);
276 }
277
278 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
279 {
280         ilk_update_gt_irq(dev_priv, mask, 0);
281 }
282
283 static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
284 {
285         return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
286 }
287
288 static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
289 {
290         return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
291 }
292
293 static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
294 {
295         return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
296 }
297
298 /**
299  * snb_update_pm_irq - update GEN6_PMIMR
300  * @dev_priv: driver private
301  * @interrupt_mask: mask of interrupt bits to update
302  * @enabled_irq_mask: mask of interrupt bits to enable
303  */
304 static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
305                               uint32_t interrupt_mask,
306                               uint32_t enabled_irq_mask)
307 {
308         uint32_t new_val;
309
310         WARN_ON(enabled_irq_mask & ~interrupt_mask);
311
312         assert_spin_locked(&dev_priv->irq_lock);
313
314         new_val = dev_priv->pm_irq_mask;
315         new_val &= ~interrupt_mask;
316         new_val |= (~enabled_irq_mask & interrupt_mask);
317
318         if (new_val != dev_priv->pm_irq_mask) {
319                 dev_priv->pm_irq_mask = new_val;
320                 I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
321                 POSTING_READ(gen6_pm_imr(dev_priv));
322         }
323 }
324
325 void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
326 {
327         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
328                 return;
329
330         snb_update_pm_irq(dev_priv, mask, mask);
331 }
332
333 static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
334                                   uint32_t mask)
335 {
336         snb_update_pm_irq(dev_priv, mask, 0);
337 }
338
339 void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
340 {
341         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
342                 return;
343
344         __gen6_disable_pm_irq(dev_priv, mask);
345 }
346
347 void gen6_reset_rps_interrupts(struct drm_device *dev)
348 {
349         struct drm_i915_private *dev_priv = dev->dev_private;
350         uint32_t reg = gen6_pm_iir(dev_priv);
351
352         spin_lock_irq(&dev_priv->irq_lock);
353         I915_WRITE(reg, dev_priv->pm_rps_events);
354         I915_WRITE(reg, dev_priv->pm_rps_events);
355         POSTING_READ(reg);
356         dev_priv->rps.pm_iir = 0;
357         spin_unlock_irq(&dev_priv->irq_lock);
358 }
359
360 void gen6_enable_rps_interrupts(struct drm_device *dev)
361 {
362         struct drm_i915_private *dev_priv = dev->dev_private;
363
364         spin_lock_irq(&dev_priv->irq_lock);
365
366         WARN_ON(dev_priv->rps.pm_iir);
367         WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
368         dev_priv->rps.interrupts_enabled = true;
369         I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
370                                 dev_priv->pm_rps_events);
371         gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
372
373         spin_unlock_irq(&dev_priv->irq_lock);
374 }
375
376 u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
377 {
378         /*
379          * SNB and IVB can hard hang, and VLV and CHV may hard hang, on a
380          * looping batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
381          *
382          * TODO: verify if this can be reproduced on VLV,CHV.
383          */
384         if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
385                 mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;
386
387         if (INTEL_INFO(dev_priv)->gen >= 8)
388                 mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
389
390         return mask;
391 }
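
/*
 * Usage sketch (see gen6_disable_rps_interrupts() below): callers build the
 * GEN6_PMINTRMSK value they want and pass it through this helper, so that
 * the EI-expired interrupts stay unmasked on the platforms that need them.
 */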
392
393 void gen6_disable_rps_interrupts(struct drm_device *dev)
394 {
395         struct drm_i915_private *dev_priv = dev->dev_private;
396
397         spin_lock_irq(&dev_priv->irq_lock);
398         dev_priv->rps.interrupts_enabled = false;
399         spin_unlock_irq(&dev_priv->irq_lock);
400
401         cancel_work_sync(&dev_priv->rps.work);
402
403         spin_lock_irq(&dev_priv->irq_lock);
404
405         I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));
406
407         __gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
408         I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
409                                 ~dev_priv->pm_rps_events);
410
411         spin_unlock_irq(&dev_priv->irq_lock);
412
413         synchronize_irq(dev->irq);
414 }
415
416 /**
417  * bdw_update_port_irq - update DE port interrupt
418  * @dev_priv: driver private
419  * @interrupt_mask: mask of interrupt bits to update
420  * @enabled_irq_mask: mask of interrupt bits to enable
421  */
422 static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
423                                 uint32_t interrupt_mask,
424                                 uint32_t enabled_irq_mask)
425 {
426         uint32_t new_val;
427         uint32_t old_val;
428
429         assert_spin_locked(&dev_priv->irq_lock);
430
431         WARN_ON(enabled_irq_mask & ~interrupt_mask);
432
433         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
434                 return;
435
436         old_val = I915_READ(GEN8_DE_PORT_IMR);
437
438         new_val = old_val;
439         new_val &= ~interrupt_mask;
440         new_val |= (~enabled_irq_mask & interrupt_mask);
441
442         if (new_val != old_val) {
443                 I915_WRITE(GEN8_DE_PORT_IMR, new_val);
444                 POSTING_READ(GEN8_DE_PORT_IMR);
445         }
446 }
447
448 /**
449  * ibx_display_interrupt_update - update SDEIMR
450  * @dev_priv: driver private
451  * @interrupt_mask: mask of interrupt bits to update
452  * @enabled_irq_mask: mask of interrupt bits to enable
453  */
454 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
455                                   uint32_t interrupt_mask,
456                                   uint32_t enabled_irq_mask)
457 {
458         uint32_t sdeimr = I915_READ(SDEIMR);
459         sdeimr &= ~interrupt_mask;
460         sdeimr |= (~enabled_irq_mask & interrupt_mask);
461
462         WARN_ON(enabled_irq_mask & ~interrupt_mask);
463
464         assert_spin_locked(&dev_priv->irq_lock);
465
466         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
467                 return;
468
469         I915_WRITE(SDEIMR, sdeimr);
470         POSTING_READ(SDEIMR);
471 }
472
473 static void
474 __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
475                        u32 enable_mask, u32 status_mask)
476 {
477         u32 reg = PIPESTAT(pipe);
478         u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
479
480         assert_spin_locked(&dev_priv->irq_lock);
481         WARN_ON(!intel_irqs_enabled(dev_priv));
482
483         if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
484                       status_mask & ~PIPESTAT_INT_STATUS_MASK,
485                       "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
486                       pipe_name(pipe), enable_mask, status_mask))
487                 return;
488
489         if ((pipestat & enable_mask) == enable_mask)
490                 return;
491
492         dev_priv->pipestat_irq_mask[pipe] |= status_mask;
493
494         /* Enable the interrupt, clear any pending status */
495         pipestat |= enable_mask | status_mask;
496         I915_WRITE(reg, pipestat);
497         POSTING_READ(reg);
498 }
499
500 static void
501 __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
502                         u32 enable_mask, u32 status_mask)
503 {
504         u32 reg = PIPESTAT(pipe);
505         u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
506
507         assert_spin_locked(&dev_priv->irq_lock);
508         WARN_ON(!intel_irqs_enabled(dev_priv));
509
510         if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
511                       status_mask & ~PIPESTAT_INT_STATUS_MASK,
512                       "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
513                       pipe_name(pipe), enable_mask, status_mask))
514                 return;
515
516         if ((pipestat & enable_mask) == 0)
517                 return;
518
519         dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
520
521         pipestat &= ~enable_mask;
522         I915_WRITE(reg, pipestat);
523         POSTING_READ(reg);
524 }
525
526 static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
527 {
528         u32 enable_mask = status_mask << 16;
529
530         /*
531          * On pipe A we don't support the PSR interrupt yet,
532          * on pipe B and C the same bit MBZ.
533          */
534         if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
535                 return 0;
536         /*
537          * On pipe B and C we don't support the PSR interrupt yet, on pipe
538          * A the same bit is for perf counters which we don't use either.
539          */
540         if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
541                 return 0;
542
543         enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
544                          SPRITE0_FLIP_DONE_INT_EN_VLV |
545                          SPRITE1_FLIP_DONE_INT_EN_VLV);
546         if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
547                 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
548         if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
549                 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
550
551         return enable_mask;
552 }
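
/*
 * Reading of the helper above (illustrative): most enable bits are simply
 * the status bits shifted up by 16, while the FIFO underrun and sprite
 * flip-done bits are special-cased, with the flip-done events mapped to
 * their dedicated SPRITE*_FLIP_DONE_INT_EN_VLV enable bits instead of the
 * plain shift.
 */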
553
554 void
555 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
556                      u32 status_mask)
557 {
558         u32 enable_mask;
559
560         if (IS_VALLEYVIEW(dev_priv->dev))
561                 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
562                                                            status_mask);
563         else
564                 enable_mask = status_mask << 16;
565         __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
566 }
567
568 void
569 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
570                       u32 status_mask)
571 {
572         u32 enable_mask;
573
574         if (IS_VALLEYVIEW(dev_priv->dev))
575                 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
576                                                            status_mask);
577         else
578                 enable_mask = status_mask << 16;
579         __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
580 }
581
582 /**
583  * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
584  */
585 static void i915_enable_asle_pipestat(struct drm_device *dev)
586 {
587         struct drm_i915_private *dev_priv = dev->dev_private;
588
589         if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
590                 return;
591
592         spin_lock_irq(&dev_priv->irq_lock);
593
594         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
595         if (INTEL_INFO(dev)->gen >= 4)
596                 i915_enable_pipestat(dev_priv, PIPE_A,
597                                      PIPE_LEGACY_BLC_EVENT_STATUS);
598
599         spin_unlock_irq(&dev_priv->irq_lock);
600 }
601
602 /*
603  * This timing diagram depicts the video signal in and
604  * around the vertical blanking period.
605  *
606  * Assumptions about the fictitious mode used in this example:
607  *  vblank_start >= 3
608  *  vsync_start = vblank_start + 1
609  *  vsync_end = vblank_start + 2
610  *  vtotal = vblank_start + 3
611  *
612  *           start of vblank:
613  *           latch double buffered registers
614  *           increment frame counter (ctg+)
615  *           generate start of vblank interrupt (gen4+)
616  *           |
617  *           |          frame start:
618  *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
619  *           |          may be shifted forward 1-3 extra lines via PIPECONF
620  *           |          |
621  *           |          |  start of vsync:
622  *           |          |  generate vsync interrupt
623  *           |          |  |
624  * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
625  *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
626  * ----va---> <-----------------vb--------------------> <--------va-------------
627  *       |          |       <----vs----->                     |
628  * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
629  * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
630  * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
631  *       |          |                                         |
632  *       last visible pixel                                   first visible pixel
633  *                  |                                         increment frame counter (gen3/4)
634  *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
635  *
636  * x  = horizontal active
637  * _  = horizontal blanking
638  * hs = horizontal sync
639  * va = vertical active
640  * vb = vertical blanking
641  * vs = vertical sync
642  * vbs = vblank_start (number)
643  *
644  * Summary:
645  * - most events happen at the start of horizontal sync
646  * - frame start happens at the start of horizontal blank, 1-4 lines
647  *   (depending on PIPECONF settings) after the start of vblank
648  * - gen3/4 pixel and frame counter are synchronized with the start
649  *   of horizontal active on the first line of vertical active
650  */
651
652 static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
653 {
654         /* Gen2 doesn't have a hardware frame counter */
655         return 0;
656 }
657
658 /* Called from drm generic code, passed a 'crtc', which
659  * we use as a pipe index
660  */
661 static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
662 {
663         struct drm_i915_private *dev_priv = dev->dev_private;
664         unsigned long high_frame;
665         unsigned long low_frame;
666         u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
667         struct intel_crtc *intel_crtc =
668                 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
669         const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
670
671         htotal = mode->crtc_htotal;
672         hsync_start = mode->crtc_hsync_start;
673         vbl_start = mode->crtc_vblank_start;
674         if (mode->flags & DRM_MODE_FLAG_INTERLACE)
675                 vbl_start = DIV_ROUND_UP(vbl_start, 2);
676
677         /* Convert to pixel count */
678         vbl_start *= htotal;
679
680         /* Start of vblank event occurs at start of hsync */
681         vbl_start -= htotal - hsync_start;
682
683         high_frame = PIPEFRAME(pipe);
684         low_frame = PIPEFRAMEPIXEL(pipe);
685
686         /*
687          * High & low register fields aren't synchronized, so make sure
688          * we get a low value that's stable across two reads of the high
689          * register.
690          */
691         do {
692                 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
693                 low   = I915_READ(low_frame);
694                 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
695         } while (high1 != high2);
696
697         high1 >>= PIPE_FRAME_HIGH_SHIFT;
698         pixel = low & PIPE_PIXEL_MASK;
699         low >>= PIPE_FRAME_LOW_SHIFT;
700
701         /*
702          * The frame counter increments at beginning of active.
703          * Cook up a vblank counter by also checking the pixel
704          * counter against vblank start.
705          */
706         return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
707 }
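
/*
 * Worked example with made-up timings: htotal = 2200, hsync_start = 2008,
 * vblank_start = 1125 lines.  vbl_start becomes 1125 * 2200 = 2475000
 * pixels, minus (2200 - 2008) = 192, giving 2474808.  A pixel counter at or
 * beyond that value means the hardware frame counter (which only increments
 * at the start of active video) still holds the previous frame, so 1 is
 * added before masking the result down to 24 bits.
 */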
708
709 static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
710 {
711         struct drm_i915_private *dev_priv = dev->dev_private;
712         int reg = PIPE_FRMCOUNT_GM45(pipe);
713
714         return I915_READ(reg);
715 }
716
717 /* raw reads, only for fast reads of display block, no need for forcewake etc. */
718 #define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
719
720 static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
721 {
722         struct drm_device *dev = crtc->base.dev;
723         struct drm_i915_private *dev_priv = dev->dev_private;
724         const struct drm_display_mode *mode = &crtc->base.hwmode;
725         enum pipe pipe = crtc->pipe;
726         int position, vtotal;
727
728         vtotal = mode->crtc_vtotal;
729         if (mode->flags & DRM_MODE_FLAG_INTERLACE)
730                 vtotal /= 2;
731
732         if (IS_GEN2(dev))
733                 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
734         else
735                 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
736
737         /*
738          * On HSW, the DSL reg (0x70000) appears to return 0 if we
739          * read it just before the start of vblank.  So try it again
740          * so we don't accidentally end up spanning a vblank frame
741          * increment, causing the pipe_update_end() code to squawk at us.
742          *
743          * The nature of this problem means we can't simply check the ISR
744          * bit and return the vblank start value; nor can we use the scanline
745          * debug register in the transcoder as it appears to have the same
746          * problem.  We may need to extend this to include other platforms,
747          * but so far testing only shows the problem on HSW.
748          */
749         if (IS_HASWELL(dev) && !position) {
750                 int i, temp;
751
752                 for (i = 0; i < 100; i++) {
753                         udelay(1);
754                         temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
755                                 DSL_LINEMASK_GEN3;
756                         if (temp != position) {
757                                 position = temp;
758                                 break;
759                         }
760                 }
761         }
762
763         /*
764          * See update_scanline_offset() for the details on the
765          * scanline_offset adjustment.
766          */
767         return (position + crtc->scanline_offset) % vtotal;
768 }
769
770 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
771                                     unsigned int flags, int *vpos, int *hpos,
772                                     ktime_t *stime, ktime_t *etime,
773                                     const struct drm_display_mode *mode)
774 {
775         struct drm_i915_private *dev_priv = dev->dev_private;
776         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
777         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
778         int position;
779         int vbl_start, vbl_end, hsync_start, htotal, vtotal;
780         bool in_vbl = true;
781         int ret = 0;
782         unsigned long irqflags;
783
784         if (WARN_ON(!mode->crtc_clock)) {
785                 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
786                                  "pipe %c\n", pipe_name(pipe));
787                 return 0;
788         }
789
790         htotal = mode->crtc_htotal;
791         hsync_start = mode->crtc_hsync_start;
792         vtotal = mode->crtc_vtotal;
793         vbl_start = mode->crtc_vblank_start;
794         vbl_end = mode->crtc_vblank_end;
795
796         if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
797                 vbl_start = DIV_ROUND_UP(vbl_start, 2);
798                 vbl_end /= 2;
799                 vtotal /= 2;
800         }
801
802         ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
803
804         /*
805          * Lock uncore.lock, as we will do multiple timing critical raw
806          * register reads, potentially with preemption disabled, so the
807          * following code must not block on uncore.lock.
808          */
809         spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
810
811         /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
812
813         /* Get optional system timestamp before query. */
814         if (stime)
815                 *stime = ktime_get();
816
817         if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
818                 /* No obvious pixelcount register. Only query vertical
819                  * scanout position from Display scan line register.
820                  */
821                 position = __intel_get_crtc_scanline(intel_crtc);
822         } else {
823                 /* Have access to pixelcount since start of frame.
824                  * We can split this into vertical and horizontal
825                  * scanout position.
826                  */
827                 position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
828
829                 /* convert to pixel counts */
830                 vbl_start *= htotal;
831                 vbl_end *= htotal;
832                 vtotal *= htotal;
833
834                 /*
835                  * In interlaced modes, the pixel counter counts all pixels,
836                  * so one field will have htotal more pixels. In order to avoid
837                  * the reported position from jumping backwards when the pixel
838                  * counter is beyond the length of the shorter field, just
839                 * clamp the position to the length of the shorter field. This
840                  * matches how the scanline counter based position works since
841                  * the scanline counter doesn't count the two half lines.
842                  */
843                 if (position >= vtotal)
844                         position = vtotal - 1;
845
846                 /*
847                  * Start of vblank interrupt is triggered at start of hsync,
848                  * just prior to the first active line of vblank. However we
849                  * consider lines to start at the leading edge of horizontal
850                  * active. So, should we get here before we've crossed into
851                  * the horizontal active of the first line in vblank, we would
852                  * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
853                  * always add htotal-hsync_start to the current pixel position.
854                  */
855                 position = (position + htotal - hsync_start) % vtotal;
856         }
857
858         /* Get optional system timestamp after query. */
859         if (etime)
860                 *etime = ktime_get();
861
862         /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
863
864         spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
865
866         in_vbl = position >= vbl_start && position < vbl_end;
867
868         /*
869          * While in vblank, position will be negative
870          * counting up towards 0 at vbl_end. And outside
871          * vblank, position will be positive counting
872          * up since vbl_end.
873          */
874         if (position >= vbl_start)
875                 position -= vbl_end;
876         else
877                 position += vtotal - vbl_end;
878
879         if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
880                 *vpos = position;
881                 *hpos = 0;
882         } else {
883                 *vpos = position / htotal;
884                 *hpos = position - (*vpos * htotal);
885         }
886
887         /* In vblank? */
888         if (in_vbl)
889                 ret |= DRM_SCANOUTPOS_IN_VBLANK;
890
891         return ret;
892 }
893
894 int intel_get_crtc_scanline(struct intel_crtc *crtc)
895 {
896         struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
897         unsigned long irqflags;
898         int position;
899
900         spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
901         position = __intel_get_crtc_scanline(crtc);
902         spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
903
904         return position;
905 }
906
907 static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
908                               int *max_error,
909                               struct timeval *vblank_time,
910                               unsigned flags)
911 {
912         struct drm_crtc *crtc;
913
914         if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
915                 DRM_ERROR("Invalid crtc %d\n", pipe);
916                 return -EINVAL;
917         }
918
919         /* Get drm_crtc to timestamp: */
920         crtc = intel_get_crtc_for_pipe(dev, pipe);
921         if (crtc == NULL) {
922                 DRM_ERROR("Invalid crtc %d\n", pipe);
923                 return -EINVAL;
924         }
925
926         if (!crtc->hwmode.crtc_clock) {
927                 DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
928                 return -EBUSY;
929         }
930
931         /* Helper routine in DRM core does all the work: */
932         return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
933                                                      vblank_time, flags,
934                                                      &crtc->hwmode);
935 }
936
937 static void ironlake_rps_change_irq_handler(struct drm_device *dev)
938 {
939         struct drm_i915_private *dev_priv = dev->dev_private;
940         u32 busy_up, busy_down, max_avg, min_avg;
941         u8 new_delay;
942
943         spin_lock(&mchdev_lock);
944
945         I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
946
947         new_delay = dev_priv->ips.cur_delay;
948
949         I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
950         busy_up = I915_READ(RCPREVBSYTUPAVG);
951         busy_down = I915_READ(RCPREVBSYTDNAVG);
952         max_avg = I915_READ(RCBMAXAVG);
953         min_avg = I915_READ(RCBMINAVG);
954
955         /* Handle RCS change request from hw */
956         if (busy_up > max_avg) {
957                 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
958                         new_delay = dev_priv->ips.cur_delay - 1;
959                 if (new_delay < dev_priv->ips.max_delay)
960                         new_delay = dev_priv->ips.max_delay;
961         } else if (busy_down < min_avg) {
962                 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
963                         new_delay = dev_priv->ips.cur_delay + 1;
964                 if (new_delay > dev_priv->ips.min_delay)
965                         new_delay = dev_priv->ips.min_delay;
966         }
967
968         if (ironlake_set_drps(dev, new_delay))
969                 dev_priv->ips.cur_delay = new_delay;
970
971         spin_unlock(&mchdev_lock);
972
973         return;
974 }
975
976 static void notify_ring(struct intel_engine_cs *ring)
977 {
978         if (!intel_ring_initialized(ring))
979                 return;
980
981         trace_i915_gem_request_notify(ring);
982
983         wake_up_all(&ring->irq_queue);
984 }
985
986 static void vlv_c0_read(struct drm_i915_private *dev_priv,
987                         struct intel_rps_ei *ei)
988 {
989         ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
990         ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
991         ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
992 }
993
994 static bool vlv_c0_above(struct drm_i915_private *dev_priv,
995                          const struct intel_rps_ei *old,
996                          const struct intel_rps_ei *now,
997                          int threshold)
998 {
999         u64 time, c0;
1000         unsigned int mul = 100;
1001
1002         if (old->cz_clock == 0)
1003                 return false;
1004
1005         if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
1006                 mul <<= 8;
1007
1008         time = now->cz_clock - old->cz_clock;
1009         time *= threshold * dev_priv->czclk_freq;
1010
1011         /* Workload can be split between render + media, e.g. SwapBuffers
1012          * being blitted in X after being rendered in mesa. To account for
1013          * this we need to combine both engines into our activity counter.
1014          */
1015         c0 = now->render_c0 - old->render_c0;
1016         c0 += now->media_c0 - old->media_c0;
1017         c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;
1018
1019         return c0 >= time;
1020 }
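
/*
 * Reading of the comparison above (illustrative, not normative): both sides
 * are scaled so that "c0 >= time" asks whether the combined render+media C0
 * residency covered at least threshold percent of the elapsed evaluation
 * interval, with mul widening to 100 << 8 when the counters run in the
 * VLV_COUNT_RANGE_HIGH range and czclk_freq normalising the CZ timestamp
 * delta to the same units.
 */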
1021
1022 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
1023 {
1024         vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
1025         dev_priv->rps.up_ei = dev_priv->rps.down_ei;
1026 }
1027
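/*
 * vlv_wa_c0_ei() below turns the raw GEN6_PM_RP_{UP,DOWN}_EI_EXPIRED
 * interrupts into synthetic up/down threshold events by evaluating C0
 * residency in software via vlv_c0_above().  The synthesized bits are then
 * fed back into the normal RPS up/down handling in gen6_pm_rps_work().
 */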
1028 static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
1029 {
1030         struct intel_rps_ei now;
1031         u32 events = 0;
1032
1033         if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
1034                 return 0;
1035
1036         vlv_c0_read(dev_priv, &now);
1037         if (now.cz_clock == 0)
1038                 return 0;
1039
1040         if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
1041                 if (!vlv_c0_above(dev_priv,
1042                                   &dev_priv->rps.down_ei, &now,
1043                                   dev_priv->rps.down_threshold))
1044                         events |= GEN6_PM_RP_DOWN_THRESHOLD;
1045                 dev_priv->rps.down_ei = now;
1046         }
1047
1048         if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
1049                 if (vlv_c0_above(dev_priv,
1050                                  &dev_priv->rps.up_ei, &now,
1051                                  dev_priv->rps.up_threshold))
1052                         events |= GEN6_PM_RP_UP_THRESHOLD;
1053                 dev_priv->rps.up_ei = now;
1054         }
1055
1056         return events;
1057 }
1058
1059 static bool any_waiters(struct drm_i915_private *dev_priv)
1060 {
1061         struct intel_engine_cs *ring;
1062         int i;
1063
1064         for_each_ring(ring, dev_priv, i)
1065                 if (ring->irq_refcount)
1066                         return true;
1067
1068         return false;
1069 }
1070
1071 static void gen6_pm_rps_work(struct work_struct *work)
1072 {
1073         struct drm_i915_private *dev_priv =
1074                 container_of(work, struct drm_i915_private, rps.work);
1075         bool client_boost;
1076         int new_delay, adj, min, max;
1077         u32 pm_iir;
1078
1079         spin_lock_irq(&dev_priv->irq_lock);
1080         /* Speed up work cancellation while disabling RPS interrupts. */
1081         if (!dev_priv->rps.interrupts_enabled) {
1082                 spin_unlock_irq(&dev_priv->irq_lock);
1083                 return;
1084         }
1085         pm_iir = dev_priv->rps.pm_iir;
1086         dev_priv->rps.pm_iir = 0;
1087         /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
1088         gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
1089         client_boost = dev_priv->rps.client_boost;
1090         dev_priv->rps.client_boost = false;
1091         spin_unlock_irq(&dev_priv->irq_lock);
1092
1093         /* Make sure we didn't queue anything we're not going to process. */
1094         WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
1095
1096         if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
1097                 return;
1098
1099         mutex_lock(&dev_priv->rps.hw_lock);
1100
1101         pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
1102
1103         adj = dev_priv->rps.last_adj;
1104         new_delay = dev_priv->rps.cur_freq;
1105         min = dev_priv->rps.min_freq_softlimit;
1106         max = dev_priv->rps.max_freq_softlimit;
1107
1108         if (client_boost) {
1109                 new_delay = dev_priv->rps.max_freq_softlimit;
1110                 adj = 0;
1111         } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
1112                 if (adj > 0)
1113                         adj *= 2;
1114                 else /* CHV needs even encode values */
1115                         adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
1116                 /*
1117                  * For better performance, jump directly
1118                  * to RPe if we're below it.
1119                  */
1120                 if (new_delay < dev_priv->rps.efficient_freq - adj) {
1121                         new_delay = dev_priv->rps.efficient_freq;
1122                         adj = 0;
1123                 }
1124         } else if (any_waiters(dev_priv)) {
1125                 adj = 0;
1126         } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
1127                 if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
1128                         new_delay = dev_priv->rps.efficient_freq;
1129                 else
1130                         new_delay = dev_priv->rps.min_freq_softlimit;
1131                 adj = 0;
1132         } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1133                 if (adj < 0)
1134                         adj *= 2;
1135                 else /* CHV needs even encode values */
1136                         adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
1137         } else { /* unknown event */
1138                 adj = 0;
1139         }
1140
1141         dev_priv->rps.last_adj = adj;
1142
1143         /* sysfs frequency interfaces may have snuck in while servicing the
1144          * interrupt
1145          */
1146         new_delay += adj;
1147         new_delay = clamp_t(int, new_delay, min, max);
1148
1149         intel_set_rps(dev_priv->dev, new_delay);
1150
1151         mutex_unlock(&dev_priv->rps.hw_lock);
1152 }
1153
1154
1155 /**
1156  * ivybridge_parity_work - Workqueue called when a parity error interrupt
1157  * occurred.
1158  * @work: workqueue struct
1159  *
1160  * Doesn't actually do anything except notify userspace. As a consequence of
1161  * this event, userspace should try to remap the bad rows, since
1162  * statistically the same row is more likely to go bad again.
1163  */
1164 static void ivybridge_parity_work(struct work_struct *work)
1165 {
1166         struct drm_i915_private *dev_priv =
1167                 container_of(work, struct drm_i915_private, l3_parity.error_work);
1168         u32 error_status, row, bank, subbank;
1169         char *parity_event[6];
1170         uint32_t misccpctl;
1171         uint8_t slice = 0;
1172
1173         /* We must turn off DOP level clock gating to access the L3 registers.
1174          * In order to prevent a get/put style interface, acquire struct mutex
1175          * any time we access those registers.
1176          */
1177         mutex_lock(&dev_priv->dev->struct_mutex);
1178
1179         /* If we've screwed up tracking, just let the interrupt fire again */
1180         if (WARN_ON(!dev_priv->l3_parity.which_slice))
1181                 goto out;
1182
1183         misccpctl = I915_READ(GEN7_MISCCPCTL);
1184         I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
1185         POSTING_READ(GEN7_MISCCPCTL);
1186
1187         while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1188                 u32 reg;
1189
1190                 slice--;
1191                 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
1192                         break;
1193
1194                 dev_priv->l3_parity.which_slice &= ~(1<<slice);
1195
1196                 reg = GEN7_L3CDERRST1 + (slice * 0x200);
1197
1198                 error_status = I915_READ(reg);
1199                 row = GEN7_PARITY_ERROR_ROW(error_status);
1200                 bank = GEN7_PARITY_ERROR_BANK(error_status);
1201                 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1202
1203                 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
1204                 POSTING_READ(reg);
1205
1206                 parity_event[0] = I915_L3_PARITY_UEVENT "=1";
1207                 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
1208                 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
1209                 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
1210                 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
1211                 parity_event[5] = NULL;
1212
1213                 kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
1214                                    KOBJ_CHANGE, parity_event);
1215
1216                 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
1217                           slice, row, bank, subbank);
1218
1219                 kfree(parity_event[4]);
1220                 kfree(parity_event[3]);
1221                 kfree(parity_event[2]);
1222                 kfree(parity_event[1]);
1223         }
1224
1225         I915_WRITE(GEN7_MISCCPCTL, misccpctl);
1226
1227 out:
1228         WARN_ON(dev_priv->l3_parity.which_slice);
1229         spin_lock_irq(&dev_priv->irq_lock);
1230         gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
1231         spin_unlock_irq(&dev_priv->irq_lock);
1232
1233         mutex_unlock(&dev_priv->dev->struct_mutex);
1234 }
1235
1236 static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
1237 {
1238         struct drm_i915_private *dev_priv = dev->dev_private;
1239
1240         if (!HAS_L3_DPF(dev))
1241                 return;
1242
1243         spin_lock(&dev_priv->irq_lock);
1244         gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
1245         spin_unlock(&dev_priv->irq_lock);
1246
1247         iir &= GT_PARITY_ERROR(dev);
1248         if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
1249                 dev_priv->l3_parity.which_slice |= 1 << 1;
1250
1251         if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
1252                 dev_priv->l3_parity.which_slice |= 1 << 0;
1253
1254         queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
1255 }
1256
1257 static void ilk_gt_irq_handler(struct drm_device *dev,
1258                                struct drm_i915_private *dev_priv,
1259                                u32 gt_iir)
1260 {
1261         if (gt_iir &
1262             (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1263                 notify_ring(&dev_priv->ring[RCS]);
1264         if (gt_iir & ILK_BSD_USER_INTERRUPT)
1265                 notify_ring(&dev_priv->ring[VCS]);
1266 }
1267
1268 static void snb_gt_irq_handler(struct drm_device *dev,
1269                                struct drm_i915_private *dev_priv,
1270                                u32 gt_iir)
1271 {
1272
1273         if (gt_iir &
1274             (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1275                 notify_ring(&dev_priv->ring[RCS]);
1276         if (gt_iir & GT_BSD_USER_INTERRUPT)
1277                 notify_ring(&dev_priv->ring[VCS]);
1278         if (gt_iir & GT_BLT_USER_INTERRUPT)
1279                 notify_ring(&dev_priv->ring[BCS]);
1280
1281         if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1282                       GT_BSD_CS_ERROR_INTERRUPT |
1283                       GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
1284                 DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
1285
1286         if (gt_iir & GT_PARITY_ERROR(dev))
1287                 ivybridge_parity_error_irq_handler(dev, gt_iir);
1288 }
1289
1290 static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
1291                                        u32 master_ctl)
1292 {
1293         irqreturn_t ret = IRQ_NONE;
1294
1295         if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1296                 u32 tmp = I915_READ_FW(GEN8_GT_IIR(0));
1297                 if (tmp) {
1298                         I915_WRITE_FW(GEN8_GT_IIR(0), tmp);
1299                         ret = IRQ_HANDLED;
1300
1301                         if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
1302                                 intel_lrc_irq_handler(&dev_priv->ring[RCS]);
1303                         if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
1304                                 notify_ring(&dev_priv->ring[RCS]);
1305
1306                         if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
1307                                 intel_lrc_irq_handler(&dev_priv->ring[BCS]);
1308                         if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
1309                                 notify_ring(&dev_priv->ring[BCS]);
1310                 } else
1311                         DRM_ERROR("The master control interrupt lied (GT0)!\n");
1312         }
1313
1314         if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
1315                 u32 tmp = I915_READ_FW(GEN8_GT_IIR(1));
1316                 if (tmp) {
1317                         I915_WRITE_FW(GEN8_GT_IIR(1), tmp);
1318                         ret = IRQ_HANDLED;
1319
1320                         if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
1321                                 intel_lrc_irq_handler(&dev_priv->ring[VCS]);
1322                         if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
1323                                 notify_ring(&dev_priv->ring[VCS]);
1324
1325                         if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
1326                                 intel_lrc_irq_handler(&dev_priv->ring[VCS2]);
1327                         if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
1328                                 notify_ring(&dev_priv->ring[VCS2]);
1329                 } else
1330                         DRM_ERROR("The master control interrupt lied (GT1)!\n");
1331         }
1332
1333         if (master_ctl & GEN8_GT_VECS_IRQ) {
1334                 u32 tmp = I915_READ_FW(GEN8_GT_IIR(3));
1335                 if (tmp) {
1336                         I915_WRITE_FW(GEN8_GT_IIR(3), tmp);
1337                         ret = IRQ_HANDLED;
1338
1339                         if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
1340                                 intel_lrc_irq_handler(&dev_priv->ring[VECS]);
1341                         if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
1342                                 notify_ring(&dev_priv->ring[VECS]);
1343                 } else
1344                         DRM_ERROR("The master control interrupt lied (GT3)!\n");
1345         }
1346
1347         if (master_ctl & GEN8_GT_PM_IRQ) {
1348                 u32 tmp = I915_READ_FW(GEN8_GT_IIR(2));
1349                 if (tmp & dev_priv->pm_rps_events) {
1350                         I915_WRITE_FW(GEN8_GT_IIR(2),
1351                                       tmp & dev_priv->pm_rps_events);
1352                         ret = IRQ_HANDLED;
1353                         gen6_rps_irq_handler(dev_priv, tmp);
1354                 } else
1355                         DRM_ERROR("The master control interrupt lied (PM)!\n");
1356         }
1357
1358         return ret;
1359 }
1360
1361 static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
1362 {
1363         switch (port) {
1364         case PORT_A:
1365                 return val & PORTA_HOTPLUG_LONG_DETECT;
1366         case PORT_B:
1367                 return val & PORTB_HOTPLUG_LONG_DETECT;
1368         case PORT_C:
1369                 return val & PORTC_HOTPLUG_LONG_DETECT;
1370         default:
1371                 return false;
1372         }
1373 }
1374
1375 static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
1376 {
1377         switch (port) {
1378         case PORT_E:
1379                 return val & PORTE_HOTPLUG_LONG_DETECT;
1380         default:
1381                 return false;
1382         }
1383 }
1384
1385 static bool spt_port_hotplug_long_detect(enum port port, u32 val)
1386 {
1387         switch (port) {
1388         case PORT_A:
1389                 return val & PORTA_HOTPLUG_LONG_DETECT;
1390         case PORT_B:
1391                 return val & PORTB_HOTPLUG_LONG_DETECT;
1392         case PORT_C:
1393                 return val & PORTC_HOTPLUG_LONG_DETECT;
1394         case PORT_D:
1395                 return val & PORTD_HOTPLUG_LONG_DETECT;
1396         default:
1397                 return false;
1398         }
1399 }
1400
1401 static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
1402 {
1403         switch (port) {
1404         case PORT_A:
1405                 return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
1406         default:
1407                 return false;
1408         }
1409 }
1410
1411 static bool pch_port_hotplug_long_detect(enum port port, u32 val)
1412 {
1413         switch (port) {
1414         case PORT_B:
1415                 return val & PORTB_HOTPLUG_LONG_DETECT;
1416         case PORT_C:
1417                 return val & PORTC_HOTPLUG_LONG_DETECT;
1418         case PORT_D:
1419                 return val & PORTD_HOTPLUG_LONG_DETECT;
1420         default:
1421                 return false;
1422         }
1423 }
1424
1425 static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
1426 {
1427         switch (port) {
1428         case PORT_B:
1429                 return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1430         case PORT_C:
1431                 return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1432         case PORT_D:
1433                 return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1434         default:
1435                 return false;
1436         }
1437 }
1438
1439 /*
1440  * Get a bit mask of pins that have triggered, and which ones may be long.
1441  * This can be called multiple times with the same masks to accumulate
1442  * hotplug detection results from several registers.
1443  *
1444  * Note that the caller is expected to zero out the masks initially.
1445  */
1446 static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
1447                              u32 hotplug_trigger, u32 dig_hotplug_reg,
1448                              const u32 hpd[HPD_NUM_PINS],
1449                              bool long_pulse_detect(enum port port, u32 val))
1450 {
1451         enum port port;
1452         int i;
1453
1454         for_each_hpd_pin(i) {
1455                 if ((hpd[i] & hotplug_trigger) == 0)
1456                         continue;
1457
1458                 *pin_mask |= BIT(i);
1459
1460                 if (!intel_hpd_pin_to_port(i, &port))
1461                         continue;
1462
1463                 if (long_pulse_detect(port, dig_hotplug_reg))
1464                         *long_mask |= BIT(i);
1465         }
1466
1467         DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
1468                          hotplug_trigger, dig_hotplug_reg, *pin_mask);
1469
1470 }
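/*
 * Illustrative sketch, not part of the driver: the accumulation pattern the
 * comment above intel_get_hpd_pins() describes -- zero the masks once,
 * collect pins from more than one trigger/register pair, then report once.
 * The two trigger/register arguments are placeholders.
 */
#if 0
static void example_hpd_accumulate(struct drm_device *dev,
				   u32 trigger_a, u32 dig_reg_a,
				   u32 trigger_b, u32 dig_reg_b)
{
	u32 pin_mask = 0, long_mask = 0;

	intel_get_hpd_pins(&pin_mask, &long_mask, trigger_a, dig_reg_a,
			   hpd_cpt, pch_port_hotplug_long_detect);
	intel_get_hpd_pins(&pin_mask, &long_mask, trigger_b, dig_reg_b,
			   hpd_cpt, pch_port_hotplug_long_detect);

	if (pin_mask)
		intel_hpd_irq_handler(dev, pin_mask, long_mask);
}
#endif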
1471
1472 static void gmbus_irq_handler(struct drm_device *dev)
1473 {
1474         struct drm_i915_private *dev_priv = dev->dev_private;
1475
1476         wake_up_all(&dev_priv->gmbus_wait_queue);
1477 }
1478
1479 static void dp_aux_irq_handler(struct drm_device *dev)
1480 {
1481         struct drm_i915_private *dev_priv = dev->dev_private;
1482
1483         wake_up_all(&dev_priv->gmbus_wait_queue);
1484 }
1485
1486 #if defined(CONFIG_DEBUG_FS)
1487 static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1488                                          uint32_t crc0, uint32_t crc1,
1489                                          uint32_t crc2, uint32_t crc3,
1490                                          uint32_t crc4)
1491 {
1492         struct drm_i915_private *dev_priv = dev->dev_private;
1493         struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1494         struct intel_pipe_crc_entry *entry;
1495         int head, tail;
1496
1497         spin_lock(&pipe_crc->lock);
1498
1499         if (!pipe_crc->entries) {
1500                 spin_unlock(&pipe_crc->lock);
1501                 DRM_DEBUG_KMS("spurious interrupt\n");
1502                 return;
1503         }
1504
1505         head = pipe_crc->head;
1506         tail = pipe_crc->tail;
1507
1508         if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
1509                 spin_unlock(&pipe_crc->lock);
1510                 DRM_ERROR("CRC buffer overflowing\n");
1511                 return;
1512         }
1513
1514         entry = &pipe_crc->entries[head];
1515
1516         entry->frame = dev->driver->get_vblank_counter(dev, pipe);
1517         entry->crc[0] = crc0;
1518         entry->crc[1] = crc1;
1519         entry->crc[2] = crc2;
1520         entry->crc[3] = crc3;
1521         entry->crc[4] = crc4;
1522
1523         head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
1524         pipe_crc->head = head;
1525
1526         spin_unlock(&pipe_crc->lock);
1527
1528         wake_up_interruptible(&pipe_crc->wq);
1529 }
1530 #else
1531 static inline void
1532 display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1533                              uint32_t crc0, uint32_t crc1,
1534                              uint32_t crc2, uint32_t crc3,
1535                              uint32_t crc4) {}
1536 #endif
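/*
 * Illustrative sketch, not part of the driver: a reader draining the CRC
 * ring buffer that display_pipe_crc_irq_handler() fills above. In the
 * driver the consumer lives in debugfs; here the locking, CIRC_CNT()
 * bookkeeping and index masking mirror the producer side, and the
 * pr_info() is only a placeholder for real output.
 */
#if 0
static void example_drain_pipe_crc(struct intel_pipe_crc *pipe_crc)
{
	spin_lock_irq(&pipe_crc->lock);
	while (pipe_crc->entries &&
	       CIRC_CNT(pipe_crc->head, pipe_crc->tail,
			INTEL_PIPE_CRC_ENTRIES_NR) >= 1) {
		struct intel_pipe_crc_entry *entry =
			&pipe_crc->entries[pipe_crc->tail];

		pr_info("frame %u: crc0 0x%08x\n", entry->frame, entry->crc[0]);

		pipe_crc->tail = (pipe_crc->tail + 1) &
				 (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	}
	spin_unlock_irq(&pipe_crc->lock);
}
#endif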
1537
1538
1539 static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1540 {
1541         struct drm_i915_private *dev_priv = dev->dev_private;
1542
1543         display_pipe_crc_irq_handler(dev, pipe,
1544                                      I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1545                                      0, 0, 0, 0);
1546 }
1547
1548 static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1549 {
1550         struct drm_i915_private *dev_priv = dev->dev_private;
1551
1552         display_pipe_crc_irq_handler(dev, pipe,
1553                                      I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1554                                      I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1555                                      I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1556                                      I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1557                                      I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1558 }
1559
1560 static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1561 {
1562         struct drm_i915_private *dev_priv = dev->dev_private;
1563         uint32_t res1, res2;
1564
1565         if (INTEL_INFO(dev)->gen >= 3)
1566                 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1567         else
1568                 res1 = 0;
1569
1570         if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
1571                 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1572         else
1573                 res2 = 0;
1574
1575         display_pipe_crc_irq_handler(dev, pipe,
1576                                      I915_READ(PIPE_CRC_RES_RED(pipe)),
1577                                      I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1578                                      I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1579                                      res1, res2);
1580 }
1581
1582 /* The RPS events need forcewake, so we add them to a work queue and mask their
1583  * IMR bits until the work is done. Other interrupts can be processed without
1584  * the work queue. */
1585 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1586 {
1587         if (pm_iir & dev_priv->pm_rps_events) {
1588                 spin_lock(&dev_priv->irq_lock);
1589                 gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1590                 if (dev_priv->rps.interrupts_enabled) {
1591                         dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1592                         queue_work(dev_priv->wq, &dev_priv->rps.work);
1593                 }
1594                 spin_unlock(&dev_priv->irq_lock);
1595         }
1596
1597         if (INTEL_INFO(dev_priv)->gen >= 8)
1598                 return;
1599
1600         if (HAS_VEBOX(dev_priv->dev)) {
1601                 if (pm_iir & PM_VEBOX_USER_INTERRUPT)
1602                         notify_ring(&dev_priv->ring[VECS]);
1603
1604                 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
1605                         DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
1606         }
1607 }
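/*
 * Illustrative sketch, not part of the driver: the bottom half the comment
 * above refers to. The queued worker (gen6_pm_rps_work() elsewhere in this
 * file) picks up the bits recorded by gen6_rps_irq_handler(), re-enables
 * the masked IMR bits and then does the forcewake-protected frequency
 * work. The unmask helper named below is an assumption.
 */
#if 0
static void example_rps_bottom_half(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	u32 pm_iir;

	spin_lock_irq(&dev_priv->irq_lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); /* assumed helper */
	spin_unlock_irq(&dev_priv->irq_lock);

	/* ... forcewake held here while reprogramming the RPS frequency ... */
}
#endif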
1608
1609 static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
1610 {
1611         if (!drm_handle_vblank(dev, pipe))
1612                 return false;
1613
1614         return true;
1615 }
1616
1617 static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
1618 {
1619         struct drm_i915_private *dev_priv = dev->dev_private;
1620         u32 pipe_stats[I915_MAX_PIPES] = { };
1621         int pipe;
1622
1623         spin_lock(&dev_priv->irq_lock);
1624         for_each_pipe(dev_priv, pipe) {
1625                 int reg;
1626                 u32 mask, iir_bit = 0;
1627
1628                 /*
1629                  * PIPESTAT bits get signalled even when the interrupt is
1630                  * disabled with the mask bits, and some of the status bits do
1631                  * not generate interrupts at all (like the underrun bit). Hence
1632                  * we need to be careful that we only handle what we want to
1633                  * handle.
1634                  */
1635
1636                 /* fifo underruns are filtered in the underrun handler. */
1637                 mask = PIPE_FIFO_UNDERRUN_STATUS;
1638
1639                 switch (pipe) {
1640                 case PIPE_A:
1641                         iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1642                         break;
1643                 case PIPE_B:
1644                         iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1645                         break;
1646                 case PIPE_C:
1647                         iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1648                         break;
1649                 }
1650                 if (iir & iir_bit)
1651                         mask |= dev_priv->pipestat_irq_mask[pipe];
1652
1653                 if (!mask)
1654                         continue;
1655
1656                 reg = PIPESTAT(pipe);
1657                 mask |= PIPESTAT_INT_ENABLE_MASK;
1658                 pipe_stats[pipe] = I915_READ(reg) & mask;
1659
1660                 /*
1661                  * Clear the PIPE*STAT regs before the IIR
1662                  */
1663                 if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
1664                                         PIPESTAT_INT_STATUS_MASK))
1665                         I915_WRITE(reg, pipe_stats[pipe]);
1666         }
1667         spin_unlock(&dev_priv->irq_lock);
1668
1669         for_each_pipe(dev_priv, pipe) {
1670                 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
1671                     intel_pipe_handle_vblank(dev, pipe))
1672                         intel_check_page_flip(dev, pipe);
1673
1674                 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
1675                         intel_prepare_page_flip(dev, pipe);
1676                         intel_finish_page_flip(dev, pipe);
1677                 }
1678
1679                 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1680                         i9xx_pipe_crc_irq_handler(dev, pipe);
1681
1682                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1683                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1684         }
1685
1686         if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1687                 gmbus_irq_handler(dev);
1688 }
1689
1690 static void i9xx_hpd_irq_handler(struct drm_device *dev)
1691 {
1692         struct drm_i915_private *dev_priv = dev->dev_private;
1693         u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
1694         u32 pin_mask = 0, long_mask = 0;
1695
1696         if (!hotplug_status)
1697                 return;
1698
1699         I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1700         /*
1701          * Make sure hotplug status is cleared before we clear IIR, or else we
1702          * may miss hotplug events.
1703          */
1704         POSTING_READ(PORT_HOTPLUG_STAT);
1705
1706         if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
1707                 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1708
1709                 if (hotplug_trigger) {
1710                         intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1711                                            hotplug_trigger, hpd_status_g4x,
1712                                            i9xx_port_hotplug_long_detect);
1713
1714                         intel_hpd_irq_handler(dev, pin_mask, long_mask);
1715                 }
1716
1717                 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1718                         dp_aux_irq_handler(dev);
1719         } else {
1720                 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1721
1722                 if (hotplug_trigger) {
1723                         intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1724                                            hotplug_trigger, hpd_status_i915,
1725                                            i9xx_port_hotplug_long_detect);
1726                         intel_hpd_irq_handler(dev, pin_mask, long_mask);
1727                 }
1728         }
1729 }
1730
1731 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1732 {
1733         struct drm_device *dev = arg;
1734         struct drm_i915_private *dev_priv = dev->dev_private;
1735         u32 iir, gt_iir, pm_iir;
1736         irqreturn_t ret = IRQ_NONE;
1737
1738         if (!intel_irqs_enabled(dev_priv))
1739                 return IRQ_NONE;
1740
1741         while (true) {
1742                 /* Find, clear, then process each source of interrupt */
1743
1744                 gt_iir = I915_READ(GTIIR);
1745                 if (gt_iir)
1746                         I915_WRITE(GTIIR, gt_iir);
1747
1748                 pm_iir = I915_READ(GEN6_PMIIR);
1749                 if (pm_iir)
1750                         I915_WRITE(GEN6_PMIIR, pm_iir);
1751
1752                 iir = I915_READ(VLV_IIR);
1753                 if (iir) {
1754                         /* Consume port before clearing IIR or we'll miss events */
1755                         if (iir & I915_DISPLAY_PORT_INTERRUPT)
1756                                 i9xx_hpd_irq_handler(dev);
1757                         I915_WRITE(VLV_IIR, iir);
1758                 }
1759
1760                 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1761                         goto out;
1762
1763                 ret = IRQ_HANDLED;
1764
1765                 if (gt_iir)
1766                         snb_gt_irq_handler(dev, dev_priv, gt_iir);
1767                 if (pm_iir)
1768                         gen6_rps_irq_handler(dev_priv, pm_iir);
1769                 /* Call regardless, as some status bits might not be
1770                  * signalled in iir */
1771                 valleyview_pipestat_irq_handler(dev, iir);
1772         }
1773
1774 out:
1775         return ret;
1776 }
1777
1778 static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1779 {
1780         struct drm_device *dev = arg;
1781         struct drm_i915_private *dev_priv = dev->dev_private;
1782         u32 master_ctl, iir;
1783         irqreturn_t ret = IRQ_NONE;
1784
1785         if (!intel_irqs_enabled(dev_priv))
1786                 return IRQ_NONE;
1787
1788         for (;;) {
1789                 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1790                 iir = I915_READ(VLV_IIR);
1791
1792                 if (master_ctl == 0 && iir == 0)
1793                         break;
1794
1795                 ret = IRQ_HANDLED;
1796
1797                 I915_WRITE(GEN8_MASTER_IRQ, 0);
1798
1799                 /* Find, clear, then process each source of interrupt */
1800
1801                 if (iir) {
1802                         /* Consume port before clearing IIR or we'll miss events */
1803                         if (iir & I915_DISPLAY_PORT_INTERRUPT)
1804                                 i9xx_hpd_irq_handler(dev);
1805                         I915_WRITE(VLV_IIR, iir);
1806                 }
1807
1808                 gen8_gt_irq_handler(dev_priv, master_ctl);
1809
1810                 /* Call regardless, as some status bits might not be
1811                  * signalled in iir */
1812                 valleyview_pipestat_irq_handler(dev, iir);
1813
1814                 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
1815                 POSTING_READ(GEN8_MASTER_IRQ);
1816         }
1817
1818         return ret;
1819 }
1820
1821 static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
1822                                 const u32 hpd[HPD_NUM_PINS])
1823 {
1824         struct drm_i915_private *dev_priv = to_i915(dev);
1825         u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1826
1827         dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1828         I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1829
1830         intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1831                            dig_hotplug_reg, hpd,
1832                            pch_port_hotplug_long_detect);
1833
1834         intel_hpd_irq_handler(dev, pin_mask, long_mask);
1835 }
1836
1837 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
1838 {
1839         struct drm_i915_private *dev_priv = dev->dev_private;
1840         int pipe;
1841         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1842
1843         if (hotplug_trigger)
1844                 ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
1845
1846         if (pch_iir & SDE_AUDIO_POWER_MASK) {
1847                 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1848                                SDE_AUDIO_POWER_SHIFT);
1849                 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
1850                                  port_name(port));
1851         }
1852
1853         if (pch_iir & SDE_AUX_MASK)
1854                 dp_aux_irq_handler(dev);
1855
1856         if (pch_iir & SDE_GMBUS)
1857                 gmbus_irq_handler(dev);
1858
1859         if (pch_iir & SDE_AUDIO_HDCP_MASK)
1860                 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1861
1862         if (pch_iir & SDE_AUDIO_TRANS_MASK)
1863                 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1864
1865         if (pch_iir & SDE_POISON)
1866                 DRM_ERROR("PCH poison interrupt\n");
1867
1868         if (pch_iir & SDE_FDI_MASK)
1869                 for_each_pipe(dev_priv, pipe)
1870                         DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
1871                                          pipe_name(pipe),
1872                                          I915_READ(FDI_RX_IIR(pipe)));
1873
1874         if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1875                 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1876
1877         if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1878                 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1879
1880         if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1881                 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
1882
1883         if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1884                 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
1885 }
1886
1887 static void ivb_err_int_handler(struct drm_device *dev)
1888 {
1889         struct drm_i915_private *dev_priv = dev->dev_private;
1890         u32 err_int = I915_READ(GEN7_ERR_INT);
1891         enum pipe pipe;
1892
1893         if (err_int & ERR_INT_POISON)
1894                 DRM_ERROR("Poison interrupt\n");
1895
1896         for_each_pipe(dev_priv, pipe) {
1897                 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
1898                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1899
1900                 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1901                         if (IS_IVYBRIDGE(dev))
1902                                 ivb_pipe_crc_irq_handler(dev, pipe);
1903                         else
1904                                 hsw_pipe_crc_irq_handler(dev, pipe);
1905                 }
1906         }
1907
1908         I915_WRITE(GEN7_ERR_INT, err_int);
1909 }
1910
1911 static void cpt_serr_int_handler(struct drm_device *dev)
1912 {
1913         struct drm_i915_private *dev_priv = dev->dev_private;
1914         u32 serr_int = I915_READ(SERR_INT);
1915
1916         if (serr_int & SERR_INT_POISON)
1917                 DRM_ERROR("PCH poison interrupt\n");
1918
1919         if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
1920                 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
1921
1922         if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
1923                 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
1924
1925         if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
1926                 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
1927
1928         I915_WRITE(SERR_INT, serr_int);
1929 }
1930
1931 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
1932 {
1933         struct drm_i915_private *dev_priv = dev->dev_private;
1934         int pipe;
1935         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1936
1937         if (hotplug_trigger)
1938                 ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
1939
1940         if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1941                 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1942                                SDE_AUDIO_POWER_SHIFT_CPT);
1943                 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1944                                  port_name(port));
1945         }
1946
1947         if (pch_iir & SDE_AUX_MASK_CPT)
1948                 dp_aux_irq_handler(dev);
1949
1950         if (pch_iir & SDE_GMBUS_CPT)
1951                 gmbus_irq_handler(dev);
1952
1953         if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
1954                 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
1955
1956         if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
1957                 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
1958
1959         if (pch_iir & SDE_FDI_MASK_CPT)
1960                 for_each_pipe(dev_priv, pipe)
1961                         DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
1962                                          pipe_name(pipe),
1963                                          I915_READ(FDI_RX_IIR(pipe)));
1964
1965         if (pch_iir & SDE_ERROR_CPT)
1966                 cpt_serr_int_handler(dev);
1967 }
1968
1969 static void spt_irq_handler(struct drm_device *dev, u32 pch_iir)
1970 {
1971         struct drm_i915_private *dev_priv = dev->dev_private;
1972         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
1973                 ~SDE_PORTE_HOTPLUG_SPT;
1974         u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
1975         u32 pin_mask = 0, long_mask = 0;
1976
1977         if (hotplug_trigger) {
1978                 u32 dig_hotplug_reg;
1979
1980                 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1981                 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1982
1983                 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1984                                    dig_hotplug_reg, hpd_spt,
1985                                    spt_port_hotplug_long_detect);
1986         }
1987
1988         if (hotplug2_trigger) {
1989                 u32 dig_hotplug_reg;
1990
1991                 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
1992                 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
1993
1994                 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
1995                                    dig_hotplug_reg, hpd_spt,
1996                                    spt_port_hotplug2_long_detect);
1997         }
1998
1999         if (pin_mask)
2000                 intel_hpd_irq_handler(dev, pin_mask, long_mask);
2001
2002         if (pch_iir & SDE_GMBUS_CPT)
2003                 gmbus_irq_handler(dev);
2004 }
2005
2006 static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
2007                                 const u32 hpd[HPD_NUM_PINS])
2008 {
2009         struct drm_i915_private *dev_priv = to_i915(dev);
2010         u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2011
2012         dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
2013         I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
2014
2015         intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2016                            dig_hotplug_reg, hpd,
2017                            ilk_port_hotplug_long_detect);
2018
2019         intel_hpd_irq_handler(dev, pin_mask, long_mask);
2020 }
2021
2022 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
2023 {
2024         struct drm_i915_private *dev_priv = dev->dev_private;
2025         enum pipe pipe;
2026         u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2027
2028         if (hotplug_trigger)
2029                 ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk);
2030
2031         if (de_iir & DE_AUX_CHANNEL_A)
2032                 dp_aux_irq_handler(dev);
2033
2034         if (de_iir & DE_GSE)
2035                 intel_opregion_asle_intr(dev);
2036
2037         if (de_iir & DE_POISON)
2038                 DRM_ERROR("Poison interrupt\n");
2039
2040         for_each_pipe(dev_priv, pipe) {
2041                 if (de_iir & DE_PIPE_VBLANK(pipe) &&
2042                     intel_pipe_handle_vblank(dev, pipe))
2043                         intel_check_page_flip(dev, pipe);
2044
2045                 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2046                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2047
2048                 if (de_iir & DE_PIPE_CRC_DONE(pipe))
2049                         i9xx_pipe_crc_irq_handler(dev, pipe);
2050
2051                 /* plane/pipes map 1:1 on ilk+ */
2052                 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
2053                         intel_prepare_page_flip(dev, pipe);
2054                         intel_finish_page_flip_plane(dev, pipe);
2055                 }
2056         }
2057
2058         /* check event from PCH */
2059         if (de_iir & DE_PCH_EVENT) {
2060                 u32 pch_iir = I915_READ(SDEIIR);
2061
2062                 if (HAS_PCH_CPT(dev))
2063                         cpt_irq_handler(dev, pch_iir);
2064                 else
2065                         ibx_irq_handler(dev, pch_iir);
2066
2067                 /* should clear PCH hotplug event before clearing CPU irq */
2068                 I915_WRITE(SDEIIR, pch_iir);
2069         }
2070
2071         if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
2072                 ironlake_rps_change_irq_handler(dev);
2073 }
2074
2075 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
2076 {
2077         struct drm_i915_private *dev_priv = dev->dev_private;
2078         enum pipe pipe;
2079         u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2080
2081         if (hotplug_trigger)
2082                 ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ivb);
2083
2084         if (de_iir & DE_ERR_INT_IVB)
2085                 ivb_err_int_handler(dev);
2086
2087         if (de_iir & DE_AUX_CHANNEL_A_IVB)
2088                 dp_aux_irq_handler(dev);
2089
2090         if (de_iir & DE_GSE_IVB)
2091                 intel_opregion_asle_intr(dev);
2092
2093         for_each_pipe(dev_priv, pipe) {
2094                 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
2095                     intel_pipe_handle_vblank(dev, pipe))
2096                         intel_check_page_flip(dev, pipe);
2097
2098                 /* plane/pipes map 1:1 on ilk+ */
2099                 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
2100                         intel_prepare_page_flip(dev, pipe);
2101                         intel_finish_page_flip_plane(dev, pipe);
2102                 }
2103         }
2104
2105         /* check event from PCH */
2106         if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
2107                 u32 pch_iir = I915_READ(SDEIIR);
2108
2109                 cpt_irq_handler(dev, pch_iir);
2110
2111                 /* clear PCH hotplug event before clearing CPU irq */
2112                 I915_WRITE(SDEIIR, pch_iir);
2113         }
2114 }
2115
2116 /*
2117  * To handle irqs with the minimum potential races with fresh interrupts, we:
2118  * 1 - Disable Master Interrupt Control.
2119  * 2 - Find the source(s) of the interrupt.
2120  * 3 - Clear the Interrupt Identity bits (IIR).
2121  * 4 - Process the interrupt(s) that had bits set in the IIRs.
2122  * 5 - Re-enable Master Interrupt Control.
2123  */
2124 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2125 {
2126         struct drm_device *dev = arg;
2127         struct drm_i915_private *dev_priv = dev->dev_private;
2128         u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2129         irqreturn_t ret = IRQ_NONE;
2130
2131         if (!intel_irqs_enabled(dev_priv))
2132                 return IRQ_NONE;
2133
2134         /* We get interrupts on unclaimed registers, so check for this before we
2135          * do any I915_{READ,WRITE}. */
2136         intel_uncore_check_errors(dev);
2137
2138         /* disable master interrupt before clearing iir  */
2139         de_ier = I915_READ(DEIER);
2140         I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2141         POSTING_READ(DEIER);
2142
2143         /* Disable south interrupts. We'll only write to SDEIIR once, so further
2144          * interrupts will be stored on its back queue, and then we'll be
2145          * able to process them after we restore SDEIER (as soon as we restore
2146          * it, we'll get an interrupt if SDEIIR still has something to process
2147          * due to its back queue). */
2148         if (!HAS_PCH_NOP(dev)) {
2149                 sde_ier = I915_READ(SDEIER);
2150                 I915_WRITE(SDEIER, 0);
2151                 POSTING_READ(SDEIER);
2152         }
2153
2154         /* Find, clear, then process each source of interrupt */
2155
2156         gt_iir = I915_READ(GTIIR);
2157         if (gt_iir) {
2158                 I915_WRITE(GTIIR, gt_iir);
2159                 ret = IRQ_HANDLED;
2160                 if (INTEL_INFO(dev)->gen >= 6)
2161                         snb_gt_irq_handler(dev, dev_priv, gt_iir);
2162                 else
2163                         ilk_gt_irq_handler(dev, dev_priv, gt_iir);
2164         }
2165
2166         de_iir = I915_READ(DEIIR);
2167         if (de_iir) {
2168                 I915_WRITE(DEIIR, de_iir);
2169                 ret = IRQ_HANDLED;
2170                 if (INTEL_INFO(dev)->gen >= 7)
2171                         ivb_display_irq_handler(dev, de_iir);
2172                 else
2173                         ilk_display_irq_handler(dev, de_iir);
2174         }
2175
2176         if (INTEL_INFO(dev)->gen >= 6) {
2177                 u32 pm_iir = I915_READ(GEN6_PMIIR);
2178                 if (pm_iir) {
2179                         I915_WRITE(GEN6_PMIIR, pm_iir);
2180                         ret = IRQ_HANDLED;
2181                         gen6_rps_irq_handler(dev_priv, pm_iir);
2182                 }
2183         }
2184
2185         I915_WRITE(DEIER, de_ier);
2186         POSTING_READ(DEIER);
2187         if (!HAS_PCH_NOP(dev)) {
2188                 I915_WRITE(SDEIER, sde_ier);
2189                 POSTING_READ(SDEIER);
2190         }
2191
2192         return ret;
2193 }
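/*
 * Condensed sketch, not part of the driver: the five-step sequence listed
 * in the comment before ironlake_irq_handler() -- disable the master
 * control, read and clear one IIR, process it, re-enable the master
 * control. Only the display engine IIR is shown; the GT/PM/SDE legs above
 * follow the same shape.
 */
#if 0
static irqreturn_t example_master_irq_skeleton(struct drm_i915_private *dev_priv)
{
	irqreturn_t ret = IRQ_NONE;
	u32 de_ier, de_iir;

	de_ier = I915_READ(DEIER);			/* 1: disable master ctl */
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	de_iir = I915_READ(DEIIR);			/* 2: find the source(s) */
	if (de_iir) {
		I915_WRITE(DEIIR, de_iir);		/* 3: clear the IIR bits */
		ret = IRQ_HANDLED;
		/* 4: handle whatever bits were set in de_iir */
	}

	I915_WRITE(DEIER, de_ier);			/* 5: re-enable master ctl */
	POSTING_READ(DEIER);

	return ret;
}
#endif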
2194
2195 static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
2196                                 const u32 hpd[HPD_NUM_PINS])
2197 {
2198         struct drm_i915_private *dev_priv = to_i915(dev);
2199         u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2200
2201         dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2202         I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2203
2204         intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2205                            dig_hotplug_reg, hpd,
2206                            bxt_port_hotplug_long_detect);
2207
2208         intel_hpd_irq_handler(dev, pin_mask, long_mask);
2209 }
2210
2211 static irqreturn_t gen8_irq_handler(int irq, void *arg)
2212 {
2213         struct drm_device *dev = arg;
2214         struct drm_i915_private *dev_priv = dev->dev_private;
2215         u32 master_ctl;
2216         irqreturn_t ret = IRQ_NONE;
2217         uint32_t tmp = 0;
2218         enum pipe pipe;
2219         u32 aux_mask = GEN8_AUX_CHANNEL_A;
2220
2221         if (!intel_irqs_enabled(dev_priv))
2222                 return IRQ_NONE;
2223
2224         if (INTEL_INFO(dev_priv)->gen >= 9)
2225                 aux_mask |=  GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
2226                         GEN9_AUX_CHANNEL_D;
2227
2228         master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
2229         master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2230         if (!master_ctl)
2231                 return IRQ_NONE;
2232
2233         I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
2234
2235         /* Find, clear, then process each source of interrupt */
2236
2237         ret = gen8_gt_irq_handler(dev_priv, master_ctl);
2238
2239         if (master_ctl & GEN8_DE_MISC_IRQ) {
2240                 tmp = I915_READ(GEN8_DE_MISC_IIR);
2241                 if (tmp) {
2242                         I915_WRITE(GEN8_DE_MISC_IIR, tmp);
2243                         ret = IRQ_HANDLED;
2244                         if (tmp & GEN8_DE_MISC_GSE)
2245                                 intel_opregion_asle_intr(dev);
2246                         else
2247                                 DRM_ERROR("Unexpected DE Misc interrupt\n");
2248                 }
2249                 else
2250                         DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2251         }
2252
2253         if (master_ctl & GEN8_DE_PORT_IRQ) {
2254                 tmp = I915_READ(GEN8_DE_PORT_IIR);
2255                 if (tmp) {
2256                         bool found = false;
2257                         u32 hotplug_trigger = 0;
2258
2259                         if (IS_BROXTON(dev_priv))
2260                                 hotplug_trigger = tmp & BXT_DE_PORT_HOTPLUG_MASK;
2261                         else if (IS_BROADWELL(dev_priv))
2262                                 hotplug_trigger = tmp & GEN8_PORT_DP_A_HOTPLUG;
2263
2264                         I915_WRITE(GEN8_DE_PORT_IIR, tmp);
2265                         ret = IRQ_HANDLED;
2266
2267                         if (tmp & aux_mask) {
2268                                 dp_aux_irq_handler(dev);
2269                                 found = true;
2270                         }
2271
2272                         if (hotplug_trigger) {
2273                                 if (IS_BROXTON(dev))
2274                                         bxt_hpd_irq_handler(dev, hotplug_trigger, hpd_bxt);
2275                                 else
2276                                         ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_bdw);
2277                                 found = true;
2278                         }
2279
2280                         if (IS_BROXTON(dev) && (tmp & BXT_DE_PORT_GMBUS)) {
2281                                 gmbus_irq_handler(dev);
2282                                 found = true;
2283                         }
2284
2285                         if (!found)
2286                                 DRM_ERROR("Unexpected DE Port interrupt\n");
2287                 }
2288                 else
2289                         DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2290         }
2291
2292         for_each_pipe(dev_priv, pipe) {
2293                 uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
2294
2295                 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2296                         continue;
2297
2298                 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2299                 if (pipe_iir) {
2300                         ret = IRQ_HANDLED;
2301                         I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
2302
2303                         if (pipe_iir & GEN8_PIPE_VBLANK &&
2304                             intel_pipe_handle_vblank(dev, pipe))
2305                                 intel_check_page_flip(dev, pipe);
2306
2307                         if (INTEL_INFO(dev_priv)->gen >= 9)
2308                                 flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
2309                         else
2310                                 flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
2311
2312                         if (flip_done) {
2313                                 intel_prepare_page_flip(dev, pipe);
2314                                 intel_finish_page_flip_plane(dev, pipe);
2315                         }
2316
2317                         if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2318                                 hsw_pipe_crc_irq_handler(dev, pipe);
2319
2320                         if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
2321                                 intel_cpu_fifo_underrun_irq_handler(dev_priv,
2322                                                                     pipe);
2323
2324
2325                         if (INTEL_INFO(dev_priv)->gen >= 9)
2326                                 fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2327                         else
2328                                 fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2329
2330                         if (fault_errors)
2331                                 DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
2332                                           pipe_name(pipe),
2333                                           pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
2334                 } else
2335                         DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2336         }
2337
2338         if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
2339             master_ctl & GEN8_DE_PCH_IRQ) {
2340                 /*
2341                  * FIXME(BDW): Assume for now that the new interrupt handling
2342                  * scheme also closed the SDE interrupt handling race we've seen
2343                  * on older pch-split platforms. But this needs testing.
2344                  */
2345                 u32 pch_iir = I915_READ(SDEIIR);
2346                 if (pch_iir) {
2347                         I915_WRITE(SDEIIR, pch_iir);
2348                         ret = IRQ_HANDLED;
2349
2350                         if (HAS_PCH_SPT(dev_priv))
2351                                 spt_irq_handler(dev, pch_iir);
2352                         else
2353                                 cpt_irq_handler(dev, pch_iir);
2354                 } else
2355                         DRM_ERROR("The master control interrupt lied (SDE)!\n");
2356
2357         }
2358
2359         I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2360         POSTING_READ_FW(GEN8_MASTER_IRQ);
2361
2362         return ret;
2363 }
2364
2365 static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2366                                bool reset_completed)
2367 {
2368         struct intel_engine_cs *ring;
2369         int i;
2370
2371         /*
2372          * Notify all waiters for GPU completion events that reset state has
2373          * been changed, and that they need to restart their wait after
2374          * checking for potential errors (and bail out to drop locks if there is
2375          * a gpu reset pending so that i915_reset_and_wakeup can acquire them).
2376          */
2377
2378         /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2379         for_each_ring(ring, dev_priv, i)
2380                 wake_up_all(&ring->irq_queue);
2381
2382         /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2383         wake_up_all(&dev_priv->pending_flip_queue);
2384
2385         /*
2386          * Signal tasks blocked in i915_gem_wait_for_error that the pending
2387          * reset state is cleared.
2388          */
2389         if (reset_completed)
2390                 wake_up_all(&dev_priv->gpu_error.reset_queue);
2391 }
2392
2393 /**
2394  * i915_reset_and_wakeup - do process context error handling work
2395  *
2396  * Fire an error uevent so userspace can see that a hang or error
2397  * was detected.
2398  */
2399 static void i915_reset_and_wakeup(struct drm_device *dev)
2400 {
2401         struct drm_i915_private *dev_priv = to_i915(dev);
2402         struct i915_gpu_error *error = &dev_priv->gpu_error;
2403         char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2404         char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2405         char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2406         int ret;
2407
2408         kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
2409
2410         /*
2411          * Note that there's only one work item which does gpu resets, so we
2412          * need not worry about concurrent gpu resets potentially incrementing
2413          * error->reset_counter twice. We only need to take care of another
2414          * racing irq/hangcheck declaring the gpu dead for a second time. A
2415          * quick check for that is good enough: schedule_work ensures the
2416          * correct ordering between hang detection and this work item, and since
2417          * the reset in-progress bit is only ever set by code outside of this
2418          * work we don't need to worry about any other races.
2419          */
2420         if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
2421                 DRM_DEBUG_DRIVER("resetting chip\n");
2422                 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
2423                                    reset_event);
2424
2425                 /*
2426                  * In most cases it's guaranteed that we get here with an RPM
2427                  * reference held, for example because there is a pending GPU
2428                  * request that won't finish until the reset is done. This
2429                  * isn't the case at least when we get here by doing a
2430                  * simulated reset via debugfs, so get an RPM reference.
2431                  */
2432                 intel_runtime_pm_get(dev_priv);
2433
2434                 intel_prepare_reset(dev);
2435
2436                 /*
2437                  * All state reset _must_ be completed before we update the
2438                  * reset counter, for otherwise waiters might miss the reset
2439                  * pending state and not properly drop locks, resulting in
2440                  * deadlocks with the reset work.
2441                  */
2442                 ret = i915_reset(dev);
2443
2444                 intel_finish_reset(dev);
2445
2446                 intel_runtime_pm_put(dev_priv);
2447
2448                 if (ret == 0) {
2449                         /*
2450                          * After all the gem state is reset, increment the reset
2451                          * counter and wake up everyone waiting for the reset to
2452                          * complete.
2453                          *
2454                          * Since unlock operations are a one-sided barrier only,
2455                          * we need to insert a barrier here to order any seqno
2456                          * updates before
2457                          * the counter increment.
2458                          */
2459                         smp_mb__before_atomic();
2460                         atomic_inc(&dev_priv->gpu_error.reset_counter);
2461
2462                         kobject_uevent_env(&dev->primary->kdev->kobj,
2463                                            KOBJ_CHANGE, reset_done_event);
2464                 } else {
2465                         atomic_or(I915_WEDGED, &error->reset_counter);
2466                 }
2467
2468                 /*
2469                  * Note: The wake_up also serves as a memory barrier so that
2470                  * waiters see the updated value of the reset counter atomic_t.
2471                  */
2472                 i915_error_wake_up(dev_priv, true);
2473         }
2474 }
2475
2476 static void i915_report_and_clear_eir(struct drm_device *dev)
2477 {
2478         struct drm_i915_private *dev_priv = dev->dev_private;
2479         uint32_t instdone[I915_NUM_INSTDONE_REG];
2480         u32 eir = I915_READ(EIR);
2481         int pipe, i;
2482
2483         if (!eir)
2484                 return;
2485
2486         pr_err("render error detected, EIR: 0x%08x\n", eir);
2487
2488         i915_get_extra_instdone(dev, instdone);
2489
2490         if (IS_G4X(dev)) {
2491                 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2492                         u32 ipeir = I915_READ(IPEIR_I965);
2493
2494                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2495                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2496                         for (i = 0; i < ARRAY_SIZE(instdone); i++)
2497                                 pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2498                         pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
2499                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2500                         I915_WRITE(IPEIR_I965, ipeir);
2501                         POSTING_READ(IPEIR_I965);
2502                 }
2503                 if (eir & GM45_ERROR_PAGE_TABLE) {
2504                         u32 pgtbl_err = I915_READ(PGTBL_ER);
2505                         pr_err("page table error\n");
2506                         pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
2507                         I915_WRITE(PGTBL_ER, pgtbl_err);
2508                         POSTING_READ(PGTBL_ER);
2509                 }
2510         }
2511
2512         if (!IS_GEN2(dev)) {
2513                 if (eir & I915_ERROR_PAGE_TABLE) {
2514                         u32 pgtbl_err = I915_READ(PGTBL_ER);
2515                         pr_err("page table error\n");
2516                         pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
2517                         I915_WRITE(PGTBL_ER, pgtbl_err);
2518                         POSTING_READ(PGTBL_ER);
2519                 }
2520         }
2521
2522         if (eir & I915_ERROR_MEMORY_REFRESH) {
2523                 pr_err("memory refresh error:\n");
2524                 for_each_pipe(dev_priv, pipe)
2525                         pr_err("pipe %c stat: 0x%08x\n",
2526                                pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2527                 /* pipestat has already been acked */
2528         }
2529         if (eir & I915_ERROR_INSTRUCTION) {
2530                 pr_err("instruction error\n");
2531                 pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
2532                 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2533                         pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2534                 if (INTEL_INFO(dev)->gen < 4) {
2535                         u32 ipeir = I915_READ(IPEIR);
2536
2537                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
2538                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
2539                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
2540                         I915_WRITE(IPEIR, ipeir);
2541                         POSTING_READ(IPEIR);
2542                 } else {
2543                         u32 ipeir = I915_READ(IPEIR_I965);
2544
2545                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2546                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2547                         pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
2548                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2549                         I915_WRITE(IPEIR_I965, ipeir);
2550                         POSTING_READ(IPEIR_I965);
2551                 }
2552         }
2553
2554         I915_WRITE(EIR, eir);
2555         POSTING_READ(EIR);
2556         eir = I915_READ(EIR);
2557         if (eir) {
2558                 /*
2559                  * some errors might have become stuck,
2560                  * mask them.
2561                  */
2562                 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2563                 I915_WRITE(EMR, I915_READ(EMR) | eir);
2564                 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2565         }
2566 }
2567
2568 /**
2569  * i915_handle_error - handle a gpu error
2570  * @dev: drm device
2571  *
2572  * Do some basic checking of register state at error time and
2573  * dump it to the syslog.  Also call i915_capture_error_state() to make
2574  * sure we get a record and make it available in debugfs.  Fire a uevent
2575  * so userspace knows something bad happened (should trigger collection
2576  * of a ring dump etc.).
2577  */
2578 void i915_handle_error(struct drm_device *dev, bool wedged,
2579                        const char *fmt, ...)
2580 {
2581         struct drm_i915_private *dev_priv = dev->dev_private;
2582         va_list args;
2583         char error_msg[80];
2584
2585         va_start(args, fmt);
2586         vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2587         va_end(args);
2588
2589         i915_capture_error_state(dev, wedged, error_msg);
2590         i915_report_and_clear_eir(dev);
2591
2592         if (wedged) {
2593                 atomic_or(I915_RESET_IN_PROGRESS_FLAG,
2594                                 &dev_priv->gpu_error.reset_counter);
2595
2596                 /*
2597                  * Wakeup waiting processes so that the reset function
2598                  * i915_reset_and_wakeup doesn't deadlock trying to grab
2599                  * various locks. By bumping the reset counter first, the woken
2600                  * processes will see a reset in progress and back off,
2601                  * releasing their locks and then wait for the reset completion.
2602                  * We must do this for _all_ gpu waiters that might hold locks
2603                  * that the reset work needs to acquire.
2604                  *
2605                  * Note: The wake_up serves as the required memory barrier to
2606                  * ensure that the waiters see the updated value of the reset
2607                  * counter atomic_t.
2608                  */
2609                 i915_error_wake_up(dev_priv, false);
2610         }
2611
2612         i915_reset_and_wakeup(dev);
2613 }
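/*
 * Illustrative sketch, not part of the driver: the waiter side of the
 * protocol described above. A GPU waiter woken by i915_error_wake_up()
 * re-checks the reset state and backs off so i915_reset_and_wakeup() can
 * take the locks it needs. Helper name and return values are illustrative.
 */
#if 0
static int example_waiter_check_reset(struct drm_i915_private *dev_priv)
{
	struct i915_gpu_error *error = &dev_priv->gpu_error;

	if (i915_terminally_wedged(error))
		return -EIO;		/* reset failed, give up */

	if (i915_reset_in_progress(error))
		return -EAGAIN;		/* drop locks and retry after the reset */

	return 0;
}
#endif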
2614
2615 /* Called from drm generic code, passed 'crtc' which
2616  * we use as a pipe index
2617  */
2618 static int i915_enable_vblank(struct drm_device *dev, int pipe)
2619 {
2620         struct drm_i915_private *dev_priv = dev->dev_private;
2621         unsigned long irqflags;
2622
2623         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2624         if (INTEL_INFO(dev)->gen >= 4)
2625                 i915_enable_pipestat(dev_priv, pipe,
2626                                      PIPE_START_VBLANK_INTERRUPT_STATUS);
2627         else
2628                 i915_enable_pipestat(dev_priv, pipe,
2629                                      PIPE_VBLANK_INTERRUPT_STATUS);
2630         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2631
2632         return 0;
2633 }
2634
2635 static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
2636 {
2637         struct drm_i915_private *dev_priv = dev->dev_private;
2638         unsigned long irqflags;
2639         uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2640                                                      DE_PIPE_VBLANK(pipe);
2641
2642         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2643         ironlake_enable_display_irq(dev_priv, bit);
2644         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2645
2646         return 0;
2647 }
2648
2649 static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2650 {
2651         struct drm_i915_private *dev_priv = dev->dev_private;
2652         unsigned long irqflags;
2653
2654         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2655         i915_enable_pipestat(dev_priv, pipe,
2656                              PIPE_START_VBLANK_INTERRUPT_STATUS);
2657         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2658
2659         return 0;
2660 }
2661
2662 static int gen8_enable_vblank(struct drm_device *dev, int pipe)
2663 {
2664         struct drm_i915_private *dev_priv = dev->dev_private;
2665         unsigned long irqflags;
2666
2667         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2668         dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
2669         I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2670         POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2671         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2672         return 0;
2673 }
2674
2675 /* Called from drm generic code, passed 'crtc' which
2676  * we use as a pipe index
2677  */
2678 static void i915_disable_vblank(struct drm_device *dev, int pipe)
2679 {
2680         struct drm_i915_private *dev_priv = dev->dev_private;
2681         unsigned long irqflags;
2682
2683         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2684         i915_disable_pipestat(dev_priv, pipe,
2685                               PIPE_VBLANK_INTERRUPT_STATUS |
2686                               PIPE_START_VBLANK_INTERRUPT_STATUS);
2687         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2688 }
2689
2690 static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
2691 {
2692         struct drm_i915_private *dev_priv = dev->dev_private;
2693         unsigned long irqflags;
2694         uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2695                                                      DE_PIPE_VBLANK(pipe);
2696
2697         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2698         ironlake_disable_display_irq(dev_priv, bit);
2699         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2700 }
2701
2702 static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
2703 {
2704         struct drm_i915_private *dev_priv = dev->dev_private;
2705         unsigned long irqflags;
2706
2707         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2708         i915_disable_pipestat(dev_priv, pipe,
2709                               PIPE_START_VBLANK_INTERRUPT_STATUS);
2710         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2711 }
2712
2713 static void gen8_disable_vblank(struct drm_device *dev, int pipe)
2714 {
2715         struct drm_i915_private *dev_priv = dev->dev_private;
2716         unsigned long irqflags;
2717
2718         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2719         dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
2720         I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2721         POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2722         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2723 }
2724
2725 static bool
2726 ring_idle(struct intel_engine_cs *ring, u32 seqno)
2727 {
2728         return (list_empty(&ring->request_list) ||
2729                 i915_seqno_passed(seqno, ring->last_submitted_seqno));
2730 }
2731
2732 static bool
2733 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
2734 {
2735         if (INTEL_INFO(dev)->gen >= 8) {
2736                 return (ipehr >> 23) == 0x1c;
2737         } else {
2738                 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2739                 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2740                                  MI_SEMAPHORE_REGISTER);
2741         }
2742 }
2743
2744 static struct intel_engine_cs *
2745 semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
2746 {
2747         struct drm_i915_private *dev_priv = ring->dev->dev_private;
2748         struct intel_engine_cs *signaller;
2749         int i;
2750
2751         if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
2752                 for_each_ring(signaller, dev_priv, i) {
2753                         if (ring == signaller)
2754                                 continue;
2755
2756                         if (offset == signaller->semaphore.signal_ggtt[ring->id])
2757                                 return signaller;
2758                 }
2759         } else {
2760                 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
2761
2762                 for_each_ring(signaller, dev_priv, i) {
2763                         if (ring == signaller)
2764                                 continue;
2765
2766                         if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
2767                                 return signaller;
2768                 }
2769         }
2770
2771         DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
2772                   ring->id, ipehr, offset);
2773
2774         return NULL;
2775 }
2776
2777 static struct intel_engine_cs *
2778 semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
2779 {
2780         struct drm_i915_private *dev_priv = ring->dev->dev_private;
2781         u32 cmd, ipehr, head;
2782         u64 offset = 0;
2783         int i, backwards;
2784
2785         ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2786         if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
2787                 return NULL;
2788
2789         /*
2790          * HEAD is likely pointing to the dword after the actual command,
2791          * so scan backwards until we find the MBOX. But limit it to just 3
2792          * or 4 dwords depending on the semaphore wait command size.
2793          * Note that we don't care about ACTHD here since that might
2794          * point at a batch, and semaphores are always emitted into the
2795          * ringbuffer itself.
2796          */
2797         head = I915_READ_HEAD(ring) & HEAD_ADDR;
2798         backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
2799
2800         for (i = backwards; i; --i) {
2801                 /*
2802                  * Be paranoid and presume the hw has gone off into the wild -
2803                  * our ring is smaller than what the hardware (and hence
2804                  * HEAD_ADDR) allows. Also handles wrap-around.
2805                  */
2806                 head &= ring->buffer->size - 1;
2807
2808                 /* This here seems to blow up */
2809                 cmd = ioread32(ring->buffer->virtual_start + head);
2810                 if (cmd == ipehr)
2811                         break;
2812
2813                 head -= 4;
2814         }
2815
2816         if (!i)
2817                 return NULL;
2818
2819         *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
2820         if (INTEL_INFO(ring->dev)->gen >= 8) {
2821                 offset = ioread32(ring->buffer->virtual_start + head + 12);
2822                 offset <<= 32;
2823                 offset |= ioread32(ring->buffer->virtual_start + head + 8);
2824         }
2825         return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
2826 }
2827
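/*
 * Returns 1 if the semaphore @ring is waiting on has already been signalled,
 * 0 if it is still pending, and -1 if the signaller cannot be determined or
 * appears deadlocked itself.
 */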
2828 static int semaphore_passed(struct intel_engine_cs *ring)
2829 {
2830         struct drm_i915_private *dev_priv = ring->dev->dev_private;
2831         struct intel_engine_cs *signaller;
2832         u32 seqno;
2833
2834         ring->hangcheck.deadlock++;
2835
2836         signaller = semaphore_waits_for(ring, &seqno);
2837         if (signaller == NULL)
2838                 return -1;
2839
2840         /* Prevent pathological recursion due to driver bugs */
2841         if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
2842                 return -1;
2843
2844         if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
2845                 return 1;
2846
2847         /* cursory check for an unkickable deadlock */
2848         if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
2849             semaphore_passed(signaller) < 0)
2850                 return -1;
2851
2852         return 0;
2853 }
2854
2855 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2856 {
2857         struct intel_engine_cs *ring;
2858         int i;
2859
2860         for_each_ring(ring, dev_priv, i)
2861                 ring->hangcheck.deadlock = 0;
2862 }
2863
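/*
 * Classify a ring whose seqno has not advanced: ACTIVE/ACTIVE_LOOP while
 * ACTHD is still moving, KICK when it was stuck on an event or semaphore
 * wait that we were able to poke, WAIT for a still-pending semaphore, and
 * HUNG otherwise.
 */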
2864 static enum intel_ring_hangcheck_action
2865 ring_stuck(struct intel_engine_cs *ring, u64 acthd)
2866 {
2867         struct drm_device *dev = ring->dev;
2868         struct drm_i915_private *dev_priv = dev->dev_private;
2869         u32 tmp;
2870
2871         if (acthd != ring->hangcheck.acthd) {
2872                 if (acthd > ring->hangcheck.max_acthd) {
2873                         ring->hangcheck.max_acthd = acthd;
2874                         return HANGCHECK_ACTIVE;
2875                 }
2876
2877                 return HANGCHECK_ACTIVE_LOOP;
2878         }
2879
2880         if (IS_GEN2(dev))
2881                 return HANGCHECK_HUNG;
2882
2883         /* Is the chip hanging on a WAIT_FOR_EVENT?
2884          * If so we can simply poke the RB_WAIT bit
2885          * and break the hang. This should work on
2886          * all but the second generation chipsets.
2887          */
2888         tmp = I915_READ_CTL(ring);
2889         if (tmp & RING_WAIT) {
2890                 i915_handle_error(dev, false,
2891                                   "Kicking stuck wait on %s",
2892                                   ring->name);
2893                 I915_WRITE_CTL(ring, tmp);
2894                 return HANGCHECK_KICK;
2895         }
2896
2897         if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
2898                 switch (semaphore_passed(ring)) {
2899                 default:
2900                         return HANGCHECK_HUNG;
2901                 case 1:
2902                         i915_handle_error(dev, false,
2903                                           "Kicking stuck semaphore on %s",
2904                                           ring->name);
2905                         I915_WRITE_CTL(ring, tmp);
2906                         return HANGCHECK_KICK;
2907                 case 0:
2908                         return HANGCHECK_WAIT;
2909                 }
2910         }
2911
2912         return HANGCHECK_HUNG;
2913 }
2914
2915 /*
2916  * This is called when the chip hasn't reported back with completed
2917  * batchbuffers in a long time. We keep track of per-ring seqno progress and
2918  * if there is no progress, the hangcheck score for that ring is increased.
2919  * Further, acthd is inspected to see if the ring is stuck. If it is stuck
2920  * we kick the ring. If we see no progress on three subsequent calls
2921  * we assume the chip is wedged and try to fix it by resetting the chip.
2922  */
2923 static void i915_hangcheck_elapsed(struct work_struct *work)
2924 {
2925         struct drm_i915_private *dev_priv =
2926                 container_of(work, typeof(*dev_priv),
2927                              gpu_error.hangcheck_work.work);
2928         struct drm_device *dev = dev_priv->dev;
2929         struct intel_engine_cs *ring;
2930         int i;
2931         int busy_count = 0, rings_hung = 0;
2932         bool stuck[I915_NUM_RINGS] = { 0 };
2933 #define BUSY 1
2934 #define KICK 5
2935 #define HUNG 20
2936
2937         if (!i915.enable_hangcheck)
2938                 return;
2939
2940         for_each_ring(ring, dev_priv, i) {
2941                 u64 acthd;
2942                 u32 seqno;
2943                 bool busy = true;
2944
2945                 semaphore_clear_deadlocks(dev_priv);
2946
2947                 seqno = ring->get_seqno(ring, false);
2948                 acthd = intel_ring_get_active_head(ring);
2949
2950                 if (ring->hangcheck.seqno == seqno) {
2951                         if (ring_idle(ring, seqno)) {
2952                                 ring->hangcheck.action = HANGCHECK_IDLE;
2953
2954                                 if (waitqueue_active(&ring->irq_queue)) {
2955                                         /* Issue a wake-up to catch stuck h/w. */
2956                                         if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
2957                                                 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
2958                                                         DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2959                                                                   ring->name);
2960                                                 else
2961                                                         DRM_INFO("Fake missed irq on %s\n",
2962                                                                  ring->name);
2963                                                 wake_up_all(&ring->irq_queue);
2964                                         }
2965                                         /* Safeguard against driver failure */
2966                                         ring->hangcheck.score += BUSY;
2967                                 } else
2968                                         busy = false;
2969                         } else {
2970                                 /* We always increment the hangcheck score
2971                                  * if the ring is busy and still processing
2972                                  * the same request, so that no single request
2973                                  * can run indefinitely (such as a chain of
2974                                  * batches). The only time we do not increment
2975                                  * the hangcheck score on this ring is if this
2976                                  * ring is in a legitimate wait for another
2977                                  * ring. In that case the waiting ring is a
2978                                  * victim and we want to be sure we catch the
2979                                  * right culprit. Then every time we do kick
2980                                  * the ring, add a small increment to the
2981                                  * score so that we can catch a batch that is
2982                                  * being repeatedly kicked and so responsible
2983                                  * for stalling the machine.
2984                                  */
2985                                 ring->hangcheck.action = ring_stuck(ring,
2986                                                                     acthd);
2987
2988                                 switch (ring->hangcheck.action) {
2989                                 case HANGCHECK_IDLE:
2990                                 case HANGCHECK_WAIT:
2991                                 case HANGCHECK_ACTIVE:
2992                                         break;
2993                                 case HANGCHECK_ACTIVE_LOOP:
2994                                         ring->hangcheck.score += BUSY;
2995                                         break;
2996                                 case HANGCHECK_KICK:
2997                                         ring->hangcheck.score += KICK;
2998                                         break;
2999                                 case HANGCHECK_HUNG:
3000                                         ring->hangcheck.score += HUNG;
3001                                         stuck[i] = true;
3002                                         break;
3003                                 }
3004                         }
3005                 } else {
3006                         ring->hangcheck.action = HANGCHECK_ACTIVE;
3007
3008                         /* Gradually reduce the count so that we catch DoS
3009                          * attempts across multiple batches.
3010                          */
3011                         if (ring->hangcheck.score > 0)
3012                                 ring->hangcheck.score--;
3013
3014                         ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
3015                 }
3016
3017                 ring->hangcheck.seqno = seqno;
3018                 ring->hangcheck.acthd = acthd;
3019                 busy_count += busy;
3020         }
3021
3022         for_each_ring(ring, dev_priv, i) {
3023                 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
3024                         DRM_INFO("%s on %s\n",
3025                                  stuck[i] ? "stuck" : "no progress",
3026                                  ring->name);
3027                         rings_hung++;
3028                 }
3029         }
3030
3031         if (rings_hung)
3032                 return i915_handle_error(dev, true, "Ring hung");
3033
3034         if (busy_count)
3035                 /* Reset the timer in case the chip hangs without another request
3036                  * being added */
3037                 i915_queue_hangcheck(dev);
3038 }
3039
3040 void i915_queue_hangcheck(struct drm_device *dev)
3041 {
3042         struct i915_gpu_error *e = &to_i915(dev)->gpu_error;
3043
3044         if (!i915.enable_hangcheck)
3045                 return;
3046
3047         /* Don't continually defer the hangcheck so that it is always run at
3048          * least once after work has been scheduled on any ring. Otherwise,
3049          * we will ignore a hung ring if a second ring is kept busy.
3050          */
3051
3052         queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
3053                            round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
3054 }
3055
3056 static void ibx_irq_reset(struct drm_device *dev)
3057 {
3058         struct drm_i915_private *dev_priv = dev->dev_private;
3059
3060         if (HAS_PCH_NOP(dev))
3061                 return;
3062
3063         GEN5_IRQ_RESET(SDE);
3064
3065         if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3066                 I915_WRITE(SERR_INT, 0xffffffff);
3067 }
3068
3069 /*
3070  * SDEIER is also touched by the interrupt handler to work around missed PCH
3071  * interrupts. Hence we can't update it after the interrupt handler is enabled -
3072  * instead we unconditionally enable all PCH interrupt sources here, but then
3073  * only unmask them as needed with SDEIMR.
3074  *
3075  * This function needs to be called before interrupts are enabled.
3076  */
3077 static void ibx_irq_pre_postinstall(struct drm_device *dev)
3078 {
3079         struct drm_i915_private *dev_priv = dev->dev_private;
3080
3081         if (HAS_PCH_NOP(dev))
3082                 return;
3083
3084         WARN_ON(I915_READ(SDEIER) != 0);
3085         I915_WRITE(SDEIER, 0xffffffff);
3086         POSTING_READ(SDEIER);
3087 }
3088
3089 static void gen5_gt_irq_reset(struct drm_device *dev)
3090 {
3091         struct drm_i915_private *dev_priv = dev->dev_private;
3092
3093         GEN5_IRQ_RESET(GT);
3094         if (INTEL_INFO(dev)->gen >= 6)
3095                 GEN5_IRQ_RESET(GEN6_PM);
3096 }
3097
3098 /* drm_dma.h hooks
3099  */
3100 static void ironlake_irq_reset(struct drm_device *dev)
3101 {
3102         struct drm_i915_private *dev_priv = dev->dev_private;
3103
3104         I915_WRITE(HWSTAM, 0xffffffff);
3105
3106         GEN5_IRQ_RESET(DE);
3107         if (IS_GEN7(dev))
3108                 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
3109
3110         gen5_gt_irq_reset(dev);
3111
3112         ibx_irq_reset(dev);
3113 }
3114
3115 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3116 {
3117         enum pipe pipe;
3118
3119         i915_hotplug_interrupt_update(dev_priv, 0xFFFFFFFF, 0);
3120         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3121
3122         for_each_pipe(dev_priv, pipe)
3123                 I915_WRITE(PIPESTAT(pipe), 0xffff);
3124
3125         GEN5_IRQ_RESET(VLV_);
3126 }
3127
3128 static void valleyview_irq_preinstall(struct drm_device *dev)
3129 {
3130         struct drm_i915_private *dev_priv = dev->dev_private;
3131
3132         /* VLV magic */
3133         I915_WRITE(VLV_IMR, 0);
3134         I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
3135         I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
3136         I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
3137
3138         gen5_gt_irq_reset(dev);
3139
3140         I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3141
3142         vlv_display_irq_reset(dev_priv);
3143 }
3144
3145 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3146 {
3147         GEN8_IRQ_RESET_NDX(GT, 0);
3148         GEN8_IRQ_RESET_NDX(GT, 1);
3149         GEN8_IRQ_RESET_NDX(GT, 2);
3150         GEN8_IRQ_RESET_NDX(GT, 3);
3151 }
3152
3153 static void gen8_irq_reset(struct drm_device *dev)
3154 {
3155         struct drm_i915_private *dev_priv = dev->dev_private;
3156         int pipe;
3157
3158         I915_WRITE(GEN8_MASTER_IRQ, 0);
3159         POSTING_READ(GEN8_MASTER_IRQ);
3160
3161         gen8_gt_irq_reset(dev_priv);
3162
3163         for_each_pipe(dev_priv, pipe)
3164                 if (intel_display_power_is_enabled(dev_priv,
3165                                                    POWER_DOMAIN_PIPE(pipe)))
3166                         GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3167
3168         GEN5_IRQ_RESET(GEN8_DE_PORT_);
3169         GEN5_IRQ_RESET(GEN8_DE_MISC_);
3170         GEN5_IRQ_RESET(GEN8_PCU_);
3171
3172         if (HAS_PCH_SPLIT(dev))
3173                 ibx_irq_reset(dev);
3174 }
3175
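/*
 * Re-initialize the DE pipe interrupt registers for the pipes in @pipe_mask.
 * These registers are skipped by gen8_irq_reset() and gen8_de_irq_postinstall()
 * while their power well is down, so they have to be reprogrammed once the
 * power well is enabled again.
 */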
3176 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3177                                      unsigned int pipe_mask)
3178 {
3179         uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3180
3181         spin_lock_irq(&dev_priv->irq_lock);
3182         if (pipe_mask & 1 << PIPE_A)
3183                 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A,
3184                                   dev_priv->de_irq_mask[PIPE_A],
3185                                   ~dev_priv->de_irq_mask[PIPE_A] | extra_ier);
3186         if (pipe_mask & 1 << PIPE_B)
3187                 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B,
3188                                   dev_priv->de_irq_mask[PIPE_B],
3189                                   ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
3190         if (pipe_mask & 1 << PIPE_C)
3191                 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C,
3192                                   dev_priv->de_irq_mask[PIPE_C],
3193                                   ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
3194         spin_unlock_irq(&dev_priv->irq_lock);
3195 }
3196
3197 static void cherryview_irq_preinstall(struct drm_device *dev)
3198 {
3199         struct drm_i915_private *dev_priv = dev->dev_private;
3200
3201         I915_WRITE(GEN8_MASTER_IRQ, 0);
3202         POSTING_READ(GEN8_MASTER_IRQ);
3203
3204         gen8_gt_irq_reset(dev_priv);
3205
3206         GEN5_IRQ_RESET(GEN8_PCU_);
3207
3208         I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3209
3210         vlv_display_irq_reset(dev_priv);
3211 }
3212
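/*
 * Build the mask of hotplug interrupt bits to enable by looking up, in the
 * platform specific @hpd table, each encoder whose HPD pin is currently in
 * the HPD_ENABLED state.
 */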
3213 static u32 intel_hpd_enabled_irqs(struct drm_device *dev,
3214                                   const u32 hpd[HPD_NUM_PINS])
3215 {
3216         struct drm_i915_private *dev_priv = to_i915(dev);
3217         struct intel_encoder *encoder;
3218         u32 enabled_irqs = 0;
3219
3220         for_each_intel_encoder(dev, encoder)
3221                 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
3222                         enabled_irqs |= hpd[encoder->hpd_pin];
3223
3224         return enabled_irqs;
3225 }
3226
3227 static void ibx_hpd_irq_setup(struct drm_device *dev)
3228 {
3229         struct drm_i915_private *dev_priv = dev->dev_private;
3230         u32 hotplug_irqs, hotplug, enabled_irqs;
3231
3232         if (HAS_PCH_IBX(dev)) {
3233                 hotplug_irqs = SDE_HOTPLUG_MASK;
3234                 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ibx);
3235         } else {
3236                 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3237                 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_cpt);
3238         }
3239
3240         ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3241
3242         /*
3243          * Enable digital hotplug on the PCH, and configure the DP short pulse
3244          * duration to 2ms (which is the minimum in the Display Port spec).
3245          * The pulse duration bits are reserved on LPT+.
3246          */
3247         hotplug = I915_READ(PCH_PORT_HOTPLUG);
3248         hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3249         hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3250         hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3251         hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3252         /*
3253          * When CPU and PCH are on the same package, port A
3254          * HPD must be enabled in both north and south.
3255          */
3256         if (HAS_PCH_LPT_LP(dev))
3257                 hotplug |= PORTA_HOTPLUG_ENABLE;
3258         I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3259 }
3260
3261 static void spt_hpd_irq_setup(struct drm_device *dev)
3262 {
3263         struct drm_i915_private *dev_priv = dev->dev_private;
3264         u32 hotplug_irqs, hotplug, enabled_irqs;
3265
3266         hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
3267         enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_spt);
3268
3269         ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3270
3271         /* Enable digital hotplug on the PCH */
3272         hotplug = I915_READ(PCH_PORT_HOTPLUG);
3273         hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE |
3274                 PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE;
3275         I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3276
3277         hotplug = I915_READ(PCH_PORT_HOTPLUG2);
3278         hotplug |= PORTE_HOTPLUG_ENABLE;
3279         I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
3280 }
3281
3282 static void ilk_hpd_irq_setup(struct drm_device *dev)
3283 {
3284         struct drm_i915_private *dev_priv = dev->dev_private;
3285         u32 hotplug_irqs, hotplug, enabled_irqs;
3286
3287         if (INTEL_INFO(dev)->gen >= 8) {
3288                 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
3289                 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bdw);
3290
3291                 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3292         } else if (INTEL_INFO(dev)->gen >= 7) {
3293                 hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
3294                 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ivb);
3295
3296                 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3297         } else {
3298                 hotplug_irqs = DE_DP_A_HOTPLUG;
3299                 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ilk);
3300
3301                 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3302         }
3303
3304         /*
3305          * Enable digital hotplug on the CPU, and configure the DP short pulse
3306          * duration to 2ms (which is the minimum in the Display Port spec)
3307          * The pulse duration bits are reserved on HSW+.
3308          */
3309         hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
3310         hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
3311         hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
3312         I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3313
3314         ibx_hpd_irq_setup(dev);
3315 }
3316
3317 static void bxt_hpd_irq_setup(struct drm_device *dev)
3318 {
3319         struct drm_i915_private *dev_priv = dev->dev_private;
3320         u32 hotplug_irqs, hotplug, enabled_irqs;
3321
3322         enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bxt);
3323         hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
3324
3325         bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3326
3327         hotplug = I915_READ(PCH_PORT_HOTPLUG);
3328         hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
3329                 PORTA_HOTPLUG_ENABLE;
3330         I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3331 }
3332
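/*
 * Unmask the always-wanted PCH (south display) interrupts: GMBUS, the AUX
 * channels and, on IBX, poison. Everything else remains masked in SDEIMR
 * and is unmasked on demand.
 */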
3333 static void ibx_irq_postinstall(struct drm_device *dev)
3334 {
3335         struct drm_i915_private *dev_priv = dev->dev_private;
3336         u32 mask;
3337
3338         if (HAS_PCH_NOP(dev))
3339                 return;
3340
3341         if (HAS_PCH_IBX(dev))
3342                 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3343         else
3344                 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3345
3346         GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
3347         I915_WRITE(SDEIMR, ~mask);
3348 }
3349
3350 static void gen5_gt_irq_postinstall(struct drm_device *dev)
3351 {
3352         struct drm_i915_private *dev_priv = dev->dev_private;
3353         u32 pm_irqs, gt_irqs;
3354
3355         pm_irqs = gt_irqs = 0;
3356
3357         dev_priv->gt_irq_mask = ~0;
3358         if (HAS_L3_DPF(dev)) {
3359                 /* L3 parity interrupt is always unmasked. */
3360                 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3361                 gt_irqs |= GT_PARITY_ERROR(dev);
3362         }
3363
3364         gt_irqs |= GT_RENDER_USER_INTERRUPT;
3365         if (IS_GEN5(dev)) {
3366                 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
3367                            ILK_BSD_USER_INTERRUPT;
3368         } else {
3369                 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3370         }
3371
3372         GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3373
3374         if (INTEL_INFO(dev)->gen >= 6) {
3375                 /*
3376                  * RPS interrupts will get enabled/disabled on demand when RPS
3377                  * itself is enabled/disabled.
3378                  */
3379                 if (HAS_VEBOX(dev))
3380                         pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3381
3382                 dev_priv->pm_irq_mask = 0xffffffff;
3383                 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
3384         }
3385 }
3386
3387 static int ironlake_irq_postinstall(struct drm_device *dev)
3388 {
3389         struct drm_i915_private *dev_priv = dev->dev_private;
3390         u32 display_mask, extra_mask;
3391
3392         if (INTEL_INFO(dev)->gen >= 7) {
3393                 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3394                                 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3395                                 DE_PLANEB_FLIP_DONE_IVB |
3396                                 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
3397                 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3398                               DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3399                               DE_DP_A_HOTPLUG_IVB);
3400         } else {
3401                 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3402                                 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
3403                                 DE_AUX_CHANNEL_A |
3404                                 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3405                                 DE_POISON);
3406                 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3407                               DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3408                               DE_DP_A_HOTPLUG);
3409         }
3410
3411         dev_priv->irq_mask = ~display_mask;
3412
3413         I915_WRITE(HWSTAM, 0xeffe);
3414
3415         ibx_irq_pre_postinstall(dev);
3416
3417         GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3418
3419         gen5_gt_irq_postinstall(dev);
3420
3421         ibx_irq_postinstall(dev);
3422
3423         if (IS_IRONLAKE_M(dev)) {
3424                 /* Enable PCU event interrupts
3425                  *
3426                  * spinlocking not required here for correctness since interrupt
3427                  * setup is guaranteed to run in single-threaded context. But we
3428                  * need it to make the assert_spin_locked happy. */
3429                 spin_lock_irq(&dev_priv->irq_lock);
3430                 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
3431                 spin_unlock_irq(&dev_priv->irq_lock);
3432         }
3433
3434         return 0;
3435 }
3436
3437 static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
3438 {
3439         u32 pipestat_mask;
3440         u32 iir_mask;
3441         enum pipe pipe;
3442
3443         pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3444                         PIPE_FIFO_UNDERRUN_STATUS;
3445
3446         for_each_pipe(dev_priv, pipe)
3447                 I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3448         POSTING_READ(PIPESTAT(PIPE_A));
3449
3450         pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3451                         PIPE_CRC_DONE_INTERRUPT_STATUS;
3452
3453         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3454         for_each_pipe(dev_priv, pipe)
3455                 i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3456
3457         iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3458                    I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3459                    I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3460         if (IS_CHERRYVIEW(dev_priv))
3461                 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3462         dev_priv->irq_mask &= ~iir_mask;
3463
3464         I915_WRITE(VLV_IIR, iir_mask);
3465         I915_WRITE(VLV_IIR, iir_mask);
3466         I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3467         I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3468         POSTING_READ(VLV_IMR);
3469 }
3470
3471 static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3472 {
3473         u32 pipestat_mask;
3474         u32 iir_mask;
3475         enum pipe pipe;
3476
3477         iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3478                    I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3479                    I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3480         if (IS_CHERRYVIEW(dev_priv))
3481                 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3482
3483         dev_priv->irq_mask |= iir_mask;
3484         I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3485         I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3486         I915_WRITE(VLV_IIR, iir_mask);
3487         I915_WRITE(VLV_IIR, iir_mask);
3488         POSTING_READ(VLV_IIR);
3489
3490         pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3491                         PIPE_CRC_DONE_INTERRUPT_STATUS;
3492
3493         i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3494         for_each_pipe(dev_priv, pipe)
3495                 i915_disable_pipestat(dev_priv, pipe, pipestat_mask);
3496
3497         pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3498                         PIPE_FIFO_UNDERRUN_STATUS;
3499
3500         for_each_pipe(dev_priv, pipe)
3501                 I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3502         POSTING_READ(PIPESTAT(PIPE_A));
3503 }
3504
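/*
 * Mark the VLV/CHV display interrupts as wanted; if the driver interrupts
 * are already enabled the display interrupt setup is installed immediately,
 * otherwise it is deferred to the postinstall hook. Caller must hold
 * dev_priv->irq_lock.
 */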
3505 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3506 {
3507         assert_spin_locked(&dev_priv->irq_lock);
3508
3509         if (dev_priv->display_irqs_enabled)
3510                 return;
3511
3512         dev_priv->display_irqs_enabled = true;
3513
3514         if (intel_irqs_enabled(dev_priv))
3515                 valleyview_display_irqs_install(dev_priv);
3516 }
3517
3518 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3519 {
3520         assert_spin_locked(&dev_priv->irq_lock);
3521
3522         if (!dev_priv->display_irqs_enabled)
3523                 return;
3524
3525         dev_priv->display_irqs_enabled = false;
3526
3527         if (intel_irqs_enabled(dev_priv))
3528                 valleyview_display_irqs_uninstall(dev_priv);
3529 }
3530
3531 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3532 {
3533         dev_priv->irq_mask = ~0;
3534
3535         i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3536         POSTING_READ(PORT_HOTPLUG_EN);
3537
3538         I915_WRITE(VLV_IIR, 0xffffffff);
3539         I915_WRITE(VLV_IIR, 0xffffffff);
3540         I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3541         I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3542         POSTING_READ(VLV_IMR);
3543
3544         /* Interrupt setup is already guaranteed to be single-threaded, this is
3545          * just to make the assert_spin_locked check happy. */
3546         spin_lock_irq(&dev_priv->irq_lock);
3547         if (dev_priv->display_irqs_enabled)
3548                 valleyview_display_irqs_install(dev_priv);
3549         spin_unlock_irq(&dev_priv->irq_lock);
3550 }
3551
3552 static int valleyview_irq_postinstall(struct drm_device *dev)
3553 {
3554         struct drm_i915_private *dev_priv = dev->dev_private;
3555
3556         vlv_display_irq_postinstall(dev_priv);
3557
3558         gen5_gt_irq_postinstall(dev);
3559
3560         /* ack & enable invalid PTE error interrupts */
3561 #if 0 /* FIXME: add support to irq handler for checking these bits */
3562         I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3563         I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
3564 #endif
3565
3566         I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3567
3568         return 0;
3569 }
3570
3571 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3572 {
3573         /* These are interrupts we'll toggle with the ring mask register */
3574         uint32_t gt_interrupts[] = {
3575                 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3576                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3577                         GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
3578                         GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3579                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3580                 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3581                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3582                         GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3583                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3584                 0,
3585                 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3586                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3587                 };
3588
3589         dev_priv->pm_irq_mask = 0xffffffff;
3590         GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3591         GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3592         /*
3593          * RPS interrupts will get enabled/disabled on demand when RPS itself
3594          * is enabled/disabled.
3595          */
3596         GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
3597         GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
3598 }
3599
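/*
 * Program the gen8+ display engine interrupts: per-pipe flip done, CRC and
 * fault bits (with vblank and FIFO underrun added to the enable mask) plus
 * the DE port interrupts (AUX channels, and hotplug/GMBUS where supported).
 * Pipes whose power wells are currently off are skipped here and picked up
 * later by gen8_irq_power_well_post_enable().
 */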
3600 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3601 {
3602         uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3603         uint32_t de_pipe_enables;
3604         u32 de_port_masked = GEN8_AUX_CHANNEL_A;
3605         u32 de_port_enables;
3606         enum pipe pipe;
3607
3608         if (INTEL_INFO(dev_priv)->gen >= 9) {
3609                 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3610                                   GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
3611                 de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
3612                                   GEN9_AUX_CHANNEL_D;
3613                 if (IS_BROXTON(dev_priv))
3614                         de_port_masked |= BXT_DE_PORT_GMBUS;
3615         } else {
3616                 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
3617                                   GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3618         }
3619
3620         de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3621                                            GEN8_PIPE_FIFO_UNDERRUN;
3622
3623         de_port_enables = de_port_masked;
3624         if (IS_BROXTON(dev_priv))
3625                 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
3626         else if (IS_BROADWELL(dev_priv))
3627                 de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
3628
3629         dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3630         dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3631         dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3632
3633         for_each_pipe(dev_priv, pipe)
3634                 if (intel_display_power_is_enabled(dev_priv,
3635                                 POWER_DOMAIN_PIPE(pipe)))
3636                         GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3637                                           dev_priv->de_irq_mask[pipe],
3638                                           de_pipe_enables);
3639
3640         GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
3641 }
3642
3643 static int gen8_irq_postinstall(struct drm_device *dev)
3644 {
3645         struct drm_i915_private *dev_priv = dev->dev_private;
3646
3647         if (HAS_PCH_SPLIT(dev))
3648                 ibx_irq_pre_postinstall(dev);
3649
3650         gen8_gt_irq_postinstall(dev_priv);
3651         gen8_de_irq_postinstall(dev_priv);
3652
3653         if (HAS_PCH_SPLIT(dev))
3654                 ibx_irq_postinstall(dev);
3655
3656         I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
3657         POSTING_READ(GEN8_MASTER_IRQ);
3658
3659         return 0;
3660 }
3661
3662 static int cherryview_irq_postinstall(struct drm_device *dev)
3663 {
3664         struct drm_i915_private *dev_priv = dev->dev_private;
3665
3666         vlv_display_irq_postinstall(dev_priv);
3667
3668         gen8_gt_irq_postinstall(dev_priv);
3669
3670         I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
3671         POSTING_READ(GEN8_MASTER_IRQ);
3672
3673         return 0;
3674 }
3675
3676 static void gen8_irq_uninstall(struct drm_device *dev)
3677 {
3678         struct drm_i915_private *dev_priv = dev->dev_private;
3679
3680         if (!dev_priv)
3681                 return;
3682
3683         gen8_irq_reset(dev);
3684 }
3685
3686 static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
3687 {
3688         /* Interrupt setup is already guaranteed to be single-threaded, this is
3689          * just to make the assert_spin_locked check happy. */
3690         spin_lock_irq(&dev_priv->irq_lock);
3691         if (dev_priv->display_irqs_enabled)
3692                 valleyview_display_irqs_uninstall(dev_priv);
3693         spin_unlock_irq(&dev_priv->irq_lock);
3694
3695         vlv_display_irq_reset(dev_priv);
3696
3697         dev_priv->irq_mask = ~0;
3698 }
3699
3700 static void valleyview_irq_uninstall(struct drm_device *dev)
3701 {
3702         struct drm_i915_private *dev_priv = dev->dev_private;
3703
3704         if (!dev_priv)
3705                 return;
3706
3707         I915_WRITE(VLV_MASTER_IER, 0);
3708
3709         gen5_gt_irq_reset(dev);
3710
3711         I915_WRITE(HWSTAM, 0xffffffff);
3712
3713         vlv_display_irq_uninstall(dev_priv);
3714 }
3715
3716 static void cherryview_irq_uninstall(struct drm_device *dev)
3717 {
3718         struct drm_i915_private *dev_priv = dev->dev_private;
3719
3720         if (!dev_priv)
3721                 return;
3722
3723         I915_WRITE(GEN8_MASTER_IRQ, 0);
3724         POSTING_READ(GEN8_MASTER_IRQ);
3725
3726         gen8_gt_irq_reset(dev_priv);
3727
3728         GEN5_IRQ_RESET(GEN8_PCU_);
3729
3730         vlv_display_irq_uninstall(dev_priv);
3731 }
3732
3733 static void ironlake_irq_uninstall(struct drm_device *dev)
3734 {
3735         struct drm_i915_private *dev_priv = dev->dev_private;
3736
3737         if (!dev_priv)
3738                 return;
3739
3740         ironlake_irq_reset(dev);
3741 }
3742
3743 static void i8xx_irq_preinstall(struct drm_device *dev)
3744 {
3745         struct drm_i915_private *dev_priv = dev->dev_private;
3746         int pipe;
3747
3748         for_each_pipe(dev_priv, pipe)
3749                 I915_WRITE(PIPESTAT(pipe), 0);
3750         I915_WRITE16(IMR, 0xffff);
3751         I915_WRITE16(IER, 0x0);
3752         POSTING_READ16(IER);
3753 }
3754
3755 static int i8xx_irq_postinstall(struct drm_device *dev)
3756 {
3757         struct drm_i915_private *dev_priv = dev->dev_private;
3758
3759         I915_WRITE16(EMR,
3760                      ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3761
3762         /* Unmask the interrupts that we always want on. */
3763         dev_priv->irq_mask =
3764                 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3765                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3766                   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3767                   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3768         I915_WRITE16(IMR, dev_priv->irq_mask);
3769
3770         I915_WRITE16(IER,
3771                      I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3772                      I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3773                      I915_USER_INTERRUPT);
3774         POSTING_READ16(IER);
3775
3776         /* Interrupt setup is already guaranteed to be single-threaded, this is
3777          * just to make the assert_spin_locked check happy. */
3778         spin_lock_irq(&dev_priv->irq_lock);
3779         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3780         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3781         spin_unlock_irq(&dev_priv->irq_lock);
3782
3783         return 0;
3784 }
3785
3786 /*
3787  * Returns true when a page flip has completed.
3788  */
3789 static bool i8xx_handle_vblank(struct drm_device *dev,
3790                                int plane, int pipe, u32 iir)
3791 {
3792         struct drm_i915_private *dev_priv = dev->dev_private;
3793         u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3794
3795         if (!intel_pipe_handle_vblank(dev, pipe))
3796                 return false;
3797
3798         if ((iir & flip_pending) == 0)
3799                 goto check_page_flip;
3800
3801         /* We detect FlipDone by looking for the change in PendingFlip from '1'
3802          * to '0' on the following vblank, i.e. IIR has the Pendingflip
3803          * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3804          * the flip is completed (no longer pending). Since this doesn't raise
3805          * an interrupt per se, we watch for the change at vblank.
3806          */
3807         if (I915_READ16(ISR) & flip_pending)
3808                 goto check_page_flip;
3809
3810         intel_prepare_page_flip(dev, plane);
3811         intel_finish_page_flip(dev, pipe);
3812         return true;
3813
3814 check_page_flip:
3815         intel_check_page_flip(dev, pipe);
3816         return false;
3817 }
3818
3819 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3820 {
3821         struct drm_device *dev = arg;
3822         struct drm_i915_private *dev_priv = dev->dev_private;
3823         u16 iir, new_iir;
3824         u32 pipe_stats[2];
3825         int pipe;
3826         u16 flip_mask =
3827                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3828                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3829
3830         if (!intel_irqs_enabled(dev_priv))
3831                 return IRQ_NONE;
3832
3833         iir = I915_READ16(IIR);
3834         if (iir == 0)
3835                 return IRQ_NONE;
3836
3837         while (iir & ~flip_mask) {
3838                 /* Can't rely on pipestat interrupt bit in iir as it might
3839                  * have been cleared after the pipestat interrupt was received.
3840                  * It doesn't set the bit in iir again, but it still produces
3841                  * interrupts (for non-MSI).
3842                  */
3843                 spin_lock(&dev_priv->irq_lock);
3844                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3845                         DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3846
3847                 for_each_pipe(dev_priv, pipe) {
3848                         int reg = PIPESTAT(pipe);
3849                         pipe_stats[pipe] = I915_READ(reg);
3850
3851                         /*
3852                          * Clear the PIPE*STAT regs before the IIR
3853                          */
3854                         if (pipe_stats[pipe] & 0x8000ffff)
3855                                 I915_WRITE(reg, pipe_stats[pipe]);
3856                 }
3857                 spin_unlock(&dev_priv->irq_lock);
3858
3859                 I915_WRITE16(IIR, iir & ~flip_mask);
3860                 new_iir = I915_READ16(IIR); /* Flush posted writes */
3861
3862                 if (iir & I915_USER_INTERRUPT)
3863                         notify_ring(&dev_priv->ring[RCS]);
3864
3865                 for_each_pipe(dev_priv, pipe) {
3866                         int plane = pipe;
3867                         if (HAS_FBC(dev))
3868                                 plane = !plane;
3869
3870                         if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3871                             i8xx_handle_vblank(dev, plane, pipe, iir))
3872                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3873
3874                         if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3875                                 i9xx_pipe_crc_irq_handler(dev, pipe);
3876
3877                         if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3878                                 intel_cpu_fifo_underrun_irq_handler(dev_priv,
3879                                                                     pipe);
3880                 }
3881
3882                 iir = new_iir;
3883         }
3884
3885         return IRQ_HANDLED;
3886 }
3887
3888 static void i8xx_irq_uninstall(struct drm_device *dev)
3889 {
3890         struct drm_i915_private *dev_priv = dev->dev_private;
3891         int pipe;
3892
3893         for_each_pipe(dev_priv, pipe) {
3894                 /* Clear enable bits; then clear status bits */
3895                 I915_WRITE(PIPESTAT(pipe), 0);
3896                 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3897         }
3898         I915_WRITE16(IMR, 0xffff);
3899         I915_WRITE16(IER, 0x0);
3900         I915_WRITE16(IIR, I915_READ16(IIR));
3901 }
3902
3903 static void i915_irq_preinstall(struct drm_device *dev)
3904 {
3905         struct drm_i915_private *dev_priv = dev->dev_private;
3906         int pipe;
3907
3908         if (I915_HAS_HOTPLUG(dev)) {
3909                 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3910                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3911         }
3912
3913         I915_WRITE16(HWSTAM, 0xeffe);
3914         for_each_pipe(dev_priv, pipe)
3915                 I915_WRITE(PIPESTAT(pipe), 0);
3916         I915_WRITE(IMR, 0xffffffff);
3917         I915_WRITE(IER, 0x0);
3918         POSTING_READ(IER);
3919 }
3920
3921 static int i915_irq_postinstall(struct drm_device *dev)
3922 {
3923         struct drm_i915_private *dev_priv = dev->dev_private;
3924         u32 enable_mask;
3925
3926         I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3927
3928         /* Unmask the interrupts that we always want on. */
3929         dev_priv->irq_mask =
3930                 ~(I915_ASLE_INTERRUPT |
3931                   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3932                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3933                   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3934                   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3935
3936         enable_mask =
3937                 I915_ASLE_INTERRUPT |
3938                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3939                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3940                 I915_USER_INTERRUPT;
3941
3942         if (I915_HAS_HOTPLUG(dev)) {
3943                 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3944                 POSTING_READ(PORT_HOTPLUG_EN);
3945
3946                 /* Enable in IER... */
3947                 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3948                 /* and unmask in IMR */
3949                 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3950         }
3951
3952         I915_WRITE(IMR, dev_priv->irq_mask);
3953         I915_WRITE(IER, enable_mask);
3954         POSTING_READ(IER);
3955
3956         i915_enable_asle_pipestat(dev);
3957
3958         /* Interrupt setup is already guaranteed to be single-threaded, this is
3959          * just to make the assert_spin_locked check happy. */
3960         spin_lock_irq(&dev_priv->irq_lock);
3961         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3962         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3963         spin_unlock_irq(&dev_priv->irq_lock);
3964
3965         return 0;
3966 }
3967
3968 /*
3969  * Returns true when a page flip has completed.
3970  */
3971 static bool i915_handle_vblank(struct drm_device *dev,
3972                                int plane, int pipe, u32 iir)
3973 {
3974         struct drm_i915_private *dev_priv = dev->dev_private;
3975         u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3976
3977         if (!intel_pipe_handle_vblank(dev, pipe))
3978                 return false;
3979
3980         if ((iir & flip_pending) == 0)
3981                 goto check_page_flip;
3982
3983         /* We detect FlipDone by looking for the change in PendingFlip from '1'
3984          * to '0' on the following vblank, i.e. IIR has the Pendingflip
3985          * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3986          * the flip is completed (no longer pending). Since this doesn't raise
3987          * an interrupt per se, we watch for the change at vblank.
3988          */
3989         if (I915_READ(ISR) & flip_pending)
3990                 goto check_page_flip;
3991
3992         intel_prepare_page_flip(dev, plane);
3993         intel_finish_page_flip(dev, pipe);
3994         return true;
3995
3996 check_page_flip:
3997         intel_check_page_flip(dev, pipe);
3998         return false;
3999 }
4000
4001 static irqreturn_t i915_irq_handler(int irq, void *arg)
4002 {
4003         struct drm_device *dev = arg;
4004         struct drm_i915_private *dev_priv = dev->dev_private;
4005         u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
4006         u32 flip_mask =
4007                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4008                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4009         int pipe, ret = IRQ_NONE;
4010
4011         if (!intel_irqs_enabled(dev_priv))
4012                 return IRQ_NONE;
4013
4014         iir = I915_READ(IIR);
4015         do {
4016                 bool irq_received = (iir & ~flip_mask) != 0;
4017                 bool blc_event = false;
4018
4019                 /* Can't rely on pipestat interrupt bit in iir as it might
4020                  * have been cleared after the pipestat interrupt was received.
4021                  * It doesn't set the bit in iir again, but it still produces
4022                  * interrupts (for non-MSI).
4023                  */
4024                 spin_lock(&dev_priv->irq_lock);
4025                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4026                         DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4027
4028                 for_each_pipe(dev_priv, pipe) {
4029                         int reg = PIPESTAT(pipe);
4030                         pipe_stats[pipe] = I915_READ(reg);
4031
4032                         /* Clear the PIPE*STAT regs before the IIR */
4033                         if (pipe_stats[pipe] & 0x8000ffff) {
4034                                 I915_WRITE(reg, pipe_stats[pipe]);
4035                                 irq_received = true;
4036                         }
4037                 }
4038                 spin_unlock(&dev_priv->irq_lock);
4039
4040                 if (!irq_received)
4041                         break;
4042
4043                 /* Consume port.  Then clear IIR or we'll miss events */
4044                 if (I915_HAS_HOTPLUG(dev) &&
4045                     iir & I915_DISPLAY_PORT_INTERRUPT)
4046                         i9xx_hpd_irq_handler(dev);
4047
4048                 I915_WRITE(IIR, iir & ~flip_mask);
4049                 new_iir = I915_READ(IIR); /* Flush posted writes */
4050
4051                 if (iir & I915_USER_INTERRUPT)
4052                         notify_ring(&dev_priv->ring[RCS]);
4053
4054                 for_each_pipe(dev_priv, pipe) {
4055                         int plane = pipe;
4056                         if (HAS_FBC(dev))
4057                                 plane = !plane;
4058
4059                         if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4060                             i915_handle_vblank(dev, plane, pipe, iir))
4061                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4062
4063                         if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4064                                 blc_event = true;
4065
4066                         if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4067                                 i9xx_pipe_crc_irq_handler(dev, pipe);
4068
4069                         if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4070                                 intel_cpu_fifo_underrun_irq_handler(dev_priv,
4071                                                                     pipe);
4072                 }
4073
4074                 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4075                         intel_opregion_asle_intr(dev);
4076
4077                 /* With MSI, interrupts are only generated when iir
4078                  * transitions from zero to nonzero.  If another bit got
4079                  * set while we were handling the existing iir bits, then
4080                  * we would never get another interrupt.
4081                  *
4082                  * This is fine on non-MSI as well, as if we hit this path
4083                  * we avoid exiting the interrupt handler only to generate
4084                  * another one.
4085                  *
4086                  * Note that for MSI this could cause a stray interrupt report
4087                  * if an interrupt landed in the time between writing IIR and
4088                  * the posting read.  This should be rare enough to never
4089                  * trigger the 99% of 100,000 interrupts test for disabling
4090                  * stray interrupts.
4091                  */
4092                 ret = IRQ_HANDLED;
4093                 iir = new_iir;
4094         } while (iir & ~flip_mask);
4095
4096         return ret;
4097 }
4098
4099 static void i915_irq_uninstall(struct drm_device *dev)
4100 {
4101         struct drm_i915_private *dev_priv = dev->dev_private;
4102         int pipe;
4103
4104         if (I915_HAS_HOTPLUG(dev)) {
4105                 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4106                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4107         }
4108
4109         I915_WRITE16(HWSTAM, 0xffff);
4110         for_each_pipe(dev_priv, pipe) {
4111                 /* Clear enable bits; then clear status bits */
4112                 I915_WRITE(PIPESTAT(pipe), 0);
4113                 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4114         }
4115         I915_WRITE(IMR, 0xffffffff);
4116         I915_WRITE(IER, 0x0);
4117
4118         I915_WRITE(IIR, I915_READ(IIR));
4119 }
4120
4121 static void i965_irq_preinstall(struct drm_device *dev)
4122 {
4123         struct drm_i915_private *dev_priv = dev->dev_private;
4124         int pipe;
4125
4126         i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4127         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4128
4129         I915_WRITE(HWSTAM, 0xeffe);
4130         for_each_pipe(dev_priv, pipe)
4131                 I915_WRITE(PIPESTAT(pipe), 0);
4132         I915_WRITE(IMR, 0xffffffff);
4133         I915_WRITE(IER, 0x0);
4134         POSTING_READ(IER);
4135 }
4136
4137 static int i965_irq_postinstall(struct drm_device *dev)
4138 {
4139         struct drm_i915_private *dev_priv = dev->dev_private;
4140         u32 enable_mask;
4141         u32 error_mask;
4142
4143         /* Unmask the interrupts that we always want on. */
4144         dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
4145                                I915_DISPLAY_PORT_INTERRUPT |
4146                                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4147                                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4148                                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4149                                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4150                                I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4151
4152         enable_mask = ~dev_priv->irq_mask;
4153         enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4154                          I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4155         enable_mask |= I915_USER_INTERRUPT;
4156
4157         if (IS_G4X(dev))
4158                 enable_mask |= I915_BSD_USER_INTERRUPT;
4159
4160         /* Interrupt setup is already guaranteed to be single-threaded, this is
4161          * just to make the assert_spin_locked check happy. */
4162         spin_lock_irq(&dev_priv->irq_lock);
4163         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4164         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4165         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4166         spin_unlock_irq(&dev_priv->irq_lock);
4167
4168         /*
4169          * Enable some error detection, note the instruction error mask
4170          * bit is reserved, so we leave it masked.
4171          */
4172         if (IS_G4X(dev)) {
4173                 error_mask = ~(GM45_ERROR_PAGE_TABLE |
4174                                GM45_ERROR_MEM_PRIV |
4175                                GM45_ERROR_CP_PRIV |
4176                                I915_ERROR_MEMORY_REFRESH);
4177         } else {
4178                 error_mask = ~(I915_ERROR_PAGE_TABLE |
4179                                I915_ERROR_MEMORY_REFRESH);
4180         }
4181         I915_WRITE(EMR, error_mask);
4182
4183         I915_WRITE(IMR, dev_priv->irq_mask);
4184         I915_WRITE(IER, enable_mask);
4185         POSTING_READ(IER);
4186
4187         i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4188         POSTING_READ(PORT_HOTPLUG_EN);
4189
4190         i915_enable_asle_pipestat(dev);
4191
4192         return 0;
4193 }
4194
4195 static void i915_hpd_irq_setup(struct drm_device *dev)
4196 {
4197         struct drm_i915_private *dev_priv = dev->dev_private;
4198         u32 hotplug_en;
4199
4200         assert_spin_locked(&dev_priv->irq_lock);
4201
4202         /* Note HDMI and DP share hotplug bits */
4203         /* enable bits are the same for all generations */
4204         hotplug_en = intel_hpd_enabled_irqs(dev, hpd_mask_i915);
4205         /* Programming the CRT detection parameters tends to generate a
4206          * spurious hotplug event about three seconds later.
4207          * So just do it once.
4208          */
4209         if (IS_G4X(dev))
4210                 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4211         hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4212
4213         /* Ignore TV since it's buggy */
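        /*
         * i915_hotplug_interrupt_update_locked() clears every enable bit
         * covered by the mask and then sets only the bits in hotplug_en,
         * so the TV DAC hotplug enable stays off.
         */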
4214         i915_hotplug_interrupt_update_locked(dev_priv,
4215                                       (HOTPLUG_INT_EN_MASK
4216                                        | CRT_HOTPLUG_VOLTAGE_COMPARE_MASK),
4217                                       hotplug_en);
4218 }
4219
4220 static irqreturn_t i965_irq_handler(int irq, void *arg)
4221 {
4222         struct drm_device *dev = arg;
4223         struct drm_i915_private *dev_priv = dev->dev_private;
4224         u32 iir, new_iir;
4225         u32 pipe_stats[I915_MAX_PIPES];
4226         int ret = IRQ_NONE, pipe;
4227         u32 flip_mask =
4228                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4229                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
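        /*
         * Flip-pending bits are deliberately left set in IIR (note the
         * "iir & ~flip_mask" write below) until i915_handle_vblank() has
         * completed the corresponding page flip.
         */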
4230
4231         if (!intel_irqs_enabled(dev_priv))
4232                 return IRQ_NONE;
4233
4234         iir = I915_READ(IIR);
4235
4236         for (;;) {
4237                 bool irq_received = (iir & ~flip_mask) != 0;
4238                 bool blc_event = false;
4239
4240                 /* Can't rely on pipestat interrupt bit in iir as it might
4241                  * have been cleared after the pipestat interrupt was received.
4242                  * It doesn't set the bit in iir again, but it still produces
4243                  * interrupts (for non-MSI).
4244                  */
4245                 spin_lock(&dev_priv->irq_lock);
4246                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4247                         DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4248
4249                 for_each_pipe(dev_priv, pipe) {
4250                         int reg = PIPESTAT(pipe);
4251                         pipe_stats[pipe] = I915_READ(reg);
4252
4253                         /*
4254                          * Clear the PIPE*STAT regs before the IIR
4255                          */
4256                         if (pipe_stats[pipe] & 0x8000ffff) {
4257                                 I915_WRITE(reg, pipe_stats[pipe]);
4258                                 irq_received = true;
4259                         }
4260                 }
4261                 spin_unlock(&dev_priv->irq_lock);
4262
4263                 if (!irq_received)
4264                         break;
4265
4266                 ret = IRQ_HANDLED;
4267
4268                 /* Consume port.  Then clear IIR or we'll miss events */
4269                 if (iir & I915_DISPLAY_PORT_INTERRUPT)
4270                         i9xx_hpd_irq_handler(dev);
4271
4272                 I915_WRITE(IIR, iir & ~flip_mask);
4273                 new_iir = I915_READ(IIR); /* Flush posted writes */
4274
4275                 if (iir & I915_USER_INTERRUPT)
4276                         notify_ring(&dev_priv->ring[RCS]);
4277                 if (iir & I915_BSD_USER_INTERRUPT)
4278                         notify_ring(&dev_priv->ring[VCS]);
4279
4280                 for_each_pipe(dev_priv, pipe) {
4281                         if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
4282                             i915_handle_vblank(dev, pipe, pipe, iir))
4283                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
4284
4285                         if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4286                                 blc_event = true;
4287
4288                         if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4289                                 i9xx_pipe_crc_irq_handler(dev, pipe);
4290
4291                         if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4292                                 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
4293                 }
4294
4295                 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4296                         intel_opregion_asle_intr(dev);
4297
4298                 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
4299                         gmbus_irq_handler(dev);
4300
4301                 /* With MSI, interrupts are only generated when iir
4302                  * transitions from zero to nonzero.  If another bit got
4303                  * set while we were handling the existing iir bits, then
4304                  * we would never get another interrupt.
4305                  *
4306                  * This is fine on non-MSI as well, as if we hit this path
4307                  * we avoid exiting the interrupt handler only to generate
4308                  * another one.
4309                  *
4310                  * Note that for MSI this could cause a stray interrupt report
4311                  * if an interrupt landed in the time between writing IIR and
4312                  * the posting read.  This should be rare enough to never
4313                  * trigger the 99% of 100,000 interrupts test for disabling
4314                  * stray interrupts.
4315                  */
4316                 iir = new_iir;
4317         }
4318
4319         return ret;
4320 }
4321
4322 static void i965_irq_uninstall(struct drm_device *dev)
4323 {
4324         struct drm_i915_private *dev_priv = dev->dev_private;
4325         int pipe;
4326
4327         if (!dev_priv)
4328                 return;
4329
4330         i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4331         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4332
4333         I915_WRITE(HWSTAM, 0xffffffff);
4334         for_each_pipe(dev_priv, pipe)
4335                 I915_WRITE(PIPESTAT(pipe), 0);
4336         I915_WRITE(IMR, 0xffffffff);
4337         I915_WRITE(IER, 0x0);
4338
4339         for_each_pipe(dev_priv, pipe)
4340                 I915_WRITE(PIPESTAT(pipe),
4341                            I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
4342         I915_WRITE(IIR, I915_READ(IIR));
4343 }
4344
4345 /**
4346  * intel_irq_init - initializes irq support
4347  * @dev_priv: i915 device instance
4348  *
4349  * This function initializes all the irq support, including work items, timers
4350  * and all the vtables. It does not set up the interrupt itself, though.
4351  */
4352 void intel_irq_init(struct drm_i915_private *dev_priv)
4353 {
4354         struct drm_device *dev = dev_priv->dev;
4355
4356         intel_hpd_init_work(dev_priv);
4357
4358         INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
4359         INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4360
4361         /* Let's track the enabled rps events */
4362         if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
4363                 /* WaGsvRC0ResidencyMethod:vlv */
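                /*
                 * On VLV the up/down EI-expired interrupts drive the
                 * C0-residency based RPS algorithm instead of the usual
                 * up/down threshold interrupts.
                 */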
4364                 dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
4365         else
4366                 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4367
4368         INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
4369                           i915_hangcheck_elapsed);
4370
4371         pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
4372
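        /*
         * Pick the hardware frame counter: gen2 has none at all, g4x and
         * gen5+ expose a full 32-bit counter, and the remaining gen3/4
         * parts only provide 24 bits.
         */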
4373         if (IS_GEN2(dev_priv)) {
4374                 dev->max_vblank_count = 0;
4375                 dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
4376         } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
4377                 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4378                 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
4379         } else {
4380                 dev->driver->get_vblank_counter = i915_get_vblank_counter;
4381                 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4382         }
4383
4384         /*
4385          * Opt out of the vblank disable timer on everything except gen2.
4386          * Gen2 doesn't have a hardware frame counter and so depends on
4387          * vblank interrupts to produce sane vblank sequence numbers.
4388          */
4389         if (!IS_GEN2(dev_priv))
4390                 dev->vblank_disable_immediate = true;
4391
4392         dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
4393         dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4394
4395         if (IS_CHERRYVIEW(dev_priv)) {
4396                 dev->driver->irq_handler = cherryview_irq_handler;
4397                 dev->driver->irq_preinstall = cherryview_irq_preinstall;
4398                 dev->driver->irq_postinstall = cherryview_irq_postinstall;
4399                 dev->driver->irq_uninstall = cherryview_irq_uninstall;
4400                 dev->driver->enable_vblank = valleyview_enable_vblank;
4401                 dev->driver->disable_vblank = valleyview_disable_vblank;
4402                 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4403         } else if (IS_VALLEYVIEW(dev_priv)) {
4404                 dev->driver->irq_handler = valleyview_irq_handler;
4405                 dev->driver->irq_preinstall = valleyview_irq_preinstall;
4406                 dev->driver->irq_postinstall = valleyview_irq_postinstall;
4407                 dev->driver->irq_uninstall = valleyview_irq_uninstall;
4408                 dev->driver->enable_vblank = valleyview_enable_vblank;
4409                 dev->driver->disable_vblank = valleyview_disable_vblank;
4410                 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4411         } else if (INTEL_INFO(dev_priv)->gen >= 8) {
4412                 dev->driver->irq_handler = gen8_irq_handler;
4413                 dev->driver->irq_preinstall = gen8_irq_reset;
4414                 dev->driver->irq_postinstall = gen8_irq_postinstall;
4415                 dev->driver->irq_uninstall = gen8_irq_uninstall;
4416                 dev->driver->enable_vblank = gen8_enable_vblank;
4417                 dev->driver->disable_vblank = gen8_disable_vblank;
4418                 if (IS_BROXTON(dev))
4419                         dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
4420                 else if (HAS_PCH_SPT(dev))
4421                         dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
4422                 else
4423                         dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4424         } else if (HAS_PCH_SPLIT(dev)) {
4425                 dev->driver->irq_handler = ironlake_irq_handler;
4426                 dev->driver->irq_preinstall = ironlake_irq_reset;
4427                 dev->driver->irq_postinstall = ironlake_irq_postinstall;
4428                 dev->driver->irq_uninstall = ironlake_irq_uninstall;
4429                 dev->driver->enable_vblank = ironlake_enable_vblank;
4430                 dev->driver->disable_vblank = ironlake_disable_vblank;
4431                 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4432         } else {
4433                 if (INTEL_INFO(dev_priv)->gen == 2) {
4434                         dev->driver->irq_preinstall = i8xx_irq_preinstall;
4435                         dev->driver->irq_postinstall = i8xx_irq_postinstall;
4436                         dev->driver->irq_handler = i8xx_irq_handler;
4437                         dev->driver->irq_uninstall = i8xx_irq_uninstall;
4438                 } else if (INTEL_INFO(dev_priv)->gen == 3) {
4439                         dev->driver->irq_preinstall = i915_irq_preinstall;
4440                         dev->driver->irq_postinstall = i915_irq_postinstall;
4441                         dev->driver->irq_uninstall = i915_irq_uninstall;
4442                         dev->driver->irq_handler = i915_irq_handler;
4443                 } else {
4444                         dev->driver->irq_preinstall = i965_irq_preinstall;
4445                         dev->driver->irq_postinstall = i965_irq_postinstall;
4446                         dev->driver->irq_uninstall = i965_irq_uninstall;
4447                         dev->driver->irq_handler = i965_irq_handler;
4448                 }
4449                 if (I915_HAS_HOTPLUG(dev_priv))
4450                         dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4451                 dev->driver->enable_vblank = i915_enable_vblank;
4452                 dev->driver->disable_vblank = i915_disable_vblank;
4453         }
4454 }
4455
4456 /**
4457  * intel_irq_install - enables the hardware interrupt
4458  * @dev_priv: i915 device instance
4459  *
4460  * This function enables the hardware interrupt handling, but leaves hotplug
4461  * handling disabled. It is called after intel_irq_init().
4462  *
4463  * In the driver load and resume code we need working interrupts in a few places
4464  * but don't want to deal with the hassle of concurrent probe and hotplug
4465  * workers. Hence the split into this two-stage approach.
4466  */
4467 int intel_irq_install(struct drm_i915_private *dev_priv)
4468 {
4469         /*
4470          * We enable some interrupt sources in our postinstall hooks, so mark
4471          * interrupts as enabled _before_ actually enabling them to avoid
4472          * special cases in our ordering checks.
4473          */
4474         dev_priv->pm.irqs_enabled = true;
4475
4476         return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
4477 }
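
/*
 * Illustrative sketch of the expected two-stage setup during driver load;
 * a simplified pseudo-caller, not taken verbatim from the actual load path:
 *
 *	intel_irq_init(dev_priv);          (vtables, work items, timers)
 *	ret = intel_irq_install(dev_priv); (request the IRQ, run postinstall)
 *	if (ret)
 *		goto err;
 *	...
 *	intel_irq_uninstall(dev_priv);     (on unload or on the error path)
 */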
4478
4479 /**
4480  * intel_irq_uninstall - finalizes all irq handling
4481  * @dev_priv: i915 device instance
4482  *
4483  * This stops interrupt and hotplug handling and unregisters and frees all
4484  * resources acquired in the init functions.
4485  */
4486 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4487 {
4488         drm_irq_uninstall(dev_priv->dev);
4489         intel_hpd_cancel_work(dev_priv);
4490         dev_priv->pm.irqs_enabled = false;
4491 }
4492
4493 /**
4494  * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4495  * @dev_priv: i915 device instance
4496  *
4497  * This function is used to disable interrupts at runtime, both in the runtime
4498  * pm and the system suspend/resume code.
4499  */
4500 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4501 {
4502         dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
4503         dev_priv->pm.irqs_enabled = false;
4504         synchronize_irq(dev_priv->dev->irq);
4505 }
4506
4507 /**
4508  * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4509  * @dev_priv: i915 device instance
4510  *
4511  * This function is used to enable interrupts at runtime, both in the runtime
4512  * pm and the system suspend/resume code.
4513  */
4514 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4515 {
4516         dev_priv->pm.irqs_enabled = true;
4517         dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
4518         dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
4519 }
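
/*
 * Illustrative sketch of how the runtime helpers above are expected to be
 * used; a simplified pseudo-caller, not the actual runtime suspend/resume
 * code:
 *
 *	(runtime suspend)
 *	intel_runtime_pm_disable_interrupts(dev_priv);
 *	... power down the device ...
 *
 *	(runtime resume)
 *	... power up the device ...
 *	intel_runtime_pm_enable_interrupts(dev_priv);
 */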