1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2  */
3 /*
4  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  */
28
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31 #include <linux/sysrq.h>
32 #include <linux/slab.h>
33 #include <linux/circ_buf.h>
34 #include <drm/drmP.h>
35 #include <drm/i915_drm.h>
36 #include "i915_drv.h"
37 #include "i915_trace.h"
38 #include "intel_drv.h"
39
40 /**
41  * DOC: interrupt handling
42  *
43  * These functions provide the basic support for enabling and disabling
44  * interrupt handling. There's a lot more functionality in i915_irq.c
45  * and related files, but that will be described in separate chapters.
46  */
47
48 static const u32 hpd_ibx[HPD_NUM_PINS] = {
49         [HPD_CRT] = SDE_CRT_HOTPLUG,
50         [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
51         [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
52         [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
53         [HPD_PORT_D] = SDE_PORTD_HOTPLUG
54 };
55
56 static const u32 hpd_cpt[HPD_NUM_PINS] = {
57         [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
58         [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
59         [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
60         [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
61         [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
62 };
63
64 static const u32 hpd_spt[HPD_NUM_PINS] = {
65         [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
66         [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
67         [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
68         [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
69 };
70
71 static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
72         [HPD_CRT] = CRT_HOTPLUG_INT_EN,
73         [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
74         [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
75         [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
76         [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
77         [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
78 };
79
80 static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
81         [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
82         [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
83         [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
84         [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
85         [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
86         [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
87 };
88
89 static const u32 hpd_status_i915[HPD_NUM_PINS] = {
90         [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
91         [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
92         [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
93         [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
94         [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
95         [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
96 };
97
98 /* BXT hpd list */
99 static const u32 hpd_bxt[HPD_NUM_PINS] = {
100         [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
101         [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
102 };
103
104 /* IIR can theoretically queue up two events. Be paranoid. */
105 #define GEN8_IRQ_RESET_NDX(type, which) do { \
106         I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
107         POSTING_READ(GEN8_##type##_IMR(which)); \
108         I915_WRITE(GEN8_##type##_IER(which), 0); \
109         I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
110         POSTING_READ(GEN8_##type##_IIR(which)); \
111         I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
112         POSTING_READ(GEN8_##type##_IIR(which)); \
113 } while (0)
114
115 #define GEN5_IRQ_RESET(type) do { \
116         I915_WRITE(type##IMR, 0xffffffff); \
117         POSTING_READ(type##IMR); \
118         I915_WRITE(type##IER, 0); \
119         I915_WRITE(type##IIR, 0xffffffff); \
120         POSTING_READ(type##IIR); \
121         I915_WRITE(type##IIR, 0xffffffff); \
122         POSTING_READ(type##IIR); \
123 } while (0)
124
125 /*
126  * We should clear IMR at preinstall/uninstall, and just check at postinstall.
127  */
128 #define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
129         u32 val = I915_READ(reg); \
130         if (val) { \
131                 WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
132                      (reg), val); \
133                 I915_WRITE((reg), 0xffffffff); \
134                 POSTING_READ(reg); \
135                 I915_WRITE((reg), 0xffffffff); \
136                 POSTING_READ(reg); \
137         } \
138 } while (0)
139
140 #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
141         GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
142         I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
143         I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
144         POSTING_READ(GEN8_##type##_IMR(which)); \
145 } while (0)
146
147 #define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
148         GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
149         I915_WRITE(type##IER, (ier_val)); \
150         I915_WRITE(type##IMR, (imr_val)); \
151         POSTING_READ(type##IMR); \
152 } while (0)
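/*
 * Illustrative usage sketch (not a caller in this excerpt): the reset and
 * init macros above are typically paired across preinstall/postinstall,
 * e.g.
 *
 *     GEN5_IRQ_RESET(GT);                                   preinstall
 *     GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);    postinstall
 *
 * where "gt_irqs" is a hypothetical local holding the IER bits to enable.
 * GEN5_IRQ_INIT() first asserts that GTIIR is already zero (via
 * GEN5_ASSERT_IIR_IS_ZERO above), then programs IER followed by IMR.
 */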
153
154 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
155
156 /* For display hotplug interrupt */
157 void
158 ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
159 {
160         assert_spin_locked(&dev_priv->irq_lock);
161
162         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
163                 return;
164
165         if ((dev_priv->irq_mask & mask) != 0) {
166                 dev_priv->irq_mask &= ~mask;
167                 I915_WRITE(DEIMR, dev_priv->irq_mask);
168                 POSTING_READ(DEIMR);
169         }
170 }
171
172 void
173 ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
174 {
175         assert_spin_locked(&dev_priv->irq_lock);
176
177         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
178                 return;
179
180         if ((dev_priv->irq_mask & mask) != mask) {
181                 dev_priv->irq_mask |= mask;
182                 I915_WRITE(DEIMR, dev_priv->irq_mask);
183                 POSTING_READ(DEIMR);
184         }
185 }
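/*
 * Illustrative usage sketch (assumed caller, not shown in this excerpt):
 * both helpers assert_spin_locked() on dev_priv->irq_lock, so callers are
 * expected to wrap them like
 *
 *     spin_lock_irq(&dev_priv->irq_lock);
 *     ironlake_enable_display_irq(dev_priv, mask);
 *     spin_unlock_irq(&dev_priv->irq_lock);
 *
 * where "mask" is whatever DEIMR bit(s) the caller needs unmasked.
 */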
186
187 /**
188  * ilk_update_gt_irq - update GTIMR
189  * @dev_priv: driver private
190  * @interrupt_mask: mask of interrupt bits to update
191  * @enabled_irq_mask: mask of interrupt bits to enable
192  */
193 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
194                               uint32_t interrupt_mask,
195                               uint32_t enabled_irq_mask)
196 {
197         assert_spin_locked(&dev_priv->irq_lock);
198
199         WARN_ON(enabled_irq_mask & ~interrupt_mask);
200
201         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
202                 return;
203
204         dev_priv->gt_irq_mask &= ~interrupt_mask;
205         dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
206         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
207         POSTING_READ(GTIMR);
208 }
209
210 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
211 {
212         ilk_update_gt_irq(dev_priv, mask, mask);
213 }
214
215 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
216 {
217         ilk_update_gt_irq(dev_priv, mask, 0);
218 }
219
220 static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
221 {
222         return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
223 }
224
225 static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
226 {
227         return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
228 }
229
230 static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
231 {
232         return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
233 }
234
235 /**
236  * snb_update_pm_irq - update GEN6_PMIMR
237  * @dev_priv: driver private
238  * @interrupt_mask: mask of interrupt bits to update
239  * @enabled_irq_mask: mask of interrupt bits to enable
240  */
241 static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
242                               uint32_t interrupt_mask,
243                               uint32_t enabled_irq_mask)
244 {
245         uint32_t new_val;
246
247         WARN_ON(enabled_irq_mask & ~interrupt_mask);
248
249         assert_spin_locked(&dev_priv->irq_lock);
250
251         new_val = dev_priv->pm_irq_mask;
252         new_val &= ~interrupt_mask;
253         new_val |= (~enabled_irq_mask & interrupt_mask);
254
255         if (new_val != dev_priv->pm_irq_mask) {
256                 dev_priv->pm_irq_mask = new_val;
257                 I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
258                 POSTING_READ(gen6_pm_imr(dev_priv));
259         }
260 }
261
262 void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
263 {
264         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
265                 return;
266
267         snb_update_pm_irq(dev_priv, mask, mask);
268 }
269
270 static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
271                                   uint32_t mask)
272 {
273         snb_update_pm_irq(dev_priv, mask, 0);
274 }
275
276 void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
277 {
278         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
279                 return;
280
281         __gen6_disable_pm_irq(dev_priv, mask);
282 }
283
284 void gen6_reset_rps_interrupts(struct drm_device *dev)
285 {
286         struct drm_i915_private *dev_priv = dev->dev_private;
287         uint32_t reg = gen6_pm_iir(dev_priv);
288
289         spin_lock_irq(&dev_priv->irq_lock);
290         I915_WRITE(reg, dev_priv->pm_rps_events);
291         I915_WRITE(reg, dev_priv->pm_rps_events);
292         POSTING_READ(reg);
293         dev_priv->rps.pm_iir = 0;
294         spin_unlock_irq(&dev_priv->irq_lock);
295 }
296
297 void gen6_enable_rps_interrupts(struct drm_device *dev)
298 {
299         struct drm_i915_private *dev_priv = dev->dev_private;
300
301         spin_lock_irq(&dev_priv->irq_lock);
302
303         WARN_ON(dev_priv->rps.pm_iir);
304         WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
305         dev_priv->rps.interrupts_enabled = true;
306         I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
307                                 dev_priv->pm_rps_events);
308         gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
309
310         spin_unlock_irq(&dev_priv->irq_lock);
311 }
312
313 u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
314 {
315         /*
316          * SNB and IVB can hard hang, and VLV and CHV may hard hang, on a
317          * looping batchbuffer if GEN6_PM_RP_UP_EI_EXPIRED is masked.
318          *
319          * TODO: verify if this can be reproduced on VLV,CHV.
320          */
321         if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
322                 mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;
323
324         if (INTEL_INFO(dev_priv)->gen >= 8)
325                 mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
326
327         return mask;
328 }
329
330 void gen6_disable_rps_interrupts(struct drm_device *dev)
331 {
332         struct drm_i915_private *dev_priv = dev->dev_private;
333
334         spin_lock_irq(&dev_priv->irq_lock);
335         dev_priv->rps.interrupts_enabled = false;
336         spin_unlock_irq(&dev_priv->irq_lock);
337
338         cancel_work_sync(&dev_priv->rps.work);
339
340         spin_lock_irq(&dev_priv->irq_lock);
341
342         I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));
343
344         __gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
345         I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
346                                 ~dev_priv->pm_rps_events);
347
348         spin_unlock_irq(&dev_priv->irq_lock);
349
350         synchronize_irq(dev->irq);
351 }
352
353 /**
354  * ibx_display_interrupt_update - update SDEIMR
355  * @dev_priv: driver private
356  * @interrupt_mask: mask of interrupt bits to update
357  * @enabled_irq_mask: mask of interrupt bits to enable
358  */
359 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
360                                   uint32_t interrupt_mask,
361                                   uint32_t enabled_irq_mask)
362 {
363         uint32_t sdeimr = I915_READ(SDEIMR);
364         sdeimr &= ~interrupt_mask;
365         sdeimr |= (~enabled_irq_mask & interrupt_mask);
366
367         WARN_ON(enabled_irq_mask & ~interrupt_mask);
368
369         assert_spin_locked(&dev_priv->irq_lock);
370
371         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
372                 return;
373
374         I915_WRITE(SDEIMR, sdeimr);
375         POSTING_READ(SDEIMR);
376 }
377
378 static void
379 __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
380                        u32 enable_mask, u32 status_mask)
381 {
382         u32 reg = PIPESTAT(pipe);
383         u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
384
385         assert_spin_locked(&dev_priv->irq_lock);
386         WARN_ON(!intel_irqs_enabled(dev_priv));
387
388         if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
389                       status_mask & ~PIPESTAT_INT_STATUS_MASK,
390                       "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
391                       pipe_name(pipe), enable_mask, status_mask))
392                 return;
393
394         if ((pipestat & enable_mask) == enable_mask)
395                 return;
396
397         dev_priv->pipestat_irq_mask[pipe] |= status_mask;
398
399         /* Enable the interrupt, clear any pending status */
400         pipestat |= enable_mask | status_mask;
401         I915_WRITE(reg, pipestat);
402         POSTING_READ(reg);
403 }
404
405 static void
406 __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
407                         u32 enable_mask, u32 status_mask)
408 {
409         u32 reg = PIPESTAT(pipe);
410         u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
411
412         assert_spin_locked(&dev_priv->irq_lock);
413         WARN_ON(!intel_irqs_enabled(dev_priv));
414
415         if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
416                       status_mask & ~PIPESTAT_INT_STATUS_MASK,
417                       "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
418                       pipe_name(pipe), enable_mask, status_mask))
419                 return;
420
421         if ((pipestat & enable_mask) == 0)
422                 return;
423
424         dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
425
426         pipestat &= ~enable_mask;
427         I915_WRITE(reg, pipestat);
428         POSTING_READ(reg);
429 }
430
431 static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
432 {
433         u32 enable_mask = status_mask << 16;
434
435         /*
436          * On pipe A we don't support the PSR interrupt yet,
437          * on pipe B and C the same bit MBZ.
438          */
439         if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
440                 return 0;
441         /*
442          * On pipe B and C we don't support the PSR interrupt yet, on pipe
443          * A the same bit is for perf counters which we don't use either.
444          */
445         if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
446                 return 0;
447
448         enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
449                          SPRITE0_FLIP_DONE_INT_EN_VLV |
450                          SPRITE1_FLIP_DONE_INT_EN_VLV);
451         if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
452                 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
453         if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
454                 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
455
456         return enable_mask;
457 }
458
459 void
460 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
461                      u32 status_mask)
462 {
463         u32 enable_mask;
464
465         if (IS_VALLEYVIEW(dev_priv->dev))
466                 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
467                                                            status_mask);
468         else
469                 enable_mask = status_mask << 16;
470         __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
471 }
472
473 void
474 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
475                       u32 status_mask)
476 {
477         u32 enable_mask;
478
479         if (IS_VALLEYVIEW(dev_priv->dev))
480                 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
481                                                            status_mask);
482         else
483                 enable_mask = status_mask << 16;
484         __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
485 }
486
487 /**
488  * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
489  */
490 static void i915_enable_asle_pipestat(struct drm_device *dev)
491 {
492         struct drm_i915_private *dev_priv = dev->dev_private;
493
494         if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
495                 return;
496
497         spin_lock_irq(&dev_priv->irq_lock);
498
499         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
500         if (INTEL_INFO(dev)->gen >= 4)
501                 i915_enable_pipestat(dev_priv, PIPE_A,
502                                      PIPE_LEGACY_BLC_EVENT_STATUS);
503
504         spin_unlock_irq(&dev_priv->irq_lock);
505 }
506
507 /*
508  * This timing diagram depicts the video signal in and
509  * around the vertical blanking period.
510  *
511  * Assumptions about the fictitious mode used in this example:
512  *  vblank_start >= 3
513  *  vsync_start = vblank_start + 1
514  *  vsync_end = vblank_start + 2
515  *  vtotal = vblank_start + 3
516  *
517  *           start of vblank:
518  *           latch double buffered registers
519  *           increment frame counter (ctg+)
520  *           generate start of vblank interrupt (gen4+)
521  *           |
522  *           |          frame start:
523  *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
524  *           |          may be shifted forward 1-3 extra lines via PIPECONF
525  *           |          |
526  *           |          |  start of vsync:
527  *           |          |  generate vsync interrupt
528  *           |          |  |
529  * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
530  *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
531  * ----va---> <-----------------vb--------------------> <--------va-------------
532  *       |          |       <----vs----->                     |
533  * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
534  * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
535  * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
536  *       |          |                                         |
537  *       last visible pixel                                   first visible pixel
538  *                  |                                         increment frame counter (gen3/4)
539  *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
540  *
541  * x  = horizontal active
542  * _  = horizontal blanking
543  * hs = horizontal sync
544  * va = vertical active
545  * vb = vertical blanking
546  * vs = vertical sync
547  * vbs = vblank_start (number)
548  *
549  * Summary:
550  * - most events happen at the start of horizontal sync
551  * - frame start happens at the start of horizontal blank, 1-4 lines
552  *   (depending on PIPECONF settings) after the start of vblank
553  * - gen3/4 pixel and frame counter are synchronized with the start
554  *   of horizontal active on the first line of vertical active
555  */
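/*
 * Worked example of the vblank counter cooking done in
 * i915_get_vblank_counter() below (fictitious numbers, for illustration
 * only): for a progressive mode with crtc_vblank_start = 1080,
 * crtc_htotal = 2200 and crtc_hsync_start = 2008, the start of vblank in
 * pixels is 1080 * 2200 - (2200 - 2008) = 2375808. Since the hardware
 * frame counter only increments at the start of active, a pixel counter
 * reading at or beyond 2375808 means the current frame has already
 * entered vblank, so 1 is added to the raw frame counter to fake an
 * increment at vblank start.
 */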
556
557 static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
558 {
559         /* Gen2 doesn't have a hardware frame counter */
560         return 0;
561 }
562
563 /* Called from drm generic code, passed a 'crtc', which
564  * we use as a pipe index
565  */
566 static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
567 {
568         struct drm_i915_private *dev_priv = dev->dev_private;
569         unsigned long high_frame;
570         unsigned long low_frame;
571         u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
572         struct intel_crtc *intel_crtc =
573                 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
574         const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
575
576         htotal = mode->crtc_htotal;
577         hsync_start = mode->crtc_hsync_start;
578         vbl_start = mode->crtc_vblank_start;
579         if (mode->flags & DRM_MODE_FLAG_INTERLACE)
580                 vbl_start = DIV_ROUND_UP(vbl_start, 2);
581
582         /* Convert to pixel count */
583         vbl_start *= htotal;
584
585         /* Start of vblank event occurs at start of hsync */
586         vbl_start -= htotal - hsync_start;
587
588         high_frame = PIPEFRAME(pipe);
589         low_frame = PIPEFRAMEPIXEL(pipe);
590
591         /*
592          * High & low register fields aren't synchronized, so make sure
593          * we get a low value that's stable across two reads of the high
594          * register.
595          */
596         do {
597                 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
598                 low   = I915_READ(low_frame);
599                 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
600         } while (high1 != high2);
601
602         high1 >>= PIPE_FRAME_HIGH_SHIFT;
603         pixel = low & PIPE_PIXEL_MASK;
604         low >>= PIPE_FRAME_LOW_SHIFT;
605
606         /*
607          * The frame counter increments at beginning of active.
608          * Cook up a vblank counter by also checking the pixel
609          * counter against vblank start.
610          */
611         return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
612 }
613
614 static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
615 {
616         struct drm_i915_private *dev_priv = dev->dev_private;
617         int reg = PIPE_FRMCOUNT_GM45(pipe);
618
619         return I915_READ(reg);
620 }
621
622 /* raw reads, only for fast reads of display block, no need for forcewake etc. */
623 #define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
624
625 static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
626 {
627         struct drm_device *dev = crtc->base.dev;
628         struct drm_i915_private *dev_priv = dev->dev_private;
629         const struct drm_display_mode *mode = &crtc->base.hwmode;
630         enum pipe pipe = crtc->pipe;
631         int position, vtotal;
632
633         vtotal = mode->crtc_vtotal;
634         if (mode->flags & DRM_MODE_FLAG_INTERLACE)
635                 vtotal /= 2;
636
637         if (IS_GEN2(dev))
638                 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
639         else
640                 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
641
642         /*
643          * On HSW, the DSL reg (0x70000) appears to return 0 if we
644          * read it just before the start of vblank.  So try it again
645          * so we don't accidentally end up spanning a vblank frame
646          * increment, causing the pipe_update_end() code to squawk at us.
647          *
648          * The nature of this problem means we can't simply check the ISR
649          * bit and return the vblank start value; nor can we use the scanline
650          * debug register in the transcoder as it appears to have the same
651          * problem.  We may need to extend this to include other platforms,
652          * but so far testing only shows the problem on HSW.
653          */
654         if (IS_HASWELL(dev) && !position) {
655                 int i, temp;
656
657                 for (i = 0; i < 100; i++) {
658                         udelay(1);
659                         temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
660                                 DSL_LINEMASK_GEN3;
661                         if (temp != position) {
662                                 position = temp;
663                                 break;
664                         }
665                 }
666         }
667
668         /*
669          * See update_scanline_offset() for the details on the
670          * scanline_offset adjustment.
671          */
672         return (position + crtc->scanline_offset) % vtotal;
673 }
674
675 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
676                                     unsigned int flags, int *vpos, int *hpos,
677                                     ktime_t *stime, ktime_t *etime,
678                                     const struct drm_display_mode *mode)
679 {
680         struct drm_i915_private *dev_priv = dev->dev_private;
681         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
682         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
683         int position;
684         int vbl_start, vbl_end, hsync_start, htotal, vtotal;
685         bool in_vbl = true;
686         int ret = 0;
687         unsigned long irqflags;
688
689         if (WARN_ON(!mode->crtc_clock)) {
690                 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
691                                  "pipe %c\n", pipe_name(pipe));
692                 return 0;
693         }
694
695         htotal = mode->crtc_htotal;
696         hsync_start = mode->crtc_hsync_start;
697         vtotal = mode->crtc_vtotal;
698         vbl_start = mode->crtc_vblank_start;
699         vbl_end = mode->crtc_vblank_end;
700
701         if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
702                 vbl_start = DIV_ROUND_UP(vbl_start, 2);
703                 vbl_end /= 2;
704                 vtotal /= 2;
705         }
706
707         ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
708
709         /*
710          * Lock uncore.lock, as we will do multiple timing critical raw
711          * register reads, potentially with preemption disabled, so the
712          * following code must not block on uncore.lock.
713          */
714         spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
715
716         /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
717
718         /* Get optional system timestamp before query. */
719         if (stime)
720                 *stime = ktime_get();
721
722         if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
723                 /* No obvious pixelcount register. Only query vertical
724                  * scanout position from Display scan line register.
725                  */
726                 position = __intel_get_crtc_scanline(intel_crtc);
727         } else {
728                 /* Have access to pixelcount since start of frame.
729                  * We can split this into vertical and horizontal
730                  * scanout position.
731                  */
732                 position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
733
734                 /* convert to pixel counts */
735                 vbl_start *= htotal;
736                 vbl_end *= htotal;
737                 vtotal *= htotal;
738
739                 /*
740                  * In interlaced modes, the pixel counter counts all pixels,
741                  * so one field will have htotal more pixels. In order to avoid
742                  * the reported position from jumping backwards when the pixel
743                  * counter is beyond the length of the shorter field, just
744                  * clamp the position to the length of the shorter field. This
745                  * matches how the scanline counter based position works since
746                  * the scanline counter doesn't count the two half lines.
747                  */
748                 if (position >= vtotal)
749                         position = vtotal - 1;
750
751                 /*
752                  * Start of vblank interrupt is triggered at start of hsync,
753                  * just prior to the first active line of vblank. However we
754                  * consider lines to start at the leading edge of horizontal
755                  * active. So, should we get here before we've crossed into
756                  * the horizontal active of the first line in vblank, we would
757                  * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
758                  * always add htotal-hsync_start to the current pixel position.
759                  */
760                 position = (position + htotal - hsync_start) % vtotal;
761         }
762
763         /* Get optional system timestamp after query. */
764         if (etime)
765                 *etime = ktime_get();
766
767         /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
768
769         spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
770
771         in_vbl = position >= vbl_start && position < vbl_end;
772
773         /*
774          * While in vblank, position will be negative
775          * counting up towards 0 at vbl_end. And outside
776          * vblank, position will be positive counting
777          * up since vbl_end.
778          */
779         if (position >= vbl_start)
780                 position -= vbl_end;
781         else
782                 position += vtotal - vbl_end;
783
784         if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
785                 *vpos = position;
786                 *hpos = 0;
787         } else {
788                 *vpos = position / htotal;
789                 *hpos = position - (*vpos * htotal);
790         }
791
792         /* In vblank? */
793         if (in_vbl)
794                 ret |= DRM_SCANOUTPOS_IN_VBLANK;
795
796         return ret;
797 }
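/*
 * Worked example of the position adjustment above (fictitious numbers):
 * with vbl_start = 1080 and vbl_end = vtotal = 1125 on the scanline path
 * (gen2/g4x/gen5+), a raw scanline of 1100 lies in vblank and is reported
 * as 1100 - 1125 = -25, counting up towards 0 at vbl_end; a raw scanline
 * of 500 is outside vblank and stays at 500 + (1125 - 1125) = 500. On the
 * pixel-counter path the same arithmetic is done in pixels, and
 * *vpos/*hpos are then recovered as position / htotal and the remainder.
 */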
798
799 int intel_get_crtc_scanline(struct intel_crtc *crtc)
800 {
801         struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
802         unsigned long irqflags;
803         int position;
804
805         spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
806         position = __intel_get_crtc_scanline(crtc);
807         spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
808
809         return position;
810 }
811
812 static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
813                               int *max_error,
814                               struct timeval *vblank_time,
815                               unsigned flags)
816 {
817         struct drm_crtc *crtc;
818
819         if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
820                 DRM_ERROR("Invalid crtc %d\n", pipe);
821                 return -EINVAL;
822         }
823
824         /* Get drm_crtc to timestamp: */
825         crtc = intel_get_crtc_for_pipe(dev, pipe);
826         if (crtc == NULL) {
827                 DRM_ERROR("Invalid crtc %d\n", pipe);
828                 return -EINVAL;
829         }
830
831         if (!crtc->hwmode.crtc_clock) {
832                 DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
833                 return -EBUSY;
834         }
835
836         /* Helper routine in DRM core does all the work: */
837         return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
838                                                      vblank_time, flags,
839                                                      &crtc->hwmode);
840 }
841
842 static void ironlake_rps_change_irq_handler(struct drm_device *dev)
843 {
844         struct drm_i915_private *dev_priv = dev->dev_private;
845         u32 busy_up, busy_down, max_avg, min_avg;
846         u8 new_delay;
847
848         spin_lock(&mchdev_lock);
849
850         I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
851
852         new_delay = dev_priv->ips.cur_delay;
853
854         I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
855         busy_up = I915_READ(RCPREVBSYTUPAVG);
856         busy_down = I915_READ(RCPREVBSYTDNAVG);
857         max_avg = I915_READ(RCBMAXAVG);
858         min_avg = I915_READ(RCBMINAVG);
859
860         /* Handle RCS change request from hw */
861         if (busy_up > max_avg) {
862                 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
863                         new_delay = dev_priv->ips.cur_delay - 1;
864                 if (new_delay < dev_priv->ips.max_delay)
865                         new_delay = dev_priv->ips.max_delay;
866         } else if (busy_down < min_avg) {
867                 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
868                         new_delay = dev_priv->ips.cur_delay + 1;
869                 if (new_delay > dev_priv->ips.min_delay)
870                         new_delay = dev_priv->ips.min_delay;
871         }
872
873         if (ironlake_set_drps(dev, new_delay))
874                 dev_priv->ips.cur_delay = new_delay;
875
876         spin_unlock(&mchdev_lock);
877
878         return;
879 }
880
881 static void notify_ring(struct intel_engine_cs *ring)
882 {
883         if (!intel_ring_initialized(ring))
884                 return;
885
886         trace_i915_gem_request_notify(ring);
887
888         wake_up_all(&ring->irq_queue);
889 }
890
891 static void vlv_c0_read(struct drm_i915_private *dev_priv,
892                         struct intel_rps_ei *ei)
893 {
894         ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
895         ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
896         ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
897 }
898
899 static bool vlv_c0_above(struct drm_i915_private *dev_priv,
900                          const struct intel_rps_ei *old,
901                          const struct intel_rps_ei *now,
902                          int threshold)
903 {
904         u64 time, c0;
905
906         if (old->cz_clock == 0)
907                 return false;
908
909         time = now->cz_clock - old->cz_clock;
910         time *= threshold * dev_priv->mem_freq;
911
912         /* Workload can be split between render + media, e.g. SwapBuffers
913          * being blitted in X after being rendered in mesa. To account for
914          * this we need to combine both engines into our activity counter.
915          */
916         c0 = now->render_c0 - old->render_c0;
917         c0 += now->media_c0 - old->media_c0;
918         c0 *= 100 * VLV_CZ_CLOCK_TO_MILLI_SEC * 4 / 1000;
919
920         return c0 >= time;
921 }
922
923 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
924 {
925         vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
926         dev_priv->rps.up_ei = dev_priv->rps.down_ei;
927 }
928
929 static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
930 {
931         struct intel_rps_ei now;
932         u32 events = 0;
933
934         if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
935                 return 0;
936
937         vlv_c0_read(dev_priv, &now);
938         if (now.cz_clock == 0)
939                 return 0;
940
941         if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
942                 if (!vlv_c0_above(dev_priv,
943                                   &dev_priv->rps.down_ei, &now,
944                                   dev_priv->rps.down_threshold))
945                         events |= GEN6_PM_RP_DOWN_THRESHOLD;
946                 dev_priv->rps.down_ei = now;
947         }
948
949         if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
950                 if (vlv_c0_above(dev_priv,
951                                  &dev_priv->rps.up_ei, &now,
952                                  dev_priv->rps.up_threshold))
953                         events |= GEN6_PM_RP_UP_THRESHOLD;
954                 dev_priv->rps.up_ei = now;
955         }
956
957         return events;
958 }
959
960 static bool any_waiters(struct drm_i915_private *dev_priv)
961 {
962         struct intel_engine_cs *ring;
963         int i;
964
965         for_each_ring(ring, dev_priv, i)
966                 if (ring->irq_refcount)
967                         return true;
968
969         return false;
970 }
971
972 static void gen6_pm_rps_work(struct work_struct *work)
973 {
974         struct drm_i915_private *dev_priv =
975                 container_of(work, struct drm_i915_private, rps.work);
976         bool client_boost;
977         int new_delay, adj, min, max;
978         u32 pm_iir;
979
980         spin_lock_irq(&dev_priv->irq_lock);
981         /* Speed up work cancellation while disabling RPS interrupts. */
982         if (!dev_priv->rps.interrupts_enabled) {
983                 spin_unlock_irq(&dev_priv->irq_lock);
984                 return;
985         }
986         pm_iir = dev_priv->rps.pm_iir;
987         dev_priv->rps.pm_iir = 0;
988         /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
989         gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
990         client_boost = dev_priv->rps.client_boost;
991         dev_priv->rps.client_boost = false;
992         spin_unlock_irq(&dev_priv->irq_lock);
993
994         /* Make sure we didn't queue anything we're not going to process. */
995         WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
996
997         if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
998                 return;
999
1000         mutex_lock(&dev_priv->rps.hw_lock);
1001
1002         pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
1003
1004         adj = dev_priv->rps.last_adj;
1005         new_delay = dev_priv->rps.cur_freq;
1006         min = dev_priv->rps.min_freq_softlimit;
1007         max = dev_priv->rps.max_freq_softlimit;
1008
1009         if (client_boost) {
1010                 new_delay = dev_priv->rps.max_freq_softlimit;
1011                 adj = 0;
1012         } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
1013                 if (adj > 0)
1014                         adj *= 2;
1015                 else /* CHV needs even encode values */
1016                         adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
1017                 /*
1018                  * For better performance, jump directly
1019                  * to RPe if we're below it.
1020                  */
1021                 if (new_delay < dev_priv->rps.efficient_freq - adj) {
1022                         new_delay = dev_priv->rps.efficient_freq;
1023                         adj = 0;
1024                 }
1025         } else if (any_waiters(dev_priv)) {
1026                 adj = 0;
1027         } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
1028                 if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
1029                         new_delay = dev_priv->rps.efficient_freq;
1030                 else
1031                         new_delay = dev_priv->rps.min_freq_softlimit;
1032                 adj = 0;
1033         } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1034                 if (adj < 0)
1035                         adj *= 2;
1036                 else /* CHV needs even encode values */
1037                         adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
1038         } else { /* unknown event */
1039                 adj = 0;
1040         }
1041
1042         dev_priv->rps.last_adj = adj;
1043
1044         /* sysfs frequency interfaces may have snuck in while servicing the
1045          * interrupt
1046          */
1047         new_delay += adj;
1048         new_delay = clamp_t(int, new_delay, min, max);
1049
1050         intel_set_rps(dev_priv->dev, new_delay);
1051
1052         mutex_unlock(&dev_priv->rps.hw_lock);
1053 }
1054
1055
1056 /**
1057  * ivybridge_parity_work - Workqueue called when a parity error interrupt
1058  * occurred.
1059  * @work: workqueue struct
1060  *
1061  * Doesn't actually do anything except notify userspace. As a consequence of
1062  * this event, userspace should try to remap the bad rows since,
1063  * statistically, the same row is more likely to go bad again.
1064  */
1065 static void ivybridge_parity_work(struct work_struct *work)
1066 {
1067         struct drm_i915_private *dev_priv =
1068                 container_of(work, struct drm_i915_private, l3_parity.error_work);
1069         u32 error_status, row, bank, subbank;
1070         char *parity_event[6];
1071         uint32_t misccpctl;
1072         uint8_t slice = 0;
1073
1074         /* We must turn off DOP level clock gating to access the L3 registers.
1075          * In order to prevent a get/put style interface, acquire struct mutex
1076          * any time we access those registers.
1077          */
1078         mutex_lock(&dev_priv->dev->struct_mutex);
1079
1080         /* If we've screwed up tracking, just let the interrupt fire again */
1081         if (WARN_ON(!dev_priv->l3_parity.which_slice))
1082                 goto out;
1083
1084         misccpctl = I915_READ(GEN7_MISCCPCTL);
1085         I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
1086         POSTING_READ(GEN7_MISCCPCTL);
1087
1088         while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1089                 u32 reg;
1090
1091                 slice--;
1092                 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
1093                         break;
1094
1095                 dev_priv->l3_parity.which_slice &= ~(1<<slice);
1096
1097                 reg = GEN7_L3CDERRST1 + (slice * 0x200);
1098
1099                 error_status = I915_READ(reg);
1100                 row = GEN7_PARITY_ERROR_ROW(error_status);
1101                 bank = GEN7_PARITY_ERROR_BANK(error_status);
1102                 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1103
1104                 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
1105                 POSTING_READ(reg);
1106
1107                 parity_event[0] = I915_L3_PARITY_UEVENT "=1";
1108                 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
1109                 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
1110                 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
1111                 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
1112                 parity_event[5] = NULL;
1113
1114                 kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
1115                                    KOBJ_CHANGE, parity_event);
1116
1117                 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
1118                           slice, row, bank, subbank);
1119
1120                 kfree(parity_event[4]);
1121                 kfree(parity_event[3]);
1122                 kfree(parity_event[2]);
1123                 kfree(parity_event[1]);
1124         }
1125
1126         I915_WRITE(GEN7_MISCCPCTL, misccpctl);
1127
1128 out:
1129         WARN_ON(dev_priv->l3_parity.which_slice);
1130         spin_lock_irq(&dev_priv->irq_lock);
1131         gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
1132         spin_unlock_irq(&dev_priv->irq_lock);
1133
1134         mutex_unlock(&dev_priv->dev->struct_mutex);
1135 }
1136
1137 static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
1138 {
1139         struct drm_i915_private *dev_priv = dev->dev_private;
1140
1141         if (!HAS_L3_DPF(dev))
1142                 return;
1143
1144         spin_lock(&dev_priv->irq_lock);
1145         gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
1146         spin_unlock(&dev_priv->irq_lock);
1147
1148         iir &= GT_PARITY_ERROR(dev);
1149         if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
1150                 dev_priv->l3_parity.which_slice |= 1 << 1;
1151
1152         if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
1153                 dev_priv->l3_parity.which_slice |= 1 << 0;
1154
1155         queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
1156 }
1157
1158 static void ilk_gt_irq_handler(struct drm_device *dev,
1159                                struct drm_i915_private *dev_priv,
1160                                u32 gt_iir)
1161 {
1162         if (gt_iir &
1163             (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1164                 notify_ring(&dev_priv->ring[RCS]);
1165         if (gt_iir & ILK_BSD_USER_INTERRUPT)
1166                 notify_ring(&dev_priv->ring[VCS]);
1167 }
1168
1169 static void snb_gt_irq_handler(struct drm_device *dev,
1170                                struct drm_i915_private *dev_priv,
1171                                u32 gt_iir)
1172 {
1173
1174         if (gt_iir &
1175             (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1176                 notify_ring(&dev_priv->ring[RCS]);
1177         if (gt_iir & GT_BSD_USER_INTERRUPT)
1178                 notify_ring(&dev_priv->ring[VCS]);
1179         if (gt_iir & GT_BLT_USER_INTERRUPT)
1180                 notify_ring(&dev_priv->ring[BCS]);
1181
1182         if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1183                       GT_BSD_CS_ERROR_INTERRUPT |
1184                       GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
1185                 DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
1186
1187         if (gt_iir & GT_PARITY_ERROR(dev))
1188                 ivybridge_parity_error_irq_handler(dev, gt_iir);
1189 }
1190
1191 static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
1192                                        u32 master_ctl)
1193 {
1194         irqreturn_t ret = IRQ_NONE;
1195
1196         if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1197                 u32 tmp = I915_READ_FW(GEN8_GT_IIR(0));
1198                 if (tmp) {
1199                         I915_WRITE_FW(GEN8_GT_IIR(0), tmp);
1200                         ret = IRQ_HANDLED;
1201
1202                         if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
1203                                 intel_lrc_irq_handler(&dev_priv->ring[RCS]);
1204                         if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
1205                                 notify_ring(&dev_priv->ring[RCS]);
1206
1207                         if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
1208                                 intel_lrc_irq_handler(&dev_priv->ring[BCS]);
1209                         if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
1210                                 notify_ring(&dev_priv->ring[BCS]);
1211                 } else
1212                         DRM_ERROR("The master control interrupt lied (GT0)!\n");
1213         }
1214
1215         if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
1216                 u32 tmp = I915_READ_FW(GEN8_GT_IIR(1));
1217                 if (tmp) {
1218                         I915_WRITE_FW(GEN8_GT_IIR(1), tmp);
1219                         ret = IRQ_HANDLED;
1220
1221                         if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
1222                                 intel_lrc_irq_handler(&dev_priv->ring[VCS]);
1223                         if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
1224                                 notify_ring(&dev_priv->ring[VCS]);
1225
1226                         if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
1227                                 intel_lrc_irq_handler(&dev_priv->ring[VCS2]);
1228                         if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
1229                                 notify_ring(&dev_priv->ring[VCS2]);
1230                 } else
1231                         DRM_ERROR("The master control interrupt lied (GT1)!\n");
1232         }
1233
1234         if (master_ctl & GEN8_GT_VECS_IRQ) {
1235                 u32 tmp = I915_READ_FW(GEN8_GT_IIR(3));
1236                 if (tmp) {
1237                         I915_WRITE_FW(GEN8_GT_IIR(3), tmp);
1238                         ret = IRQ_HANDLED;
1239
1240                         if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
1241                                 intel_lrc_irq_handler(&dev_priv->ring[VECS]);
1242                         if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
1243                                 notify_ring(&dev_priv->ring[VECS]);
1244                 } else
1245                         DRM_ERROR("The master control interrupt lied (GT3)!\n");
1246         }
1247
1248         if (master_ctl & GEN8_GT_PM_IRQ) {
1249                 u32 tmp = I915_READ_FW(GEN8_GT_IIR(2));
1250                 if (tmp & dev_priv->pm_rps_events) {
1251                         I915_WRITE_FW(GEN8_GT_IIR(2),
1252                                       tmp & dev_priv->pm_rps_events);
1253                         ret = IRQ_HANDLED;
1254                         gen6_rps_irq_handler(dev_priv, tmp);
1255                 } else
1256                         DRM_ERROR("The master control interrupt lied (PM)!\n");
1257         }
1258
1259         return ret;
1260 }
1261
1262 static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
1263 {
1264         switch (port) {
1265         case PORT_A:
1266                 return val & BXT_PORTA_HOTPLUG_LONG_DETECT;
1267         case PORT_B:
1268                 return val & PORTB_HOTPLUG_LONG_DETECT;
1269         case PORT_C:
1270                 return val & PORTC_HOTPLUG_LONG_DETECT;
1271         case PORT_D:
1272                 return val & PORTD_HOTPLUG_LONG_DETECT;
1273         default:
1274                 return false;
1275         }
1276 }
1277
1278 static bool pch_port_hotplug_long_detect(enum port port, u32 val)
1279 {
1280         switch (port) {
1281         case PORT_B:
1282                 return val & PORTB_HOTPLUG_LONG_DETECT;
1283         case PORT_C:
1284                 return val & PORTC_HOTPLUG_LONG_DETECT;
1285         case PORT_D:
1286                 return val & PORTD_HOTPLUG_LONG_DETECT;
1287         case PORT_E:
1288                 return val & PORTE_HOTPLUG_LONG_DETECT;
1289         default:
1290                 return false;
1291         }
1292 }
1293
1294 static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
1295 {
1296         switch (port) {
1297         case PORT_B:
1298                 return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1299         case PORT_C:
1300                 return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1301         case PORT_D:
1302                 return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1303         default:
1304                 return false;
1305         }
1306 }
1307
1308 /* Get a bit mask of pins that have triggered, and which ones may be long. */
1309 static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
1310                              u32 hotplug_trigger, u32 dig_hotplug_reg,
1311                              const u32 hpd[HPD_NUM_PINS],
1312                              bool long_pulse_detect(enum port port, u32 val))
1313 {
1314         enum port port;
1315         int i;
1316
1317         *pin_mask = 0;
1318         *long_mask = 0;
1319
1320         for_each_hpd_pin(i) {
1321                 if ((hpd[i] & hotplug_trigger) == 0)
1322                         continue;
1323
1324                 *pin_mask |= BIT(i);
1325
1326                 if (!intel_hpd_pin_to_port(i, &port))
1327                         continue;
1328
1329                 if (long_pulse_detect(port, dig_hotplug_reg))
1330                         *long_mask |= BIT(i);
1331         }
1332
1333         DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
1334                          hotplug_trigger, dig_hotplug_reg, *pin_mask);
1335
1336 }
1337
1338 static void gmbus_irq_handler(struct drm_device *dev)
1339 {
1340         struct drm_i915_private *dev_priv = dev->dev_private;
1341
1342         wake_up_all(&dev_priv->gmbus_wait_queue);
1343 }
1344
1345 static void dp_aux_irq_handler(struct drm_device *dev)
1346 {
1347         struct drm_i915_private *dev_priv = dev->dev_private;
1348
1349         wake_up_all(&dev_priv->gmbus_wait_queue);
1350 }
1351
1352 #if defined(CONFIG_DEBUG_FS)
1353 static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1354                                          uint32_t crc0, uint32_t crc1,
1355                                          uint32_t crc2, uint32_t crc3,
1356                                          uint32_t crc4)
1357 {
1358         struct drm_i915_private *dev_priv = dev->dev_private;
1359         struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1360         struct intel_pipe_crc_entry *entry;
1361         int head, tail;
1362
1363         spin_lock(&pipe_crc->lock);
1364
1365         if (!pipe_crc->entries) {
1366                 spin_unlock(&pipe_crc->lock);
1367                 DRM_DEBUG_KMS("spurious interrupt\n");
1368                 return;
1369         }
1370
1371         head = pipe_crc->head;
1372         tail = pipe_crc->tail;
1373
1374         if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
1375                 spin_unlock(&pipe_crc->lock);
1376                 DRM_ERROR("CRC buffer overflowing\n");
1377                 return;
1378         }
1379
1380         entry = &pipe_crc->entries[head];
1381
1382         entry->frame = dev->driver->get_vblank_counter(dev, pipe);
1383         entry->crc[0] = crc0;
1384         entry->crc[1] = crc1;
1385         entry->crc[2] = crc2;
1386         entry->crc[3] = crc3;
1387         entry->crc[4] = crc4;
1388
1389         head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
1390         pipe_crc->head = head;
1391
1392         spin_unlock(&pipe_crc->lock);
1393
1394         wake_up_interruptible(&pipe_crc->wq);
1395 }
1396 #else
1397 static inline void
1398 display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1399                              uint32_t crc0, uint32_t crc1,
1400                              uint32_t crc2, uint32_t crc3,
1401                              uint32_t crc4) {}
1402 #endif
1403
1404
1405 static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1406 {
1407         struct drm_i915_private *dev_priv = dev->dev_private;
1408
1409         display_pipe_crc_irq_handler(dev, pipe,
1410                                      I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1411                                      0, 0, 0, 0);
1412 }
1413
1414 static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1415 {
1416         struct drm_i915_private *dev_priv = dev->dev_private;
1417
1418         display_pipe_crc_irq_handler(dev, pipe,
1419                                      I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1420                                      I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1421                                      I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1422                                      I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1423                                      I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1424 }
1425
1426 static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1427 {
1428         struct drm_i915_private *dev_priv = dev->dev_private;
1429         uint32_t res1, res2;
1430
1431         if (INTEL_INFO(dev)->gen >= 3)
1432                 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1433         else
1434                 res1 = 0;
1435
1436         if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
1437                 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1438         else
1439                 res2 = 0;
1440
1441         display_pipe_crc_irq_handler(dev, pipe,
1442                                      I915_READ(PIPE_CRC_RES_RED(pipe)),
1443                                      I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1444                                      I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1445                                      res1, res2);
1446 }
1447
1448 /* The RPS events need forcewake, so we add them to a work queue and mask their
1449  * IMR bits until the work is done. Other interrupts can be processed without
1450  * the work queue. */
1451 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1452 {
1453         if (pm_iir & dev_priv->pm_rps_events) {
1454                 spin_lock(&dev_priv->irq_lock);
1455                 gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1456                 if (dev_priv->rps.interrupts_enabled) {
1457                         dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1458                         queue_work(dev_priv->wq, &dev_priv->rps.work);
1459                 }
1460                 spin_unlock(&dev_priv->irq_lock);
1461         }
1462
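        /*
         * On gen8+ the VEBOX user interrupt is delivered through the GT
         * interrupt registers rather than the PM registers, so only the
         * RPS bits above are of interest here.
         */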
1463         if (INTEL_INFO(dev_priv)->gen >= 8)
1464                 return;
1465
1466         if (HAS_VEBOX(dev_priv->dev)) {
1467                 if (pm_iir & PM_VEBOX_USER_INTERRUPT)
1468                         notify_ring(&dev_priv->ring[VECS]);
1469
1470                 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
1471                         DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
1472         }
1473 }
1474
1475 static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
1476 {
1477         if (!drm_handle_vblank(dev, pipe))
1478                 return false;
1479
1480         return true;
1481 }
1482
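/*
 * First pass, under irq_lock: sample and clear the PIPESTAT registers,
 * restricted to the bits we actually care about. Second pass, outside
 * the lock: act on vblank, flip-done, CRC and FIFO underrun events.
 */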
1483 static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
1484 {
1485         struct drm_i915_private *dev_priv = dev->dev_private;
1486         u32 pipe_stats[I915_MAX_PIPES] = { };
1487         int pipe;
1488
1489         spin_lock(&dev_priv->irq_lock);
1490         for_each_pipe(dev_priv, pipe) {
1491                 int reg;
1492                 u32 mask, iir_bit = 0;
1493
1494                 /*
1495                  * PIPESTAT bits get signalled even when the interrupt is
1496                  * disabled with the mask bits, and some of the status bits do
1497                  * not generate interrupts at all (like the underrun bit). Hence
1498                  * we need to be careful that we only handle what we want to
1499                  * handle.
1500                  */
1501
1502                 /* fifo underruns are filtered in the underrun handler. */
1503                 mask = PIPE_FIFO_UNDERRUN_STATUS;
1504
1505                 switch (pipe) {
1506                 case PIPE_A:
1507                         iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1508                         break;
1509                 case PIPE_B:
1510                         iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1511                         break;
1512                 case PIPE_C:
1513                         iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1514                         break;
1515                 }
1516                 if (iir & iir_bit)
1517                         mask |= dev_priv->pipestat_irq_mask[pipe];
1518
1519                 if (!mask)
1520                         continue;
1521
1522                 reg = PIPESTAT(pipe);
1523                 mask |= PIPESTAT_INT_ENABLE_MASK;
1524                 pipe_stats[pipe] = I915_READ(reg) & mask;
1525
1526                 /*
1527                  * Clear the PIPE*STAT regs before the IIR
1528                  */
1529                 if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
1530                                         PIPESTAT_INT_STATUS_MASK))
1531                         I915_WRITE(reg, pipe_stats[pipe]);
1532         }
1533         spin_unlock(&dev_priv->irq_lock);
1534
1535         for_each_pipe(dev_priv, pipe) {
1536                 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
1537                     intel_pipe_handle_vblank(dev, pipe))
1538                         intel_check_page_flip(dev, pipe);
1539
1540                 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
1541                         intel_prepare_page_flip(dev, pipe);
1542                         intel_finish_page_flip(dev, pipe);
1543                 }
1544
1545                 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1546                         i9xx_pipe_crc_irq_handler(dev, pipe);
1547
1548                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1549                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1550         }
1551
1552         if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1553                 gmbus_irq_handler(dev);
1554 }
1555
1556 static void i9xx_hpd_irq_handler(struct drm_device *dev)
1557 {
1558         struct drm_i915_private *dev_priv = dev->dev_private;
1559         u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
1560         u32 pin_mask, long_mask;
1561
1562         if (!hotplug_status)
1563                 return;
1564
1565         I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1566         /*
1567          * Make sure hotplug status is cleared before we clear IIR, or else we
1568          * may miss hotplug events.
1569          */
1570         POSTING_READ(PORT_HOTPLUG_STAT);
1571
1572         if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
1573                 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1574
1575                 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1576                                    hotplug_trigger, hpd_status_g4x,
1577                                    i9xx_port_hotplug_long_detect);
1578                 intel_hpd_irq_handler(dev, pin_mask, long_mask);
1579
1580                 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1581                         dp_aux_irq_handler(dev);
1582         } else {
1583                 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1584
1585                 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1586                                    hotplug_trigger, hpd_status_i915,
1587                                    i9xx_port_hotplug_long_detect);
1588                 intel_hpd_irq_handler(dev, pin_mask, long_mask);
1589         }
1590 }
1591
1592 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1593 {
1594         struct drm_device *dev = arg;
1595         struct drm_i915_private *dev_priv = dev->dev_private;
1596         u32 iir, gt_iir, pm_iir;
1597         irqreturn_t ret = IRQ_NONE;
1598
1599         if (!intel_irqs_enabled(dev_priv))
1600                 return IRQ_NONE;
1601
1602         while (true) {
1603                 /* Find, clear, then process each source of interrupt */
1604
1605                 gt_iir = I915_READ(GTIIR);
1606                 if (gt_iir)
1607                         I915_WRITE(GTIIR, gt_iir);
1608
1609                 pm_iir = I915_READ(GEN6_PMIIR);
1610                 if (pm_iir)
1611                         I915_WRITE(GEN6_PMIIR, pm_iir);
1612
1613                 iir = I915_READ(VLV_IIR);
1614                 if (iir) {
1615                         /* Consume port before clearing IIR or we'll miss events */
1616                         if (iir & I915_DISPLAY_PORT_INTERRUPT)
1617                                 i9xx_hpd_irq_handler(dev);
1618                         I915_WRITE(VLV_IIR, iir);
1619                 }
1620
1621                 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1622                         goto out;
1623
1624                 ret = IRQ_HANDLED;
1625
1626                 if (gt_iir)
1627                         snb_gt_irq_handler(dev, dev_priv, gt_iir);
1628                 if (pm_iir)
1629                         gen6_rps_irq_handler(dev_priv, pm_iir);
1630                 /* Call regardless, as some status bits might not be
1631                  * signalled in iir */
1632                 valleyview_pipestat_irq_handler(dev, iir);
1633         }
1634
1635 out:
1636         return ret;
1637 }
1638
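/*
 * CHV combines a gen8-style master interrupt control and GT interrupt
 * layout with VLV-style display interrupts reported through VLV_IIR.
 */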
1639 static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1640 {
1641         struct drm_device *dev = arg;
1642         struct drm_i915_private *dev_priv = dev->dev_private;
1643         u32 master_ctl, iir;
1644         irqreturn_t ret = IRQ_NONE;
1645
1646         if (!intel_irqs_enabled(dev_priv))
1647                 return IRQ_NONE;
1648
1649         for (;;) {
1650                 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1651                 iir = I915_READ(VLV_IIR);
1652
1653                 if (master_ctl == 0 && iir == 0)
1654                         break;
1655
1656                 ret = IRQ_HANDLED;
1657
1658                 I915_WRITE(GEN8_MASTER_IRQ, 0);
1659
1660                 /* Find, clear, then process each source of interrupt */
1661
1662                 if (iir) {
1663                         /* Consume port before clearing IIR or we'll miss events */
1664                         if (iir & I915_DISPLAY_PORT_INTERRUPT)
1665                                 i9xx_hpd_irq_handler(dev);
1666                         I915_WRITE(VLV_IIR, iir);
1667                 }
1668
1669                 gen8_gt_irq_handler(dev_priv, master_ctl);
1670
1671                 /* Call regardless, as some status bits might not be
1672                  * signalled in iir */
1673                 valleyview_pipestat_irq_handler(dev, iir);
1674
1675                 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
1676                 POSTING_READ(GEN8_MASTER_IRQ);
1677         }
1678
1679         return ret;
1680 }
1681
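/*
 * South display engine (PCH) interrupts for IBX; CPT/PPT/LPT and SPT use
 * the CPT variant below, which has a different bit layout.
 */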
1682 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
1683 {
1684         struct drm_i915_private *dev_priv = dev->dev_private;
1685         int pipe;
1686         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1687
1688         if (hotplug_trigger) {
1689                 u32 dig_hotplug_reg, pin_mask, long_mask;
1690
1691                 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1692                 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1693
1694                 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1695                                    dig_hotplug_reg, hpd_ibx,
1696                                    pch_port_hotplug_long_detect);
1697                 intel_hpd_irq_handler(dev, pin_mask, long_mask);
1698         }
1699
1700         if (pch_iir & SDE_AUDIO_POWER_MASK) {
1701                 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1702                                SDE_AUDIO_POWER_SHIFT);
1703                 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1704                                  port_name(port));
1705         }
1706
1707         if (pch_iir & SDE_AUX_MASK)
1708                 dp_aux_irq_handler(dev);
1709
1710         if (pch_iir & SDE_GMBUS)
1711                 gmbus_irq_handler(dev);
1712
1713         if (pch_iir & SDE_AUDIO_HDCP_MASK)
1714                 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1715
1716         if (pch_iir & SDE_AUDIO_TRANS_MASK)
1717                 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1718
1719         if (pch_iir & SDE_POISON)
1720                 DRM_ERROR("PCH poison interrupt\n");
1721
1722         if (pch_iir & SDE_FDI_MASK)
1723                 for_each_pipe(dev_priv, pipe)
1724                         DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
1725                                          pipe_name(pipe),
1726                                          I915_READ(FDI_RX_IIR(pipe)));
1727
1728         if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1729                 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1730
1731         if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1732                 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1733
1734         if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1735                 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
1736
1737         if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1738                 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
1739 }
1740
1741 static void ivb_err_int_handler(struct drm_device *dev)
1742 {
1743         struct drm_i915_private *dev_priv = dev->dev_private;
1744         u32 err_int = I915_READ(GEN7_ERR_INT);
1745         enum pipe pipe;
1746
1747         if (err_int & ERR_INT_POISON)
1748                 DRM_ERROR("Poison interrupt\n");
1749
1750         for_each_pipe(dev_priv, pipe) {
1751                 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
1752                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1753
1754                 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1755                         if (IS_IVYBRIDGE(dev))
1756                                 ivb_pipe_crc_irq_handler(dev, pipe);
1757                         else
1758                                 hsw_pipe_crc_irq_handler(dev, pipe);
1759                 }
1760         }
1761
1762         I915_WRITE(GEN7_ERR_INT, err_int);
1763 }
1764
1765 static void cpt_serr_int_handler(struct drm_device *dev)
1766 {
1767         struct drm_i915_private *dev_priv = dev->dev_private;
1768         u32 serr_int = I915_READ(SERR_INT);
1769
1770         if (serr_int & SERR_INT_POISON)
1771                 DRM_ERROR("PCH poison interrupt\n");
1772
1773         if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
1774                 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
1775
1776         if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
1777                 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
1778
1779         if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
1780                 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
1781
1782         I915_WRITE(SERR_INT, serr_int);
1783 }
1784
1785 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
1786 {
1787         struct drm_i915_private *dev_priv = dev->dev_private;
1788         int pipe;
1789         u32 hotplug_trigger;
1790
1791         if (HAS_PCH_SPT(dev))
1792                 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT;
1793         else
1794                 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1795
1796         if (hotplug_trigger) {
1797                 u32 dig_hotplug_reg, pin_mask, long_mask;
1798
1799                 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1800                 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1801
1802                 if (HAS_PCH_SPT(dev)) {
1803                         intel_get_hpd_pins(&pin_mask, &long_mask,
1804                                            hotplug_trigger,
1805                                            dig_hotplug_reg, hpd_spt,
1806                                            pch_port_hotplug_long_detect);
1807
1808                         /* detect PORTE HP event */
1809                         dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
1810                         if (pch_port_hotplug_long_detect(PORT_E,
1811                                                          dig_hotplug_reg))
1812                                 long_mask |= 1 << HPD_PORT_E;
1813                 } else
1814                         intel_get_hpd_pins(&pin_mask, &long_mask,
1815                                            hotplug_trigger,
1816                                            dig_hotplug_reg, hpd_cpt,
1817                                            pch_port_hotplug_long_detect);
1818
1819                 intel_hpd_irq_handler(dev, pin_mask, long_mask);
1820         }
1821
1822         if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1823                 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1824                                SDE_AUDIO_POWER_SHIFT_CPT);
1825                 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1826                                  port_name(port));
1827         }
1828
1829         if (pch_iir & SDE_AUX_MASK_CPT)
1830                 dp_aux_irq_handler(dev);
1831
1832         if (pch_iir & SDE_GMBUS_CPT)
1833                 gmbus_irq_handler(dev);
1834
1835         if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
1836                 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
1837
1838         if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
1839                 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
1840
1841         if (pch_iir & SDE_FDI_MASK_CPT)
1842                 for_each_pipe(dev_priv, pipe)
1843                         DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
1844                                          pipe_name(pipe),
1845                                          I915_READ(FDI_RX_IIR(pipe)));
1846
1847         if (pch_iir & SDE_ERROR_CPT)
1848                 cpt_serr_int_handler(dev);
1849 }
1850
1851 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
1852 {
1853         struct drm_i915_private *dev_priv = dev->dev_private;
1854         enum pipe pipe;
1855
1856         if (de_iir & DE_AUX_CHANNEL_A)
1857                 dp_aux_irq_handler(dev);
1858
1859         if (de_iir & DE_GSE)
1860                 intel_opregion_asle_intr(dev);
1861
1862         if (de_iir & DE_POISON)
1863                 DRM_ERROR("Poison interrupt\n");
1864
1865         for_each_pipe(dev_priv, pipe) {
1866                 if (de_iir & DE_PIPE_VBLANK(pipe) &&
1867                     intel_pipe_handle_vblank(dev, pipe))
1868                         intel_check_page_flip(dev, pipe);
1869
1870                 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
1871                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1872
1873                 if (de_iir & DE_PIPE_CRC_DONE(pipe))
1874                         i9xx_pipe_crc_irq_handler(dev, pipe);
1875
1876                 /* plane/pipes map 1:1 on ilk+ */
1877                 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
1878                         intel_prepare_page_flip(dev, pipe);
1879                         intel_finish_page_flip_plane(dev, pipe);
1880                 }
1881         }
1882
1883         /* check event from PCH */
1884         if (de_iir & DE_PCH_EVENT) {
1885                 u32 pch_iir = I915_READ(SDEIIR);
1886
1887                 if (HAS_PCH_CPT(dev))
1888                         cpt_irq_handler(dev, pch_iir);
1889                 else
1890                         ibx_irq_handler(dev, pch_iir);
1891
1892                 /* should clear PCH hotplug event before clearing the CPU irq */
1893                 I915_WRITE(SDEIIR, pch_iir);
1894         }
1895
1896         if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
1897                 ironlake_rps_change_irq_handler(dev);
1898 }
1899
1900 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
1901 {
1902         struct drm_i915_private *dev_priv = dev->dev_private;
1903         enum pipe pipe;
1904
1905         if (de_iir & DE_ERR_INT_IVB)
1906                 ivb_err_int_handler(dev);
1907
1908         if (de_iir & DE_AUX_CHANNEL_A_IVB)
1909                 dp_aux_irq_handler(dev);
1910
1911         if (de_iir & DE_GSE_IVB)
1912                 intel_opregion_asle_intr(dev);
1913
1914         for_each_pipe(dev_priv, pipe) {
1915                 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
1916                     intel_pipe_handle_vblank(dev, pipe))
1917                         intel_check_page_flip(dev, pipe);
1918
1919                 /* plane/pipes map 1:1 on ilk+ */
1920                 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
1921                         intel_prepare_page_flip(dev, pipe);
1922                         intel_finish_page_flip_plane(dev, pipe);
1923                 }
1924         }
1925
1926         /* check event from PCH */
1927         if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
1928                 u32 pch_iir = I915_READ(SDEIIR);
1929
1930                 cpt_irq_handler(dev, pch_iir);
1931
1932                 /* clear PCH hotplug event before clearing the CPU irq */
1933                 I915_WRITE(SDEIIR, pch_iir);
1934         }
1935 }
1936
1937 /*
1938  * To handle irqs with the minimum potential races with fresh interrupts, we:
1939  * 1 - Disable Master Interrupt Control.
1940  * 2 - Find the source(s) of the interrupt.
1941  * 3 - Clear the Interrupt Identity bits (IIR).
1942  * 4 - Process the interrupt(s) that had bits set in the IIRs.
1943  * 5 - Re-enable Master Interrupt Control.
1944  */
1945 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
1946 {
1947         struct drm_device *dev = arg;
1948         struct drm_i915_private *dev_priv = dev->dev_private;
1949         u32 de_iir, gt_iir, de_ier, sde_ier = 0;
1950         irqreturn_t ret = IRQ_NONE;
1951
1952         if (!intel_irqs_enabled(dev_priv))
1953                 return IRQ_NONE;
1954
1955         /* We get interrupts on unclaimed registers, so check for this before we
1956          * do any I915_{READ,WRITE}. */
1957         intel_uncore_check_errors(dev);
1958
1959         /* disable master interrupt before clearing iir  */
1960         de_ier = I915_READ(DEIER);
1961         I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
1962         POSTING_READ(DEIER);
1963
1964         /* Disable south interrupts. We'll only write to SDEIIR once, so further
1965          * interrupts will be stored on its back queue, and then we'll be
1966          * able to process them after we restore SDEIER (as soon as we restore
1967          * it, we'll get an interrupt if SDEIIR still has something to process
1968          * due to its back queue). */
1969         if (!HAS_PCH_NOP(dev)) {
1970                 sde_ier = I915_READ(SDEIER);
1971                 I915_WRITE(SDEIER, 0);
1972                 POSTING_READ(SDEIER);
1973         }
1974
1975         /* Find, clear, then process each source of interrupt */
1976
1977         gt_iir = I915_READ(GTIIR);
1978         if (gt_iir) {
1979                 I915_WRITE(GTIIR, gt_iir);
1980                 ret = IRQ_HANDLED;
1981                 if (INTEL_INFO(dev)->gen >= 6)
1982                         snb_gt_irq_handler(dev, dev_priv, gt_iir);
1983                 else
1984                         ilk_gt_irq_handler(dev, dev_priv, gt_iir);
1985         }
1986
1987         de_iir = I915_READ(DEIIR);
1988         if (de_iir) {
1989                 I915_WRITE(DEIIR, de_iir);
1990                 ret = IRQ_HANDLED;
1991                 if (INTEL_INFO(dev)->gen >= 7)
1992                         ivb_display_irq_handler(dev, de_iir);
1993                 else
1994                         ilk_display_irq_handler(dev, de_iir);
1995         }
1996
1997         if (INTEL_INFO(dev)->gen >= 6) {
1998                 u32 pm_iir = I915_READ(GEN6_PMIIR);
1999                 if (pm_iir) {
2000                         I915_WRITE(GEN6_PMIIR, pm_iir);
2001                         ret = IRQ_HANDLED;
2002                         gen6_rps_irq_handler(dev_priv, pm_iir);
2003                 }
2004         }
2005
2006         I915_WRITE(DEIER, de_ier);
2007         POSTING_READ(DEIER);
2008         if (!HAS_PCH_NOP(dev)) {
2009                 I915_WRITE(SDEIER, sde_ier);
2010                 POSTING_READ(SDEIER);
2011         }
2012
2013         return ret;
2014 }
2015
2016 static void bxt_hpd_handler(struct drm_device *dev, uint32_t iir_status)
2017 {
2018         struct drm_i915_private *dev_priv = dev->dev_private;
2019         u32 hp_control, hp_trigger;
2020         u32 pin_mask, long_mask;
2021
2022         /* Get the status */
2023         hp_trigger = iir_status & BXT_DE_PORT_HOTPLUG_MASK;
2024         hp_control = I915_READ(BXT_HOTPLUG_CTL);
2025
2026         /* Hotplug not enabled? */
2027         if (!(hp_control & BXT_HOTPLUG_CTL_MASK)) {
2028                 DRM_ERROR("Interrupt when HPD disabled\n");
2029                 return;
2030         }
2031
2032         /* Clear sticky bits in hpd status */
2033         I915_WRITE(BXT_HOTPLUG_CTL, hp_control);
2034
2035         intel_get_hpd_pins(&pin_mask, &long_mask, hp_trigger, hp_control,
2036                            hpd_bxt, bxt_port_hotplug_long_detect);
2037         intel_hpd_irq_handler(dev, pin_mask, long_mask);
2038 }
2039
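/*
 * Top-level gen8+ interrupt handler: disable the master interrupt
 * control, then find, clear and process the GT, DE misc, DE port,
 * per-pipe and PCH IIRs before re-enabling the master control.
 */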
2040 static irqreturn_t gen8_irq_handler(int irq, void *arg)
2041 {
2042         struct drm_device *dev = arg;
2043         struct drm_i915_private *dev_priv = dev->dev_private;
2044         u32 master_ctl;
2045         irqreturn_t ret = IRQ_NONE;
2046         uint32_t tmp = 0;
2047         enum pipe pipe;
2048         u32 aux_mask = GEN8_AUX_CHANNEL_A;
2049
2050         if (!intel_irqs_enabled(dev_priv))
2051                 return IRQ_NONE;
2052
2053         if (IS_GEN9(dev))
2054                 aux_mask |=  GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
2055                         GEN9_AUX_CHANNEL_D;
2056
2057         master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
2058         master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2059         if (!master_ctl)
2060                 return IRQ_NONE;
2061
2062         I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
2063
2064         /* Find, clear, then process each source of interrupt */
2065
2066         ret = gen8_gt_irq_handler(dev_priv, master_ctl);
2067
2068         if (master_ctl & GEN8_DE_MISC_IRQ) {
2069                 tmp = I915_READ(GEN8_DE_MISC_IIR);
2070                 if (tmp) {
2071                         I915_WRITE(GEN8_DE_MISC_IIR, tmp);
2072                         ret = IRQ_HANDLED;
2073                         if (tmp & GEN8_DE_MISC_GSE)
2074                                 intel_opregion_asle_intr(dev);
2075                         else
2076                                 DRM_ERROR("Unexpected DE Misc interrupt\n");
2077                 }
2078                 else
2079                         DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2080         }
2081
2082         if (master_ctl & GEN8_DE_PORT_IRQ) {
2083                 tmp = I915_READ(GEN8_DE_PORT_IIR);
2084                 if (tmp) {
2085                         bool found = false;
2086
2087                         I915_WRITE(GEN8_DE_PORT_IIR, tmp);
2088                         ret = IRQ_HANDLED;
2089
2090                         if (tmp & aux_mask) {
2091                                 dp_aux_irq_handler(dev);
2092                                 found = true;
2093                         }
2094
2095                         if (IS_BROXTON(dev) && tmp & BXT_DE_PORT_HOTPLUG_MASK) {
2096                                 bxt_hpd_handler(dev, tmp);
2097                                 found = true;
2098                         }
2099
2100                         if (IS_BROXTON(dev) && (tmp & BXT_DE_PORT_GMBUS)) {
2101                                 gmbus_irq_handler(dev);
2102                                 found = true;
2103                         }
2104
2105                         if (!found)
2106                                 DRM_ERROR("Unexpected DE Port interrupt\n");
2107                 }
2108                 else
2109                         DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2110         }
2111
2112         for_each_pipe(dev_priv, pipe) {
2113                 uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
2114
2115                 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2116                         continue;
2117
2118                 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2119                 if (pipe_iir) {
2120                         ret = IRQ_HANDLED;
2121                         I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
2122
2123                         if (pipe_iir & GEN8_PIPE_VBLANK &&
2124                             intel_pipe_handle_vblank(dev, pipe))
2125                                 intel_check_page_flip(dev, pipe);
2126
2127                         if (IS_GEN9(dev))
2128                                 flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
2129                         else
2130                                 flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
2131
2132                         if (flip_done) {
2133                                 intel_prepare_page_flip(dev, pipe);
2134                                 intel_finish_page_flip_plane(dev, pipe);
2135                         }
2136
2137                         if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2138                                 hsw_pipe_crc_irq_handler(dev, pipe);
2139
2140                         if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
2141                                 intel_cpu_fifo_underrun_irq_handler(dev_priv,
2142                                                                     pipe);
2143
2144
2145                         if (IS_GEN9(dev))
2146                                 fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2147                         else
2148                                 fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2149
2150                         if (fault_errors)
2151                                 DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
2152                                           pipe_name(pipe),
2153                                           pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
2154                 } else
2155                         DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2156         }
2157
2158         if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
2159             master_ctl & GEN8_DE_PCH_IRQ) {
2160                 /*
2161                  * FIXME(BDW): Assume for now that the new interrupt handling
2162                  * scheme also closed the SDE interrupt handling race we've seen
2163                  * on older pch-split platforms. But this needs testing.
2164                  */
2165                 u32 pch_iir = I915_READ(SDEIIR);
2166                 if (pch_iir) {
2167                         I915_WRITE(SDEIIR, pch_iir);
2168                         ret = IRQ_HANDLED;
2169                         cpt_irq_handler(dev, pch_iir);
2170                 } else
2171                         DRM_ERROR("The master control interrupt lied (SDE)!\n");
2172
2173         }
2174
2175         I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2176         POSTING_READ_FW(GEN8_MASTER_IRQ);
2177
2178         return ret;
2179 }
2180
2181 static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2182                                bool reset_completed)
2183 {
2184         struct intel_engine_cs *ring;
2185         int i;
2186
2187         /*
2188          * Notify all waiters for GPU completion events that reset state has
2189          * been changed, and that they need to restart their wait after
2190          * checking for potential errors (and bail out to drop locks if there is
2191          * a gpu reset pending so that i915_reset_and_wakeup() can acquire them).
2192          */
2193
2194         /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2195         for_each_ring(ring, dev_priv, i)
2196                 wake_up_all(&ring->irq_queue);
2197
2198         /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2199         wake_up_all(&dev_priv->pending_flip_queue);
2200
2201         /*
2202          * Signal tasks blocked in i915_gem_wait_for_error that the pending
2203          * reset state is cleared.
2204          */
2205         if (reset_completed)
2206                 wake_up_all(&dev_priv->gpu_error.reset_queue);
2207 }
2208
2209 /**
2210  * i915_reset_and_wakeup - do process context error handling work
2211  *
2212  * Fire an error uevent so userspace can see that a hang or error
2213  * was detected.
2214  */
2215 static void i915_reset_and_wakeup(struct drm_device *dev)
2216 {
2217         struct drm_i915_private *dev_priv = to_i915(dev);
2218         struct i915_gpu_error *error = &dev_priv->gpu_error;
2219         char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2220         char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2221         char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2222         int ret;
2223
2224         kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
2225
2226         /*
2227          * Note that there's only one work item which does gpu resets, so we
2228          * need not worry about concurrent gpu resets potentially incrementing
2229          * error->reset_counter twice. We only need to take care of another
2230          * racing irq/hangcheck declaring the gpu dead for a second time. A
2231          * quick check for that is good enough: schedule_work ensures the
2232          * correct ordering between hang detection and this work item, and since
2233          * the reset in-progress bit is only ever set by code outside of this
2234          * work we don't need to worry about any other races.
2235          */
2236         if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
2237                 DRM_DEBUG_DRIVER("resetting chip\n");
2238                 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
2239                                    reset_event);
2240
2241                 /*
2242                  * In most cases it's guaranteed that we get here with an RPM
2243                  * reference held, for example because there is a pending GPU
2244                  * request that won't finish until the reset is done. This
2245                  * isn't the case at least when we get here by doing a
2246                  * simulated reset via debugs, so get an RPM reference.
2247                  */
2248                 intel_runtime_pm_get(dev_priv);
2249
2250                 intel_prepare_reset(dev);
2251
2252                 /*
2253                  * All state reset _must_ be completed before we update the
2254                  * reset counter, for otherwise waiters might miss the reset
2255                  * pending state and not properly drop locks, resulting in
2256                  * deadlocks with the reset work.
2257                  */
2258                 ret = i915_reset(dev);
2259
2260                 intel_finish_reset(dev);
2261
2262                 intel_runtime_pm_put(dev_priv);
2263
2264                 if (ret == 0) {
2265                         /*
2266                          * After all the gem state is reset, increment the reset
2267                          * counter and wake up everyone waiting for the reset to
2268                          * complete.
2269                          *
2270                          * Since unlock operations are a one-sided barrier only,
2271                          * we need to insert a barrier here to order any seqno
2272                          * updates before
2273                          * the counter increment.
2274                          */
2275                         smp_mb__before_atomic();
2276                         atomic_inc(&dev_priv->gpu_error.reset_counter);
2277
2278                         kobject_uevent_env(&dev->primary->kdev->kobj,
2279                                            KOBJ_CHANGE, reset_done_event);
2280                 } else {
2281                         atomic_or(I915_WEDGED, &error->reset_counter);
2282                 }
2283
2284                 /*
2285                  * Note: The wake_up also serves as a memory barrier so that
2286                  * waiters see the updated value of the reset counter atomic_t.
2287                  */
2288                 i915_error_wake_up(dev_priv, true);
2289         }
2290 }
2291
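/*
 * Dump the error identity register (and related per-error state) to the
 * log, then clear it; any bits that refuse to clear are masked in EMR so
 * they cannot fire again.
 */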
2292 static void i915_report_and_clear_eir(struct drm_device *dev)
2293 {
2294         struct drm_i915_private *dev_priv = dev->dev_private;
2295         uint32_t instdone[I915_NUM_INSTDONE_REG];
2296         u32 eir = I915_READ(EIR);
2297         int pipe, i;
2298
2299         if (!eir)
2300                 return;
2301
2302         pr_err("render error detected, EIR: 0x%08x\n", eir);
2303
2304         i915_get_extra_instdone(dev, instdone);
2305
2306         if (IS_G4X(dev)) {
2307                 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2308                         u32 ipeir = I915_READ(IPEIR_I965);
2309
2310                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2311                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2312                         for (i = 0; i < ARRAY_SIZE(instdone); i++)
2313                                 pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2314                         pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
2315                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2316                         I915_WRITE(IPEIR_I965, ipeir);
2317                         POSTING_READ(IPEIR_I965);
2318                 }
2319                 if (eir & GM45_ERROR_PAGE_TABLE) {
2320                         u32 pgtbl_err = I915_READ(PGTBL_ER);
2321                         pr_err("page table error\n");
2322                         pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
2323                         I915_WRITE(PGTBL_ER, pgtbl_err);
2324                         POSTING_READ(PGTBL_ER);
2325                 }
2326         }
2327
2328         if (!IS_GEN2(dev)) {
2329                 if (eir & I915_ERROR_PAGE_TABLE) {
2330                         u32 pgtbl_err = I915_READ(PGTBL_ER);
2331                         pr_err("page table error\n");
2332                         pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
2333                         I915_WRITE(PGTBL_ER, pgtbl_err);
2334                         POSTING_READ(PGTBL_ER);
2335                 }
2336         }
2337
2338         if (eir & I915_ERROR_MEMORY_REFRESH) {
2339                 pr_err("memory refresh error:\n");
2340                 for_each_pipe(dev_priv, pipe)
2341                         pr_err("pipe %c stat: 0x%08x\n",
2342                                pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2343                 /* pipestat has already been acked */
2344         }
2345         if (eir & I915_ERROR_INSTRUCTION) {
2346                 pr_err("instruction error\n");
2347                 pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
2348                 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2349                         pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2350                 if (INTEL_INFO(dev)->gen < 4) {
2351                         u32 ipeir = I915_READ(IPEIR);
2352
2353                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
2354                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
2355                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
2356                         I915_WRITE(IPEIR, ipeir);
2357                         POSTING_READ(IPEIR);
2358                 } else {
2359                         u32 ipeir = I915_READ(IPEIR_I965);
2360
2361                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2362                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2363                         pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
2364                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2365                         I915_WRITE(IPEIR_I965, ipeir);
2366                         POSTING_READ(IPEIR_I965);
2367                 }
2368         }
2369
2370         I915_WRITE(EIR, eir);
2371         POSTING_READ(EIR);
2372         eir = I915_READ(EIR);
2373         if (eir) {
2374                 /*
2375                  * some errors might have become stuck,
2376                  * mask them.
2377                  */
2378                 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2379                 I915_WRITE(EMR, I915_READ(EMR) | eir);
2380                 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2381         }
2382 }
2383
2384 /**
2385  * i915_handle_error - handle a gpu error
2386  * @dev: drm device
2387  *
2388  * Do some basic checking of register state at error time and
2389  * dump it to the syslog.  Also call i915_capture_error_state() to make
2390  * sure we get a record and make it available in debugfs.  Fire a uevent
2391  * so userspace knows something bad happened (should trigger collection
2392  * of a ring dump etc.).
2393  */
2394 void i915_handle_error(struct drm_device *dev, bool wedged,
2395                        const char *fmt, ...)
2396 {
2397         struct drm_i915_private *dev_priv = dev->dev_private;
2398         va_list args;
2399         char error_msg[80];
2400
2401         va_start(args, fmt);
2402         vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2403         va_end(args);
2404
2405         i915_capture_error_state(dev, wedged, error_msg);
2406         i915_report_and_clear_eir(dev);
2407
2408         if (wedged) {
2409                 atomic_or(I915_RESET_IN_PROGRESS_FLAG,
2410                                 &dev_priv->gpu_error.reset_counter);
2411
2412                 /*
2413                  * Wakeup waiting processes so that the reset function
2414                  * i915_reset_and_wakeup doesn't deadlock trying to grab
2415                  * various locks. By bumping the reset counter first, the woken
2416                  * processes will see a reset in progress and back off,
2417                  * releasing their locks and then wait for the reset completion.
2418                  * We must do this for _all_ gpu waiters that might hold locks
2419                  * that the reset work needs to acquire.
2420                  *
2421                  * Note: The wake_up serves as the required memory barrier to
2422                  * ensure that the waiters see the updated value of the reset
2423                  * counter atomic_t.
2424                  */
2425                 i915_error_wake_up(dev_priv, false);
2426         }
2427
2428         i915_reset_and_wakeup(dev);
2429 }
2430
2431 /* Called from drm generic code, passed 'crtc' which
2432  * we use as a pipe index
2433  */
2434 static int i915_enable_vblank(struct drm_device *dev, int pipe)
2435 {
2436         struct drm_i915_private *dev_priv = dev->dev_private;
2437         unsigned long irqflags;
2438
2439         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2440         if (INTEL_INFO(dev)->gen >= 4)
2441                 i915_enable_pipestat(dev_priv, pipe,
2442                                      PIPE_START_VBLANK_INTERRUPT_STATUS);
2443         else
2444                 i915_enable_pipestat(dev_priv, pipe,
2445                                      PIPE_VBLANK_INTERRUPT_STATUS);
2446         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2447
2448         return 0;
2449 }
2450
2451 static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
2452 {
2453         struct drm_i915_private *dev_priv = dev->dev_private;
2454         unsigned long irqflags;
2455         uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2456                                                      DE_PIPE_VBLANK(pipe);
2457
2458         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2459         ironlake_enable_display_irq(dev_priv, bit);
2460         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2461
2462         return 0;
2463 }
2464
2465 static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2466 {
2467         struct drm_i915_private *dev_priv = dev->dev_private;
2468         unsigned long irqflags;
2469
2470         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2471         i915_enable_pipestat(dev_priv, pipe,
2472                              PIPE_START_VBLANK_INTERRUPT_STATUS);
2473         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2474
2475         return 0;
2476 }
2477
2478 static int gen8_enable_vblank(struct drm_device *dev, int pipe)
2479 {
2480         struct drm_i915_private *dev_priv = dev->dev_private;
2481         unsigned long irqflags;
2482
2483         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2484         dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
2485         I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2486         POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2487         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2488         return 0;
2489 }
2490
2491 /* Called from drm generic code, passed 'crtc' which
2492  * we use as a pipe index
2493  */
2494 static void i915_disable_vblank(struct drm_device *dev, int pipe)
2495 {
2496         struct drm_i915_private *dev_priv = dev->dev_private;
2497         unsigned long irqflags;
2498
2499         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2500         i915_disable_pipestat(dev_priv, pipe,
2501                               PIPE_VBLANK_INTERRUPT_STATUS |
2502                               PIPE_START_VBLANK_INTERRUPT_STATUS);
2503         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2504 }
2505
2506 static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
2507 {
2508         struct drm_i915_private *dev_priv = dev->dev_private;
2509         unsigned long irqflags;
2510         uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2511                                                      DE_PIPE_VBLANK(pipe);
2512
2513         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2514         ironlake_disable_display_irq(dev_priv, bit);
2515         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2516 }
2517
2518 static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
2519 {
2520         struct drm_i915_private *dev_priv = dev->dev_private;
2521         unsigned long irqflags;
2522
2523         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2524         i915_disable_pipestat(dev_priv, pipe,
2525                               PIPE_START_VBLANK_INTERRUPT_STATUS);
2526         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2527 }
2528
2529 static void gen8_disable_vblank(struct drm_device *dev, int pipe)
2530 {
2531         struct drm_i915_private *dev_priv = dev->dev_private;
2532         unsigned long irqflags;
2533
2534         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2535         dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
2536         I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2537         POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2538         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2539 }
2540
2541 static bool
2542 ring_idle(struct intel_engine_cs *ring, u32 seqno)
2543 {
2544         return (list_empty(&ring->request_list) ||
2545                 i915_seqno_passed(seqno, ring->last_submitted_seqno));
2546 }
2547
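/*
 * Check whether the command latched in IPEHR is a semaphore wait:
 * MI_SEMAPHORE_WAIT (opcode 0x1c) on gen8+, or an MI_SEMAPHORE_MBOX
 * register compare/wait on earlier generations.
 */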
2548 static bool
2549 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
2550 {
2551         if (INTEL_INFO(dev)->gen >= 8) {
2552                 return (ipehr >> 23) == 0x1c;
2553         } else {
2554                 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2555                 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2556                                  MI_SEMAPHORE_REGISTER);
2557         }
2558 }
2559
2560 static struct intel_engine_cs *
2561 semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
2562 {
2563         struct drm_i915_private *dev_priv = ring->dev->dev_private;
2564         struct intel_engine_cs *signaller;
2565         int i;
2566
2567         if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
2568                 for_each_ring(signaller, dev_priv, i) {
2569                         if (ring == signaller)
2570                                 continue;
2571
2572                         if (offset == signaller->semaphore.signal_ggtt[ring->id])
2573                                 return signaller;
2574                 }
2575         } else {
2576                 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
2577
2578                 for_each_ring(signaller, dev_priv, i) {
2579                         if (ring == signaller)
2580                                 continue;
2581
2582                         if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
2583                                 return signaller;
2584                 }
2585         }
2586
2587         DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
2588                   ring->id, ipehr, offset);
2589
2590         return NULL;
2591 }
2592
2593 static struct intel_engine_cs *
2594 semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
2595 {
2596         struct drm_i915_private *dev_priv = ring->dev->dev_private;
2597         u32 cmd, ipehr, head;
2598         u64 offset = 0;
2599         int i, backwards;
2600
2601         ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2602         if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
2603                 return NULL;
2604
2605         /*
2606          * HEAD is likely pointing to the dword after the actual command,
2607          * so scan backwards until we find the MBOX. But limit it to just 3
2608          * or 4 dwords depending on the semaphore wait command size.
2609          * Note that we don't care about ACTHD here since that might
2610          * point at a batch, and semaphores are always emitted into the
2611          * ringbuffer itself.
2612          */
2613         head = I915_READ_HEAD(ring) & HEAD_ADDR;
2614         backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
2615
2616         for (i = backwards; i; --i) {
2617                 /*
2618                  * Be paranoid and presume the hw has gone off into the wild -
2619                  * our ring is smaller than what the hardware (and hence
2620                  * HEAD_ADDR) allows. Also handles wrap-around.
2621                  */
2622                 head &= ring->buffer->size - 1;
2623
2624                 /* This here seems to blow up */
2625                 cmd = ioread32(ring->buffer->virtual_start + head);
2626                 if (cmd == ipehr)
2627                         break;
2628
2629                 head -= 4;
2630         }
2631
2632         if (!i)
2633                 return NULL;
2634
2635         *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
2636         if (INTEL_INFO(ring->dev)->gen >= 8) {
2637                 offset = ioread32(ring->buffer->virtual_start + head + 12);
2638                 offset <<= 32;
2639                 offset |= ioread32(ring->buffer->virtual_start + head + 8);
2640         }
2641         return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
2642 }
2643
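/*
 * Returns 1 if the seqno this ring is waiting on has already been
 * signalled, 0 if the wait is still legitimate, and -1 if the signaller
 * cannot be determined or appears deadlocked itself.
 */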
2644 static int semaphore_passed(struct intel_engine_cs *ring)
2645 {
2646         struct drm_i915_private *dev_priv = ring->dev->dev_private;
2647         struct intel_engine_cs *signaller;
2648         u32 seqno;
2649
2650         ring->hangcheck.deadlock++;
2651
2652         signaller = semaphore_waits_for(ring, &seqno);
2653         if (signaller == NULL)
2654                 return -1;
2655
2656         /* Prevent pathological recursion due to driver bugs */
2657         if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
2658                 return -1;
2659
2660         if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
2661                 return 1;
2662
2663         /* cursory check for an unkickable deadlock */
2664         if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
2665             semaphore_passed(signaller) < 0)
2666                 return -1;
2667
2668         return 0;
2669 }
2670
2671 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2672 {
2673         struct intel_engine_cs *ring;
2674         int i;
2675
2676         for_each_ring(ring, dev_priv, i)
2677                 ring->hangcheck.deadlock = 0;
2678 }
2679
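/*
 * Classify a ring whose seqno has not advanced: ACTHD still moving means
 * the ring is active; a ring parked on WAIT_FOR_EVENT or on a semaphore
 * that has already been signalled gets kicked; otherwise it is hung.
 */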
2680 static enum intel_ring_hangcheck_action
2681 ring_stuck(struct intel_engine_cs *ring, u64 acthd)
2682 {
2683         struct drm_device *dev = ring->dev;
2684         struct drm_i915_private *dev_priv = dev->dev_private;
2685         u32 tmp;
2686
2687         if (acthd != ring->hangcheck.acthd) {
2688                 if (acthd > ring->hangcheck.max_acthd) {
2689                         ring->hangcheck.max_acthd = acthd;
2690                         return HANGCHECK_ACTIVE;
2691                 }
2692
2693                 return HANGCHECK_ACTIVE_LOOP;
2694         }
2695
2696         if (IS_GEN2(dev))
2697                 return HANGCHECK_HUNG;
2698
2699         /* Is the chip hanging on a WAIT_FOR_EVENT?
2700          * If so we can simply poke the RB_WAIT bit
2701          * and break the hang. This should work on
2702          * all but the second generation chipsets.
2703          */
2704         tmp = I915_READ_CTL(ring);
2705         if (tmp & RING_WAIT) {
2706                 i915_handle_error(dev, false,
2707                                   "Kicking stuck wait on %s",
2708                                   ring->name);
2709                 I915_WRITE_CTL(ring, tmp);
2710                 return HANGCHECK_KICK;
2711         }
2712
2713         if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
2714                 switch (semaphore_passed(ring)) {
2715                 default:
2716                         return HANGCHECK_HUNG;
2717                 case 1:
2718                         i915_handle_error(dev, false,
2719                                           "Kicking stuck semaphore on %s",
2720                                           ring->name);
2721                         I915_WRITE_CTL(ring, tmp);
2722                         return HANGCHECK_KICK;
2723                 case 0:
2724                         return HANGCHECK_WAIT;
2725                 }
2726         }
2727
2728         return HANGCHECK_HUNG;
2729 }
2730
2731 /*
2732  * This is called when the chip hasn't reported back with completed
2733  * batchbuffers in a long time. We keep track of per-ring seqno progress and,
2734  * if there is no progress, the hangcheck score for that ring is increased.
2735  * Further, acthd is inspected to see if the ring is stuck. In the stuck case
2736  * we kick the ring. If we see no progress on three subsequent calls
2737  * we assume the chip is wedged and try to fix it by resetting the chip.
2738  */
2739 static void i915_hangcheck_elapsed(struct work_struct *work)
2740 {
2741         struct drm_i915_private *dev_priv =
2742                 container_of(work, typeof(*dev_priv),
2743                              gpu_error.hangcheck_work.work);
2744         struct drm_device *dev = dev_priv->dev;
2745         struct intel_engine_cs *ring;
2746         int i;
2747         int busy_count = 0, rings_hung = 0;
2748         bool stuck[I915_NUM_RINGS] = { 0 };
2749 #define BUSY 1
2750 #define KICK 5
2751 #define HUNG 20
2752
2753         if (!i915.enable_hangcheck)
2754                 return;
2755
2756         for_each_ring(ring, dev_priv, i) {
2757                 u64 acthd;
2758                 u32 seqno;
2759                 bool busy = true;
2760
2761                 semaphore_clear_deadlocks(dev_priv);
2762
2763                 seqno = ring->get_seqno(ring, false);
2764                 acthd = intel_ring_get_active_head(ring);
2765
2766                 if (ring->hangcheck.seqno == seqno) {
2767                         if (ring_idle(ring, seqno)) {
2768                                 ring->hangcheck.action = HANGCHECK_IDLE;
2769
2770                                 if (waitqueue_active(&ring->irq_queue)) {
2771                                         /* Issue a wake-up to catch stuck h/w. */
2772                                         if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
2773                                                 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
2774                                                         DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2775                                                                   ring->name);
2776                                                 else
2777                                                         DRM_INFO("Fake missed irq on %s\n",
2778                                                                  ring->name);
2779                                                 wake_up_all(&ring->irq_queue);
2780                                         }
2781                                         /* Safeguard against driver failure */
2782                                         ring->hangcheck.score += BUSY;
2783                                 } else
2784                                         busy = false;
2785                         } else {
2786                                 /* We always increment the hangcheck score
2787                                  * if the ring is busy and still processing
2788                                  * the same request, so that no single request
2789                                  * can run indefinitely (such as a chain of
2790                                  * batches). The only time we do not increment
2791                                  * the hangcheck score on this ring is when
2792                                  * this ring is in a legitimate wait for
2793                                  * another ring. In that case the waiting ring
2794                                  * is a victim and we want to be sure we catch
2795                                  * the right culprit. Then, every time we do
2796                                  * kick the ring, we add a small increment to
2797                                  * the score so that we can catch a batch that
2798                                  * is being repeatedly kicked and is thus
2799                                  * responsible for stalling the machine.
2800                                  */
2801                                 ring->hangcheck.action = ring_stuck(ring,
2802                                                                     acthd);
2803
2804                                 switch (ring->hangcheck.action) {
2805                                 case HANGCHECK_IDLE:
2806                                 case HANGCHECK_WAIT:
2807                                 case HANGCHECK_ACTIVE:
2808                                         break;
2809                                 case HANGCHECK_ACTIVE_LOOP:
2810                                         ring->hangcheck.score += BUSY;
2811                                         break;
2812                                 case HANGCHECK_KICK:
2813                                         ring->hangcheck.score += KICK;
2814                                         break;
2815                                 case HANGCHECK_HUNG:
2816                                         ring->hangcheck.score += HUNG;
2817                                         stuck[i] = true;
2818                                         break;
2819                                 }
2820                         }
2821                 } else {
2822                         ring->hangcheck.action = HANGCHECK_ACTIVE;
2823
2824                         /* Gradually reduce the count so that we catch DoS
2825                          * attempts across multiple batches.
2826                          */
2827                         if (ring->hangcheck.score > 0)
2828                                 ring->hangcheck.score--;
2829
2830                         ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
2831                 }
2832
2833                 ring->hangcheck.seqno = seqno;
2834                 ring->hangcheck.acthd = acthd;
2835                 busy_count += busy;
2836         }
2837
2838         for_each_ring(ring, dev_priv, i) {
2839                 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
2840                         DRM_INFO("%s on %s\n",
2841                                  stuck[i] ? "stuck" : "no progress",
2842                                  ring->name);
2843                         rings_hung++;
2844                 }
2845         }
2846
2847         if (rings_hung)
2848                 return i915_handle_error(dev, true, "Ring hung");
2849
2850         if (busy_count)
2851                 /* Reset the timer in case the chip hangs without another
2852                  * request being added */
2853                 i915_queue_hangcheck(dev);
2854 }
2855
2856 void i915_queue_hangcheck(struct drm_device *dev)
2857 {
2858         struct i915_gpu_error *e = &to_i915(dev)->gpu_error;
2859
2860         if (!i915.enable_hangcheck)
2861                 return;
2862
2863         /* Don't continually defer the hangcheck so that it is always run at
2864          * least once after work has been scheduled on any ring. Otherwise,
2865          * we will ignore a hung ring if a second ring is kept busy.
2866          */
2867
2868         queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
2869                            round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
2870 }
2871
2872 static void ibx_irq_reset(struct drm_device *dev)
2873 {
2874         struct drm_i915_private *dev_priv = dev->dev_private;
2875
2876         if (HAS_PCH_NOP(dev))
2877                 return;
2878
2879         GEN5_IRQ_RESET(SDE);
2880
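             /* Also clear any pending south error interrupt status on CPT/LPT */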
2881         if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
2882                 I915_WRITE(SERR_INT, 0xffffffff);
2883 }
2884
2885 /*
2886  * SDEIER is also touched by the interrupt handler to work around missed PCH
2887  * interrupts. Hence we can't update it after the interrupt handler is enabled -
2888  * instead we unconditionally enable all PCH interrupt sources here, but then
2889  * only unmask them as needed with SDEIMR.
2890  *
2891  * This function needs to be called before interrupts are enabled.
2892  */
2893 static void ibx_irq_pre_postinstall(struct drm_device *dev)
2894 {
2895         struct drm_i915_private *dev_priv = dev->dev_private;
2896
2897         if (HAS_PCH_NOP(dev))
2898                 return;
2899
2900         WARN_ON(I915_READ(SDEIER) != 0);
2901         I915_WRITE(SDEIER, 0xffffffff);
2902         POSTING_READ(SDEIER);
2903 }
2904
2905 static void gen5_gt_irq_reset(struct drm_device *dev)
2906 {
2907         struct drm_i915_private *dev_priv = dev->dev_private;
2908
2909         GEN5_IRQ_RESET(GT);
2910         if (INTEL_INFO(dev)->gen >= 6)
2911                 GEN5_IRQ_RESET(GEN6_PM);
2912 }
2913
2914 /* drm_dma.h hooks */
2915
2916 static void ironlake_irq_reset(struct drm_device *dev)
2917 {
2918         struct drm_i915_private *dev_priv = dev->dev_private;
2919
2920         I915_WRITE(HWSTAM, 0xffffffff);
2921
2922         GEN5_IRQ_RESET(DE);
2923         if (IS_GEN7(dev))
2924                 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
2925
2926         gen5_gt_irq_reset(dev);
2927
2928         ibx_irq_reset(dev);
2929 }
2930
2931 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
2932 {
2933         enum pipe pipe;
2934
2935         I915_WRITE(PORT_HOTPLUG_EN, 0);
2936         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2937
2938         for_each_pipe(dev_priv, pipe)
2939                 I915_WRITE(PIPESTAT(pipe), 0xffff);
2940
2941         GEN5_IRQ_RESET(VLV_);
2942 }
2943
2944 static void valleyview_irq_preinstall(struct drm_device *dev)
2945 {
2946         struct drm_i915_private *dev_priv = dev->dev_private;
2947
2948         /* VLV magic */
2949         I915_WRITE(VLV_IMR, 0);
2950         I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
2951         I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
2952         I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
2953
2954         gen5_gt_irq_reset(dev);
2955
2956         I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
2957
2958         vlv_display_irq_reset(dev_priv);
2959 }
2960
2961 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
2962 {
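             /* GT interrupts are spread over four register banks on gen8+ */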
2963         GEN8_IRQ_RESET_NDX(GT, 0);
2964         GEN8_IRQ_RESET_NDX(GT, 1);
2965         GEN8_IRQ_RESET_NDX(GT, 2);
2966         GEN8_IRQ_RESET_NDX(GT, 3);
2967 }
2968
2969 static void gen8_irq_reset(struct drm_device *dev)
2970 {
2971         struct drm_i915_private *dev_priv = dev->dev_private;
2972         int pipe;
2973
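             /* Disable the master interrupt before resetting the individual units */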
2974         I915_WRITE(GEN8_MASTER_IRQ, 0);
2975         POSTING_READ(GEN8_MASTER_IRQ);
2976
2977         gen8_gt_irq_reset(dev_priv);
2978
2979         for_each_pipe(dev_priv, pipe)
2980                 if (intel_display_power_is_enabled(dev_priv,
2981                                                    POWER_DOMAIN_PIPE(pipe)))
2982                         GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
2983
2984         GEN5_IRQ_RESET(GEN8_DE_PORT_);
2985         GEN5_IRQ_RESET(GEN8_DE_MISC_);
2986         GEN5_IRQ_RESET(GEN8_PCU_);
2987
2988         if (HAS_PCH_SPLIT(dev))
2989                 ibx_irq_reset(dev);
2990 }
2991
2992 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
2993                                      unsigned int pipe_mask)
2994 {
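             /*
              * Re-initialize the DE pipe interrupt registers for the pipes in
              * pipe_mask from the cached de_irq_mask, with vblank and FIFO
              * underrun events always enabled in IER.
              */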
2995         uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
2996
2997         spin_lock_irq(&dev_priv->irq_lock);
2998         if (pipe_mask & 1 << PIPE_A)
2999                 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A,
3000                                   dev_priv->de_irq_mask[PIPE_A],
3001                                   ~dev_priv->de_irq_mask[PIPE_A] | extra_ier);
3002         if (pipe_mask & 1 << PIPE_B)
3003                 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B,
3004                                   dev_priv->de_irq_mask[PIPE_B],
3005                                   ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
3006         if (pipe_mask & 1 << PIPE_C)
3007                 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C,
3008                                   dev_priv->de_irq_mask[PIPE_C],
3009                                   ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
3010         spin_unlock_irq(&dev_priv->irq_lock);
3011 }
3012
3013 static void cherryview_irq_preinstall(struct drm_device *dev)
3014 {
3015         struct drm_i915_private *dev_priv = dev->dev_private;
3016
3017         I915_WRITE(GEN8_MASTER_IRQ, 0);
3018         POSTING_READ(GEN8_MASTER_IRQ);
3019
3020         gen8_gt_irq_reset(dev_priv);
3021
3022         GEN5_IRQ_RESET(GEN8_PCU_);
3023
3024         I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3025
3026         vlv_display_irq_reset(dev_priv);
3027 }
3028
3029 static void ibx_hpd_irq_setup(struct drm_device *dev)
3030 {
3031         struct drm_i915_private *dev_priv = dev->dev_private;
3032         struct intel_encoder *intel_encoder;
3033         u32 hotplug_irqs, hotplug, enabled_irqs = 0;
3034
3035         if (HAS_PCH_IBX(dev)) {
3036                 hotplug_irqs = SDE_HOTPLUG_MASK;
3037                 for_each_intel_encoder(dev, intel_encoder)
3038                         if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
3039                                 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
3040         } else if (HAS_PCH_SPT(dev)) {
3041                 hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
3042                 for_each_intel_encoder(dev, intel_encoder)
3043                         if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
3044                                 enabled_irqs |= hpd_spt[intel_encoder->hpd_pin];
3045         } else {
3046                 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3047                 for_each_intel_encoder(dev, intel_encoder)
3048                         if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
3049                                 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
3050         }
3051
3052         ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3053
3054         /*
3055          * Enable digital hotplug on the PCH, and configure the DP short pulse
3056          * duration to 2ms (which is the minimum in the Display Port spec)
3057          *
3058          * This register is the same on all known PCH chips.
3059          */
3060         hotplug = I915_READ(PCH_PORT_HOTPLUG);
3061         hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3062         hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3063         hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3064         hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3065         I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3066
3067         /* enable SPT PORTE hot plug */
3068         if (HAS_PCH_SPT(dev)) {
3069                 hotplug = I915_READ(PCH_PORT_HOTPLUG2);
3070                 hotplug |= PORTE_HOTPLUG_ENABLE;
3071                 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
3072         }
3073 }
3074
3075 static void bxt_hpd_irq_setup(struct drm_device *dev)
3076 {
3077         struct drm_i915_private *dev_priv = dev->dev_private;
3078         struct intel_encoder *intel_encoder;
3079         u32 hotplug_port = 0;
3080         u32 hotplug_ctrl;
3081
3082         /* Now, enable HPD */
3083         for_each_intel_encoder(dev, intel_encoder) {
3084                 if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state
3085                                 == HPD_ENABLED)
3086                         hotplug_port |= hpd_bxt[intel_encoder->hpd_pin];
3087         }
3088
3089         /* Mask all HPD control bits */
3090         hotplug_ctrl = I915_READ(BXT_HOTPLUG_CTL) & ~BXT_HOTPLUG_CTL_MASK;
3091
3092         /* Enable requested port in hotplug control */
3093         /* TODO: implement (short) HPD support on port A */
3094         WARN_ON_ONCE(hotplug_port & BXT_DE_PORT_HP_DDIA);
3095         if (hotplug_port & BXT_DE_PORT_HP_DDIB)
3096                 hotplug_ctrl |= BXT_DDIB_HPD_ENABLE;
3097         if (hotplug_port & BXT_DE_PORT_HP_DDIC)
3098                 hotplug_ctrl |= BXT_DDIC_HPD_ENABLE;
3099         I915_WRITE(BXT_HOTPLUG_CTL, hotplug_ctrl);
3100
3101         /* Unmask DDI hotplug in IMR */
3102         hotplug_ctrl = I915_READ(GEN8_DE_PORT_IMR) & ~hotplug_port;
3103         I915_WRITE(GEN8_DE_PORT_IMR, hotplug_ctrl);
3104
3105         /* Enable DDI hotplug in IER */
3106         hotplug_ctrl = I915_READ(GEN8_DE_PORT_IER) | hotplug_port;
3107         I915_WRITE(GEN8_DE_PORT_IER, hotplug_ctrl);
3108         POSTING_READ(GEN8_DE_PORT_IER);
3109 }
3110
3111 static void ibx_irq_postinstall(struct drm_device *dev)
3112 {
3113         struct drm_i915_private *dev_priv = dev->dev_private;
3114         u32 mask;
3115
3116         if (HAS_PCH_NOP(dev))
3117                 return;
3118
3119         if (HAS_PCH_IBX(dev))
3120                 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3121         else
3122                 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3123
3124         GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
3125         I915_WRITE(SDEIMR, ~mask);
3126 }
3127
3128 static void gen5_gt_irq_postinstall(struct drm_device *dev)
3129 {
3130         struct drm_i915_private *dev_priv = dev->dev_private;
3131         u32 pm_irqs, gt_irqs;
3132
3133         pm_irqs = gt_irqs = 0;
3134
3135         dev_priv->gt_irq_mask = ~0;
3136         if (HAS_L3_DPF(dev)) {
3137                 /* L3 parity interrupt is always unmasked. */
3138                 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3139                 gt_irqs |= GT_PARITY_ERROR(dev);
3140         }
3141
3142         gt_irqs |= GT_RENDER_USER_INTERRUPT;
3143         if (IS_GEN5(dev)) {
3144                 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
3145                            ILK_BSD_USER_INTERRUPT;
3146         } else {
3147                 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3148         }
3149
3150         GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3151
3152         if (INTEL_INFO(dev)->gen >= 6) {
3153                 /*
3154                  * RPS interrupts will get enabled/disabled on demand when RPS
3155                  * itself is enabled/disabled.
3156                  */
3157                 if (HAS_VEBOX(dev))
3158                         pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3159
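                     /*
                      * All PM interrupts start masked in IMR; only the VEBOX
                      * user interrupt (if present) is enabled in IER here.
                      */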
3160                 dev_priv->pm_irq_mask = 0xffffffff;
3161                 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
3162         }
3163 }
3164
3165 static int ironlake_irq_postinstall(struct drm_device *dev)
3166 {
3167         struct drm_i915_private *dev_priv = dev->dev_private;
3168         u32 display_mask, extra_mask;
3169
3170         if (INTEL_INFO(dev)->gen >= 7) {
3171                 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3172                                 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3173                                 DE_PLANEB_FLIP_DONE_IVB |
3174                                 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
3175                 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3176                               DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
3177         } else {
3178                 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3179                                 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
3180                                 DE_AUX_CHANNEL_A |
3181                                 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3182                                 DE_POISON);
3183                 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3184                                 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
3185         }
3186
3187         dev_priv->irq_mask = ~display_mask;
3188
3189         I915_WRITE(HWSTAM, 0xeffe);
3190
3191         ibx_irq_pre_postinstall(dev);
3192
3193         GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3194
3195         gen5_gt_irq_postinstall(dev);
3196
3197         ibx_irq_postinstall(dev);
3198
3199         if (IS_IRONLAKE_M(dev)) {
3200                 /* Enable PCU event interrupts
3201                  *
3202                  * Spinlocking is not required here for correctness since interrupt
3203                  * setup is guaranteed to run in single-threaded context. But we
3204                  * need it to keep the assert_spin_locked check happy. */
3205                 spin_lock_irq(&dev_priv->irq_lock);
3206                 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
3207                 spin_unlock_irq(&dev_priv->irq_lock);
3208         }
3209
3210         return 0;
3211 }
3212
3213 static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
3214 {
3215         u32 pipestat_mask;
3216         u32 iir_mask;
3217         enum pipe pipe;
3218
3219         pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3220                         PIPE_FIFO_UNDERRUN_STATUS;
3221
3222         for_each_pipe(dev_priv, pipe)
3223                 I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3224         POSTING_READ(PIPESTAT(PIPE_A));
3225
3226         pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3227                         PIPE_CRC_DONE_INTERRUPT_STATUS;
3228
3229         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3230         for_each_pipe(dev_priv, pipe)
3231                 i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3232
3233         iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3234                    I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3235                    I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3236         if (IS_CHERRYVIEW(dev_priv))
3237                 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3238         dev_priv->irq_mask &= ~iir_mask;
3239
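             /* Clear any stale events out of IIR before unmasking them in IER/IMR */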
3240         I915_WRITE(VLV_IIR, iir_mask);
3241         I915_WRITE(VLV_IIR, iir_mask);
3242         I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3243         I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3244         POSTING_READ(VLV_IMR);
3245 }
3246
3247 static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3248 {
3249         u32 pipestat_mask;
3250         u32 iir_mask;
3251         enum pipe pipe;
3252
3253         iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3254                    I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3255                    I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3256         if (IS_CHERRYVIEW(dev_priv))
3257                 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3258
3259         dev_priv->irq_mask |= iir_mask;
3260         I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3261         I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3262         I915_WRITE(VLV_IIR, iir_mask);
3263         I915_WRITE(VLV_IIR, iir_mask);
3264         POSTING_READ(VLV_IIR);
3265
3266         pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3267                         PIPE_CRC_DONE_INTERRUPT_STATUS;
3268
3269         i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3270         for_each_pipe(dev_priv, pipe)
3271                 i915_disable_pipestat(dev_priv, pipe, pipestat_mask);
3272
3273         pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3274                         PIPE_FIFO_UNDERRUN_STATUS;
3275
3276         for_each_pipe(dev_priv, pipe)
3277                 I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3278         POSTING_READ(PIPESTAT(PIPE_A));
3279 }
3280
3281 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3282 {
3283         assert_spin_locked(&dev_priv->irq_lock);
3284
3285         if (dev_priv->display_irqs_enabled)
3286                 return;
3287
3288         dev_priv->display_irqs_enabled = true;
3289
3290         if (intel_irqs_enabled(dev_priv))
3291                 valleyview_display_irqs_install(dev_priv);
3292 }
3293
3294 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3295 {
3296         assert_spin_locked(&dev_priv->irq_lock);
3297
3298         if (!dev_priv->display_irqs_enabled)
3299                 return;
3300
3301         dev_priv->display_irqs_enabled = false;
3302
3303         if (intel_irqs_enabled(dev_priv))
3304                 valleyview_display_irqs_uninstall(dev_priv);
3305 }
3306
3307 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3308 {
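             /*
              * Start with every display interrupt masked; the display-irq
              * install path below unmasks what is actually needed.
              */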
3309         dev_priv->irq_mask = ~0;
3310
3311         I915_WRITE(PORT_HOTPLUG_EN, 0);
3312         POSTING_READ(PORT_HOTPLUG_EN);
3313
3314         I915_WRITE(VLV_IIR, 0xffffffff);
3315         I915_WRITE(VLV_IIR, 0xffffffff);
3316         I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3317         I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3318         POSTING_READ(VLV_IMR);
3319
3320         /* Interrupt setup is already guaranteed to be single-threaded;
3321          * this is just to make the assert_spin_locked check happy. */
3322         spin_lock_irq(&dev_priv->irq_lock);
3323         if (dev_priv->display_irqs_enabled)
3324                 valleyview_display_irqs_install(dev_priv);
3325         spin_unlock_irq(&dev_priv->irq_lock);
3326 }
3327
3328 static int valleyview_irq_postinstall(struct drm_device *dev)
3329 {
3330         struct drm_i915_private *dev_priv = dev->dev_private;
3331
3332         vlv_display_irq_postinstall(dev_priv);
3333
3334         gen5_gt_irq_postinstall(dev);
3335
3336         /* ack & enable invalid PTE error interrupts */
3337 #if 0 /* FIXME: add support to irq handler for checking these bits */
3338         I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3339         I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
3340 #endif
3341
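             /* Finally, enable the master interrupt */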
3342         I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3343
3344         return 0;
3345 }
3346
3347 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3348 {
3349         /* These are interrupts we'll toggle with the ring mask register */
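             /*
              * Each array entry corresponds to one GT interrupt register bank:
              * 0 = RCS/BCS, 1 = VCS1/VCS2, 2 = PM (handled separately),
              * 3 = VECS.
              */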
3350         uint32_t gt_interrupts[] = {
3351                 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3352                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3353                         GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
3354                         GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3355                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3356                 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3357                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3358                         GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3359                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3360                 0,
3361                 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3362                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3363                 };
3364
3365         dev_priv->pm_irq_mask = 0xffffffff;
3366         GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3367         GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3368         /*
3369          * RPS interrupts will get enabled/disabled on demand when RPS itself
3370          * is enabled/disabled.
3371          */
3372         GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
3373         GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
3374 }
3375
3376 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3377 {
3378         uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3379         uint32_t de_pipe_enables;
3380         int pipe;
3381         u32 de_port_en = GEN8_AUX_CHANNEL_A;
3382
3383         if (IS_GEN9(dev_priv)) {
3384                 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3385                                   GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
3386                 de_port_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
3387                         GEN9_AUX_CHANNEL_D;
3388
3389                 if (IS_BROXTON(dev_priv))
3390                         de_port_en |= BXT_DE_PORT_GMBUS;
3391         } else
3392                 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
3393                                   GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3394
3395         de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3396                                            GEN8_PIPE_FIFO_UNDERRUN;
3397
3398         dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3399         dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3400         dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3401
3402         for_each_pipe(dev_priv, pipe)
3403                 if (intel_display_power_is_enabled(dev_priv,
3404                                 POWER_DOMAIN_PIPE(pipe)))
3405                         GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3406                                           dev_priv->de_irq_mask[pipe],
3407                                           de_pipe_enables);
3408
3409         GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_en, de_port_en);
3410 }
3411
3412 static int gen8_irq_postinstall(struct drm_device *dev)
3413 {
3414         struct drm_i915_private *dev_priv = dev->dev_private;
3415
3416         if (HAS_PCH_SPLIT(dev))
3417                 ibx_irq_pre_postinstall(dev);
3418
3419         gen8_gt_irq_postinstall(dev_priv);
3420         gen8_de_irq_postinstall(dev_priv);
3421
3422         if (HAS_PCH_SPLIT(dev))
3423                 ibx_irq_postinstall(dev);
3424
3425         I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
3426         POSTING_READ(GEN8_MASTER_IRQ);
3427
3428         return 0;
3429 }
3430
3431 static int cherryview_irq_postinstall(struct drm_device *dev)
3432 {
3433         struct drm_i915_private *dev_priv = dev->dev_private;
3434
3435         vlv_display_irq_postinstall(dev_priv);
3436
3437         gen8_gt_irq_postinstall(dev_priv);
3438
3439         I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
3440         POSTING_READ(GEN8_MASTER_IRQ);
3441
3442         return 0;
3443 }
3444
3445 static void gen8_irq_uninstall(struct drm_device *dev)
3446 {
3447         struct drm_i915_private *dev_priv = dev->dev_private;
3448
3449         if (!dev_priv)
3450                 return;
3451
3452         gen8_irq_reset(dev);
3453 }
3454
3455 static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
3456 {
3457         /* Interrupt setup is already guaranteed to be single-threaded;
3458          * this is just to make the assert_spin_locked check happy. */
3459         spin_lock_irq(&dev_priv->irq_lock);
3460         if (dev_priv->display_irqs_enabled)
3461                 valleyview_display_irqs_uninstall(dev_priv);
3462         spin_unlock_irq(&dev_priv->irq_lock);
3463
3464         vlv_display_irq_reset(dev_priv);
3465
3466         dev_priv->irq_mask = ~0;
3467 }
3468
3469 static void valleyview_irq_uninstall(struct drm_device *dev)
3470 {
3471         struct drm_i915_private *dev_priv = dev->dev_private;
3472
3473         if (!dev_priv)
3474                 return;
3475
3476         I915_WRITE(VLV_MASTER_IER, 0);
3477
3478         gen5_gt_irq_reset(dev);
3479
3480         I915_WRITE(HWSTAM, 0xffffffff);
3481
3482         vlv_display_irq_uninstall(dev_priv);
3483 }
3484
3485 static void cherryview_irq_uninstall(struct drm_device *dev)
3486 {
3487         struct drm_i915_private *dev_priv = dev->dev_private;
3488
3489         if (!dev_priv)
3490                 return;
3491
3492         I915_WRITE(GEN8_MASTER_IRQ, 0);
3493         POSTING_READ(GEN8_MASTER_IRQ);
3494
3495         gen8_gt_irq_reset(dev_priv);
3496
3497         GEN5_IRQ_RESET(GEN8_PCU_);
3498
3499         vlv_display_irq_uninstall(dev_priv);
3500 }
3501
3502 static void ironlake_irq_uninstall(struct drm_device *dev)
3503 {
3504         struct drm_i915_private *dev_priv = dev->dev_private;
3505
3506         if (!dev_priv)
3507                 return;
3508
3509         ironlake_irq_reset(dev);
3510 }
3511
3512 static void i8xx_irq_preinstall(struct drm_device * dev)
3513 {
3514         struct drm_i915_private *dev_priv = dev->dev_private;
3515         int pipe;
3516
3517         for_each_pipe(dev_priv, pipe)
3518                 I915_WRITE(PIPESTAT(pipe), 0);
3519         I915_WRITE16(IMR, 0xffff);
3520         I915_WRITE16(IER, 0x0);
3521         POSTING_READ16(IER);
3522 }
3523
3524 static int i8xx_irq_postinstall(struct drm_device *dev)
3525 {
3526         struct drm_i915_private *dev_priv = dev->dev_private;
3527
3528         I915_WRITE16(EMR,
3529                      ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3530
3531         /* Unmask the interrupts that we always want on. */
3532         dev_priv->irq_mask =
3533                 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3534                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3535                   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3536                   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3537         I915_WRITE16(IMR, dev_priv->irq_mask);
3538
3539         I915_WRITE16(IER,
3540                      I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3541                      I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3542                      I915_USER_INTERRUPT);
3543         POSTING_READ16(IER);
3544
3545         /* Interrupt setup is already guaranteed to be single-threaded;
3546          * this is just to make the assert_spin_locked check happy. */
3547         spin_lock_irq(&dev_priv->irq_lock);
3548         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3549         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3550         spin_unlock_irq(&dev_priv->irq_lock);
3551
3552         return 0;
3553 }
3554
3555 /*
3556  * Returns true when a page flip has completed.
3557  */
3558 static bool i8xx_handle_vblank(struct drm_device *dev,
3559                                int plane, int pipe, u32 iir)
3560 {
3561         struct drm_i915_private *dev_priv = dev->dev_private;
3562         u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3563
3564         if (!intel_pipe_handle_vblank(dev, pipe))
3565                 return false;
3566
3567         if ((iir & flip_pending) == 0)
3568                 goto check_page_flip;
3569
3570         /* We detect FlipDone by looking for the change in PendingFlip from '1'
3571          * to '0' on the following vblank, i.e. IIR has the Pendingflip
3572          * to '0' on the following vblank, i.e. IIR has the PendingFlip
3573          * the flip is completed (no longer pending). Since this doesn't raise
3574          * an interrupt per se, we watch for the change at vblank.
3575          */
3576         if (I915_READ16(ISR) & flip_pending)
3577                 goto check_page_flip;
3578
3579         intel_prepare_page_flip(dev, plane);
3580         intel_finish_page_flip(dev, pipe);
3581         return true;
3582
3583 check_page_flip:
3584         intel_check_page_flip(dev, pipe);
3585         return false;
3586 }
3587
3588 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3589 {
3590         struct drm_device *dev = arg;
3591         struct drm_i915_private *dev_priv = dev->dev_private;
3592         u16 iir, new_iir;
3593         u32 pipe_stats[2];
3594         int pipe;
3595         u16 flip_mask =
3596                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3597                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3598
3599         if (!intel_irqs_enabled(dev_priv))
3600                 return IRQ_NONE;
3601
3602         iir = I915_READ16(IIR);
3603         if (iir == 0)
3604                 return IRQ_NONE;
3605
3606         while (iir & ~flip_mask) {
3607                 /* Can't rely on pipestat interrupt bit in iir as it might
3608                  * have been cleared after the pipestat interrupt was received.
3609                  * It doesn't set the bit in iir again, but it still produces
3610                  * interrupts (for non-MSI).
3611                  */
3612                 spin_lock(&dev_priv->irq_lock);
3613                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3614                         DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3615
3616                 for_each_pipe(dev_priv, pipe) {
3617                         int reg = PIPESTAT(pipe);
3618                         pipe_stats[pipe] = I915_READ(reg);
3619
3620                         /*
3621                          * Clear the PIPE*STAT regs before the IIR
3622                          */
3623                         if (pipe_stats[pipe] & 0x8000ffff)
3624                                 I915_WRITE(reg, pipe_stats[pipe]);
3625                 }
3626                 spin_unlock(&dev_priv->irq_lock);
3627
3628                 I915_WRITE16(IIR, iir & ~flip_mask);
3629                 new_iir = I915_READ16(IIR); /* Flush posted writes */
3630
3631                 if (iir & I915_USER_INTERRUPT)
3632                         notify_ring(&dev_priv->ring[RCS]);
3633
3634                 for_each_pipe(dev_priv, pipe) {
3635                         int plane = pipe;
3636                         if (HAS_FBC(dev))
3637                                 plane = !plane;
3638
3639                         if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3640                             i8xx_handle_vblank(dev, plane, pipe, iir))
3641                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3642
3643                         if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3644                                 i9xx_pipe_crc_irq_handler(dev, pipe);
3645
3646                         if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3647                                 intel_cpu_fifo_underrun_irq_handler(dev_priv,
3648                                                                     pipe);
3649                 }
3650
3651                 iir = new_iir;
3652         }
3653
3654         return IRQ_HANDLED;
3655 }
3656
3657 static void i8xx_irq_uninstall(struct drm_device * dev)
3658 {
3659         struct drm_i915_private *dev_priv = dev->dev_private;
3660         int pipe;
3661
3662         for_each_pipe(dev_priv, pipe) {
3663                 /* Clear enable bits; then clear status bits */
3664                 I915_WRITE(PIPESTAT(pipe), 0);
3665                 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3666         }
3667         I915_WRITE16(IMR, 0xffff);
3668         I915_WRITE16(IER, 0x0);
3669         I915_WRITE16(IIR, I915_READ16(IIR));
3670 }
3671
3672 static void i915_irq_preinstall(struct drm_device * dev)
3673 {
3674         struct drm_i915_private *dev_priv = dev->dev_private;
3675         int pipe;
3676
3677         if (I915_HAS_HOTPLUG(dev)) {
3678                 I915_WRITE(PORT_HOTPLUG_EN, 0);
3679                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3680         }
3681
3682         I915_WRITE16(HWSTAM, 0xeffe);
3683         for_each_pipe(dev_priv, pipe)
3684                 I915_WRITE(PIPESTAT(pipe), 0);
3685         I915_WRITE(IMR, 0xffffffff);
3686         I915_WRITE(IER, 0x0);
3687         POSTING_READ(IER);
3688 }
3689
3690 static int i915_irq_postinstall(struct drm_device *dev)
3691 {
3692         struct drm_i915_private *dev_priv = dev->dev_private;
3693         u32 enable_mask;
3694
3695         I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3696
3697         /* Unmask the interrupts that we always want on. */
3698         dev_priv->irq_mask =
3699                 ~(I915_ASLE_INTERRUPT |
3700                   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3701                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3702                   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3703                   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3704
3705         enable_mask =
3706                 I915_ASLE_INTERRUPT |
3707                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3708                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3709                 I915_USER_INTERRUPT;
3710
3711         if (I915_HAS_HOTPLUG(dev)) {
3712                 I915_WRITE(PORT_HOTPLUG_EN, 0);
3713                 POSTING_READ(PORT_HOTPLUG_EN);
3714
3715                 /* Enable in IER... */
3716                 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3717                 /* and unmask in IMR */
3718                 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3719         }
3720
3721         I915_WRITE(IMR, dev_priv->irq_mask);
3722         I915_WRITE(IER, enable_mask);
3723         POSTING_READ(IER);
3724
3725         i915_enable_asle_pipestat(dev);
3726
3727         /* Interrupt setup is already guaranteed to be single-threaded;
3728          * this is just to make the assert_spin_locked check happy. */
3729         spin_lock_irq(&dev_priv->irq_lock);
3730         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3731         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3732         spin_unlock_irq(&dev_priv->irq_lock);
3733
3734         return 0;
3735 }
3736
3737 /*
3738  * Returns true when a page flip has completed.
3739  */
3740 static bool i915_handle_vblank(struct drm_device *dev,
3741                                int plane, int pipe, u32 iir)
3742 {
3743         struct drm_i915_private *dev_priv = dev->dev_private;
3744         u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3745
3746         if (!intel_pipe_handle_vblank(dev, pipe))
3747                 return false;
3748
3749         if ((iir & flip_pending) == 0)
3750                 goto check_page_flip;
3751
3752         /* We detect FlipDone by looking for the change in PendingFlip from '1'
3753          * to '0' on the following vblank, i.e. IIR has the Pendingflip
3754          * to '0' on the following vblank, i.e. IIR has the PendingFlip
3755          * the flip is completed (no longer pending). Since this doesn't raise
3756          * an interrupt per se, we watch for the change at vblank.
3757          */
3758         if (I915_READ(ISR) & flip_pending)
3759                 goto check_page_flip;
3760
3761         intel_prepare_page_flip(dev, plane);
3762         intel_finish_page_flip(dev, pipe);
3763         return true;
3764
3765 check_page_flip:
3766         intel_check_page_flip(dev, pipe);
3767         return false;
3768 }
3769
3770 static irqreturn_t i915_irq_handler(int irq, void *arg)
3771 {
3772         struct drm_device *dev = arg;
3773         struct drm_i915_private *dev_priv = dev->dev_private;
3774         u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
3775         u32 flip_mask =
3776                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3777                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3778         int pipe, ret = IRQ_NONE;
3779
3780         if (!intel_irqs_enabled(dev_priv))
3781                 return IRQ_NONE;
3782
3783         iir = I915_READ(IIR);
3784         do {
3785                 bool irq_received = (iir & ~flip_mask) != 0;
3786                 bool blc_event = false;
3787
3788                 /* Can't rely on pipestat interrupt bit in iir as it might
3789                  * have been cleared after the pipestat interrupt was received.
3790                  * It doesn't set the bit in iir again, but it still produces
3791                  * interrupts (for non-MSI).
3792                  */
3793                 spin_lock(&dev_priv->irq_lock);
3794                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3795                         DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3796
3797                 for_each_pipe(dev_priv, pipe) {
3798                         int reg = PIPESTAT(pipe);
3799                         pipe_stats[pipe] = I915_READ(reg);
3800
3801                         /* Clear the PIPE*STAT regs before the IIR */
3802                         if (pipe_stats[pipe] & 0x8000ffff) {
3803                                 I915_WRITE(reg, pipe_stats[pipe]);
3804                                 irq_received = true;
3805                         }
3806                 }
3807                 spin_unlock(&dev_priv->irq_lock);
3808
3809                 if (!irq_received)
3810                         break;
3811
3812                 /* Consume port.  Then clear IIR or we'll miss events */
3813                 if (I915_HAS_HOTPLUG(dev) &&
3814                     iir & I915_DISPLAY_PORT_INTERRUPT)
3815                         i9xx_hpd_irq_handler(dev);
3816
3817                 I915_WRITE(IIR, iir & ~flip_mask);
3818                 new_iir = I915_READ(IIR); /* Flush posted writes */
3819
3820                 if (iir & I915_USER_INTERRUPT)
3821                         notify_ring(&dev_priv->ring[RCS]);
3822
3823                 for_each_pipe(dev_priv, pipe) {
3824                         int plane = pipe;
3825                         if (HAS_FBC(dev))
3826                                 plane = !plane;
3827
3828                         if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3829                             i915_handle_vblank(dev, plane, pipe, iir))
3830                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3831
3832                         if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3833                                 blc_event = true;
3834
3835                         if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3836                                 i9xx_pipe_crc_irq_handler(dev, pipe);
3837
3838                         if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3839                                 intel_cpu_fifo_underrun_irq_handler(dev_priv,
3840                                                                     pipe);
3841                 }
3842
3843                 if (blc_event || (iir & I915_ASLE_INTERRUPT))
3844                         intel_opregion_asle_intr(dev);
3845
3846                 /* With MSI, interrupts are only generated when iir
3847                  * transitions from zero to nonzero.  If another bit got
3848                  * set while we were handling the existing iir bits, then
3849                  * we would never get another interrupt.
3850                  *
3851                  * This is fine on non-MSI as well, as if we hit this path
3852                  * we avoid exiting the interrupt handler only to generate
3853                  * another one.
3854                  *
3855                  * Note that for MSI this could cause a stray interrupt report
3856                  * if an interrupt landed in the time between writing IIR and
3857                  * the posting read.  This should be rare enough to never
3858                  * trigger the 99% of 100,000 interrupts test for disabling
3859                  * stray interrupts.
3860                  */
3861                 ret = IRQ_HANDLED;
3862                 iir = new_iir;
3863         } while (iir & ~flip_mask);
3864
3865         return ret;
3866 }
3867
3868 static void i915_irq_uninstall(struct drm_device * dev)
3869 {
3870         struct drm_i915_private *dev_priv = dev->dev_private;
3871         int pipe;
3872
3873         if (I915_HAS_HOTPLUG(dev)) {
3874                 I915_WRITE(PORT_HOTPLUG_EN, 0);
3875                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3876         }
3877
3878         I915_WRITE16(HWSTAM, 0xffff);
3879         for_each_pipe(dev_priv, pipe) {
3880                 /* Clear enable bits; then clear status bits */
3881                 I915_WRITE(PIPESTAT(pipe), 0);
3882                 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3883         }
3884         I915_WRITE(IMR, 0xffffffff);
3885         I915_WRITE(IER, 0x0);
3886
3887         I915_WRITE(IIR, I915_READ(IIR));
3888 }
3889
3890 static void i965_irq_preinstall(struct drm_device * dev)
3891 {
3892         struct drm_i915_private *dev_priv = dev->dev_private;
3893         int pipe;
3894
3895         I915_WRITE(PORT_HOTPLUG_EN, 0);
3896         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3897
3898         I915_WRITE(HWSTAM, 0xeffe);
3899         for_each_pipe(dev_priv, pipe)
3900                 I915_WRITE(PIPESTAT(pipe), 0);
3901         I915_WRITE(IMR, 0xffffffff);
3902         I915_WRITE(IER, 0x0);
3903         POSTING_READ(IER);
3904 }
3905
3906 static int i965_irq_postinstall(struct drm_device *dev)
3907 {
3908         struct drm_i915_private *dev_priv = dev->dev_private;
3909         u32 enable_mask;
3910         u32 error_mask;
3911
3912         /* Unmask the interrupts that we always want on. */
3913         dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
3914                                I915_DISPLAY_PORT_INTERRUPT |
3915                                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3916                                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3917                                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3918                                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3919                                I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3920
3921         enable_mask = ~dev_priv->irq_mask;
3922         enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3923                          I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3924         enable_mask |= I915_USER_INTERRUPT;
3925
3926         if (IS_G4X(dev))
3927                 enable_mask |= I915_BSD_USER_INTERRUPT;
3928
3929         /* Interrupt setup is already guaranteed to be single-threaded;
3930          * this is just to make the assert_spin_locked check happy. */
3931         spin_lock_irq(&dev_priv->irq_lock);
3932         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3933         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3934         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3935         spin_unlock_irq(&dev_priv->irq_lock);
3936
3937         /*
3938          * Enable some error detection, note the instruction error mask
3939          * bit is reserved, so we leave it masked.
3940          */
3941         if (IS_G4X(dev)) {
3942                 error_mask = ~(GM45_ERROR_PAGE_TABLE |
3943                                GM45_ERROR_MEM_PRIV |
3944                                GM45_ERROR_CP_PRIV |
3945                                I915_ERROR_MEMORY_REFRESH);
3946         } else {
3947                 error_mask = ~(I915_ERROR_PAGE_TABLE |
3948                                I915_ERROR_MEMORY_REFRESH);
3949         }
3950         I915_WRITE(EMR, error_mask);
3951
3952         I915_WRITE(IMR, dev_priv->irq_mask);
3953         I915_WRITE(IER, enable_mask);
3954         POSTING_READ(IER);
3955
3956         I915_WRITE(PORT_HOTPLUG_EN, 0);
3957         POSTING_READ(PORT_HOTPLUG_EN);
3958
3959         i915_enable_asle_pipestat(dev);
3960
3961         return 0;
3962 }
3963
3964 static void i915_hpd_irq_setup(struct drm_device *dev)
3965 {
3966         struct drm_i915_private *dev_priv = dev->dev_private;
3967         struct intel_encoder *intel_encoder;
3968         u32 hotplug_en;
3969
3970         assert_spin_locked(&dev_priv->irq_lock);
3971
3972         hotplug_en = I915_READ(PORT_HOTPLUG_EN);
3973         hotplug_en &= ~HOTPLUG_INT_EN_MASK;
3974         /* Note HDMI and DP share hotplug bits */
3975         /* enable bits are the same for all generations */
3976         for_each_intel_encoder(dev, intel_encoder)
3977                 if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
3978                         hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
3979         /* Programming the CRT detection parameters tends to generate a
3980          * spurious hotplug event about three seconds later. So just do
3981          * it once.
3982          */
3983         if (IS_G4X(dev))
3984                 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
3985         hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
3986         hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
3987
3988         /* Ignore TV since it's buggy */
3989         I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
3990 }
3991
3992 static irqreturn_t i965_irq_handler(int irq, void *arg)
3993 {
3994         struct drm_device *dev = arg;
3995         struct drm_i915_private *dev_priv = dev->dev_private;
3996         u32 iir, new_iir;
3997         u32 pipe_stats[I915_MAX_PIPES];
3998         int ret = IRQ_NONE, pipe;
3999         u32 flip_mask =
4000                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4001                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4002
4003         if (!intel_irqs_enabled(dev_priv))
4004                 return IRQ_NONE;
4005
4006         iir = I915_READ(IIR);
4007
4008         for (;;) {
4009                 bool irq_received = (iir & ~flip_mask) != 0;
4010                 bool blc_event = false;
4011
4012                 /* Can't rely on pipestat interrupt bit in iir as it might
4013                  * have been cleared after the pipestat interrupt was received.
4014                  * It doesn't set the bit in iir again, but it still produces
4015                  * interrupts (for non-MSI).
4016                  */
4017                 spin_lock(&dev_priv->irq_lock);
4018                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4019                         DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4020
4021                 for_each_pipe(dev_priv, pipe) {
4022                         int reg = PIPESTAT(pipe);
4023                         pipe_stats[pipe] = I915_READ(reg);
4024
4025                         /*
4026                          * Clear the PIPE*STAT regs before the IIR
4027                          */
4028                         if (pipe_stats[pipe] & 0x8000ffff) {
4029                                 I915_WRITE(reg, pipe_stats[pipe]);
4030                                 irq_received = true;
4031                         }
4032                 }
4033                 spin_unlock(&dev_priv->irq_lock);
4034
4035                 if (!irq_received)
4036                         break;
4037
4038                 ret = IRQ_HANDLED;
4039
4040                 /* Consume port.  Then clear IIR or we'll miss events */
4041                 if (iir & I915_DISPLAY_PORT_INTERRUPT)
4042                         i9xx_hpd_irq_handler(dev);
4043
4044                 I915_WRITE(IIR, iir & ~flip_mask);
4045                 new_iir = I915_READ(IIR); /* Flush posted writes */
4046
4047                 if (iir & I915_USER_INTERRUPT)
4048                         notify_ring(&dev_priv->ring[RCS]);
4049                 if (iir & I915_BSD_USER_INTERRUPT)
4050                         notify_ring(&dev_priv->ring[VCS]);
4051
4052                 for_each_pipe(dev_priv, pipe) {
4053                         if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
4054                             i915_handle_vblank(dev, pipe, pipe, iir))
4055                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
4056
4057                         if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4058                                 blc_event = true;
4059
4060                         if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4061                                 i9xx_pipe_crc_irq_handler(dev, pipe);
4062
4063                         if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4064                                 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
4065                 }
4066
4067                 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4068                         intel_opregion_asle_intr(dev);
4069
4070                 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
4071                         gmbus_irq_handler(dev);
4072
4073                 /* With MSI, interrupts are only generated when iir
4074                  * transitions from zero to nonzero.  If another bit got
4075                  * set while we were handling the existing iir bits, then
4076                  * we would never get another interrupt.
4077                  *
4078                  * This is fine on non-MSI as well, as if we hit this path
4079                  * we avoid exiting the interrupt handler only to generate
4080                  * another one.
4081                  *
4082                  * Note that for MSI this could cause a stray interrupt report
4083                  * if an interrupt landed in the time between writing IIR and
4084                  * the posting read.  This should be rare enough to never
4085                  * trigger the 99% of 100,000 interrupts test for disabling
4086                  * stray interrupts.
4087                  */
4088                 iir = new_iir;
4089         }
4090
4091         return ret;
4092 }
4093
4094 static void i965_irq_uninstall(struct drm_device * dev)
4095 {
4096         struct drm_i915_private *dev_priv = dev->dev_private;
4097         int pipe;
4098
4099         if (!dev_priv)
4100                 return;
4101
4102         I915_WRITE(PORT_HOTPLUG_EN, 0);
4103         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4104
4105         I915_WRITE(HWSTAM, 0xffffffff);
4106         for_each_pipe(dev_priv, pipe)
4107                 I915_WRITE(PIPESTAT(pipe), 0);
4108         I915_WRITE(IMR, 0xffffffff);
4109         I915_WRITE(IER, 0x0);
4110
4111         for_each_pipe(dev_priv, pipe)
4112                 I915_WRITE(PIPESTAT(pipe),
4113                            I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
4114         I915_WRITE(IIR, I915_READ(IIR));
4115 }
4116
4117 /**
4118  * intel_irq_init - initializes irq support
4119  * @dev_priv: i915 device instance
4120  *
4121  * This function initializes all the irq support including work items, timers
4122  * and all the vtables. It does not set up the interrupt itself, though.
4123  */
4124 void intel_irq_init(struct drm_i915_private *dev_priv)
4125 {
4126         struct drm_device *dev = dev_priv->dev;
4127
4128         intel_hpd_init_work(dev_priv);
4129
4130         INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
4131         INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4132
4133         /* Let's track the enabled rps events */
4134         if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
4135                 /* WaGsvRC0ResidencyMethod:vlv */
4136                 dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
4137         else
4138                 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4139
4140         INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
4141                           i915_hangcheck_elapsed);
4142
4143         pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
4144
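             /*
              * Pick a vblank counter implementation: gen2 has no usable
              * hardware frame counter, g4x and gen5+ provide a full 32-bit
              * counter, and the remaining platforms only expose 24 bits.
              */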
4145         if (IS_GEN2(dev_priv)) {
4146                 dev->max_vblank_count = 0;
4147                 dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
4148         } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
4149                 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4150                 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
4151         } else {
4152                 dev->driver->get_vblank_counter = i915_get_vblank_counter;
4153                 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4154         }
4155
4156         /*
4157          * Opt out of the vblank disable timer on everything except gen2.
4158          * Gen2 doesn't have a hardware frame counter and so depends on vblank
4159          * interrupts to produce sane vblank sequence numbers (wraparound sketch below).
4160          */
4161         if (!IS_GEN2(dev_priv))
4162                 dev->vblank_disable_immediate = true;
4163
4164         dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
4165         dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4166
4167         if (IS_CHERRYVIEW(dev_priv)) {
4168                 dev->driver->irq_handler = cherryview_irq_handler;
4169                 dev->driver->irq_preinstall = cherryview_irq_preinstall;
4170                 dev->driver->irq_postinstall = cherryview_irq_postinstall;
4171                 dev->driver->irq_uninstall = cherryview_irq_uninstall;
4172                 dev->driver->enable_vblank = valleyview_enable_vblank;
4173                 dev->driver->disable_vblank = valleyview_disable_vblank;
4174                 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4175         } else if (IS_VALLEYVIEW(dev_priv)) {
4176                 dev->driver->irq_handler = valleyview_irq_handler;
4177                 dev->driver->irq_preinstall = valleyview_irq_preinstall;
4178                 dev->driver->irq_postinstall = valleyview_irq_postinstall;
4179                 dev->driver->irq_uninstall = valleyview_irq_uninstall;
4180                 dev->driver->enable_vblank = valleyview_enable_vblank;
4181                 dev->driver->disable_vblank = valleyview_disable_vblank;
4182                 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4183         } else if (INTEL_INFO(dev_priv)->gen >= 8) {
4184                 dev->driver->irq_handler = gen8_irq_handler;
4185                 dev->driver->irq_preinstall = gen8_irq_reset;
4186                 dev->driver->irq_postinstall = gen8_irq_postinstall;
4187                 dev->driver->irq_uninstall = gen8_irq_uninstall;
4188                 dev->driver->enable_vblank = gen8_enable_vblank;
4189                 dev->driver->disable_vblank = gen8_disable_vblank;
4190                 if (HAS_PCH_SPLIT(dev))
4191                         dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
4192                 else
4193                         dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
4194         } else if (HAS_PCH_SPLIT(dev)) {
4195                 dev->driver->irq_handler = ironlake_irq_handler;
4196                 dev->driver->irq_preinstall = ironlake_irq_reset;
4197                 dev->driver->irq_postinstall = ironlake_irq_postinstall;
4198                 dev->driver->irq_uninstall = ironlake_irq_uninstall;
4199                 dev->driver->enable_vblank = ironlake_enable_vblank;
4200                 dev->driver->disable_vblank = ironlake_disable_vblank;
4201                 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
4202         } else {
4203                 if (INTEL_INFO(dev_priv)->gen == 2) {
4204                         dev->driver->irq_preinstall = i8xx_irq_preinstall;
4205                         dev->driver->irq_postinstall = i8xx_irq_postinstall;
4206                         dev->driver->irq_handler = i8xx_irq_handler;
4207                         dev->driver->irq_uninstall = i8xx_irq_uninstall;
4208                 } else if (INTEL_INFO(dev_priv)->gen == 3) {
4209                         dev->driver->irq_preinstall = i915_irq_preinstall;
4210                         dev->driver->irq_postinstall = i915_irq_postinstall;
4211                         dev->driver->irq_uninstall = i915_irq_uninstall;
4212                         dev->driver->irq_handler = i915_irq_handler;
4213                 } else {
4214                         dev->driver->irq_preinstall = i965_irq_preinstall;
4215                         dev->driver->irq_postinstall = i965_irq_postinstall;
4216                         dev->driver->irq_uninstall = i965_irq_uninstall;
4217                         dev->driver->irq_handler = i965_irq_handler;
4218                 }
4219                 if (I915_HAS_HOTPLUG(dev_priv))
4220                         dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4221                 dev->driver->enable_vblank = i915_enable_vblank;
4222                 dev->driver->disable_vblank = i915_disable_vblank;
4223         }
4224 }
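
/*
 * Illustrative sketch (not compiled): why max_vblank_count matters.  When a
 * hardware frame counter is available, the DRM core works out how many
 * frames passed while vblank interrupts were off by masking the counter
 * difference with max_vblank_count, which is why the value must match the
 * counter width.  The exact core code differs, but the arithmetic is roughly:
 *
 *	u32 diff = (cur_frame - last_frame) & dev->max_vblank_count;
 *
 *	e.g. a 24-bit counter wrapping from 0xfffffe to 0x000003 yields
 *	diff = 0x000005, i.e. five vblanks elapsed.
 *
 * Gen2 has no hardware frame counter at all, which is why it keeps
 * max_vblank_count at 0 and stays on the vblank disable timer above.
 * Variable names here are illustrative, not the actual core code.
 */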
4225
4226 /**
4227  * intel_irq_install - enables the hardware interrupt
4228  * @dev_priv: i915 device instance
4229  *
4230  * This function enables the hardware interrupt handling, but leaves hotplug
4231  * handling disabled. It is called after intel_irq_init().
4232  *
4233  * In the driver load and resume code we need working interrupts in a few places
4234  * but don't want to deal with the hassle of concurrent probe and hotplug
4235  * workers. Hence the split into this two-stage approach (sketched below).
4236  */
4237 int intel_irq_install(struct drm_i915_private *dev_priv)
4238 {
4239         /*
4240          * We enable some interrupt sources in our postinstall hooks, so mark
4241          * interrupts as enabled _before_ actually enabling them to avoid
4242          * special cases in our ordering checks.
4243          */
4244         dev_priv->pm.irqs_enabled = true;
4245
4246         return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
4247 }
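
/*
 * Illustrative sketch (not compiled, and not the actual driver load code):
 * how the two-stage split described above is intended to be used.
 * intel_irq_init() only sets up vtables, work items and timers;
 * intel_irq_install() then enables the interrupt once the early setup no
 * longer risks racing with hotplug or probe workers, and
 * intel_irq_uninstall() undoes both on unload.  Error handling and labels
 * here are illustrative only.
 *
 *	intel_irq_init(dev_priv);
 *	... remaining early setup that must not see interrupt work ...
 *	ret = intel_irq_install(dev_priv);
 *	if (ret)
 *		goto out_err;
 *	...
 *	intel_irq_uninstall(dev_priv);		// on driver unload
 */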
4248
4249 /**
4250  * intel_irq_uninstall - finalizes all irq handling
4251  * @dev_priv: i915 device instance
4252  *
4253  * This stops interrupt and hotplug handling, and unregisters and frees all
4254  * resources acquired in the init functions.
4255  */
4256 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4257 {
4258         drm_irq_uninstall(dev_priv->dev);
4259         intel_hpd_cancel_work(dev_priv);
4260         dev_priv->pm.irqs_enabled = false;
4261 }
4262
4263 /**
4264  * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4265  * @dev_priv: i915 device instance
4266  *
4267  * This function is used to disable interrupts at runtime, both in the runtime
4268  * pm and the system suspend/resume code.
4269  */
4270 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4271 {
4272         dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
4273         dev_priv->pm.irqs_enabled = false;
4274         synchronize_irq(dev_priv->dev->irq); /* wait for any running handler */
4275 }
4276
4277 /**
4278  * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4279  * @dev_priv: i915 device instance
4280  *
4281  * This function is used to enable interrupts at runtime, both in the runtime
4282  * pm and the system suspend/resume code (see the pairing sketch below).
4283  */
4284 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4285 {
4286         dev_priv->pm.irqs_enabled = true;
4287         dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
4288         dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
4289 }
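
/*
 * Illustrative sketch (not compiled): the intended pairing of the two
 * helpers above in a suspend/resume path.  The actual suspend/resume code
 * does more than this; the point is only the symmetry and the ordering
 * around the device power transitions.
 *
 *	// suspend side
 *	intel_runtime_pm_disable_interrupts(dev_priv);
 *	... power the device down ...
 *
 *	// resume side
 *	... power the device back up ...
 *	intel_runtime_pm_enable_interrupts(dev_priv);
 */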