/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

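/*
 * Note the ordering above: mask everything in IMR first, then zero IER so
 * nothing new can be asserted, and only then ack IIR. IIR is written (and
 * posting-read) twice because the hardware can hold one asserted event plus
 * one queued event; the first write retires the head of that two-deep queue
 * and the second clears any event that pops up behind it. As an
 * illustration of the token pasting, GEN5_IRQ_RESET(GT) expands into
 * exactly this sequence on GTIMR, GTIER and GTIIR.
 */
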
/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u32 val = I915_READ(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
}

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     uint32_t mask,
				     uint32_t bits)
{
	uint32_t val;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. So that concurrent read-modify-write
 * cycles don't interfere, these bits are protected by a spinlock.
 * Since this function is usually not called from a context where
 * the lock is held already, this function acquires the lock itself.
 * A non-locking version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   uint32_t mask,
				   uint32_t bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}

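/*
 * A minimal usage sketch (illustrative, not necessarily a call site in this
 * file): enabling CRT hotplug detection amounts to
 *
 *	i915_hotplug_interrupt_update(dev_priv, CRT_HOTPLUG_INT_EN,
 *				      CRT_HOTPLUG_INT_EN);
 *
 * i.e. @mask names the PORT_HOTPLUG_EN bits being updated and @bits the
 * subset of them that should end up set.
 */
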
/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    uint32_t interrupt_mask,
			    uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask) {
		dev_priv->irq_mask = new_val;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

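/*
 * The IMR convention used throughout this file: a set bit in an IMR
 * register *masks* (disables) that interrupt. So for each bit selected by
 * @interrupt_mask, the new mask gets a 0 where @enabled_irq_mask has a 1
 * and a 1 where it has a 0. For a hypothetical bit X,
 * ilk_update_display_irq(dev_priv, X, X) unmasks X, while
 * ilk_update_display_irq(dev_priv, X, 0) masks it again.
 */
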
/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
	POSTING_READ_FW(GTIMR);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
				  uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_disable_pm_irq(dev_priv, mask);
}

void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	i915_reg_t reg = gen6_pm_iir(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	POSTING_READ(reg);
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}

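/*
 * The back-to-back IIR writes above are the same "IIR can queue up two
 * events" paranoia as in the GEN5/GEN8_IRQ_RESET macros: the first write
 * acks the visible RPS events, the second catches an event that may have
 * been sitting behind it in the queue.
 */
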
void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	WARN_ON_ONCE(dev_priv->rps.pm_iir);
	WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
				dev_priv->pm_rps_events);
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}

u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
{
	return (mask & ~dev_priv->rps.pm_intr_keep);
}

void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.interrupts_enabled = false;

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));

	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
				~dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->drm.irq);

	/* Now that we will not be generating any more work, flush any
	 * outstanding tasks. As we are called on the RPS idle path,
	 * we will reset the GPU to minimum frequencies, so the current
	 * state of the worker can be discarded.
	 */
	cancel_work_sync(&dev_priv->rps.work);
	gen6_reset_rps_interrupts(dev_priv);
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				uint32_t interrupt_mask,
				uint32_t enabled_irq_mask)
{
	uint32_t new_val;
	uint32_t old_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 uint32_t interrupt_mask,
			 uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}

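/*
 * PIPESTAT packs status bits in the low 16 bits and the corresponding
 * enable bits 16 positions higher, which is why "status_mask << 16" is the
 * default enable mask below. VLV/CHV need the fixups above because a few
 * bits (the sprite flip-done ones, and FIFO underrun, which has no enable
 * bit at all) don't follow that 1:1 layout.
 */
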
void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_GEN(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

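/*
 * A worked example for the cooked counter above (hypothetical numbers):
 * with htotal = 2200 and vbl_start converted to 2,376,000 pixels, a raw
 * frame count of N and a pixel counter reading of 2,400,000 (i.e. already
 * past vblank start) yields N + 1, matching the gen4+ convention of
 * incrementing at the start of vblank rather than at the start of active.
 */
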
static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}

/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode = &crtc->base.hwmode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev_priv))
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank. So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem. We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
				DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime,
				    const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	return ret;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	if (!crtc->hwmode.crtc_clock) {
		DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     &crtc->hwmode);
}

static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
{
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev_priv, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

static void notify_ring(struct intel_engine_cs *engine)
{
	smp_store_mb(engine->breadcrumbs.irq_posted, true);
	if (intel_engine_wakeup(engine)) {
		trace_i915_gem_request_notify(engine);
		engine->breadcrumbs.irq_wakeups++;
	}
}

static void vlv_c0_read(struct drm_i915_private *dev_priv,
			struct intel_rps_ei *ei)
{
	ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}

static bool vlv_c0_above(struct drm_i915_private *dev_priv,
			 const struct intel_rps_ei *old,
			 const struct intel_rps_ei *now,
			 int threshold)
{
	u64 time, c0;
	unsigned int mul = 100;

	if (old->cz_clock == 0)
		return false;

	if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
		mul <<= 8;

	time = now->cz_clock - old->cz_clock;
	time *= threshold * dev_priv->czclk_freq;

	/* Workload can be split between render + media, e.g. SwapBuffers
	 * being blitted in X after being rendered in mesa. To account for
	 * this we need to combine both engines into our activity counter.
	 */
	c0 = now->render_c0 - old->render_c0;
	c0 += now->media_c0 - old->media_c0;
	c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;

	return c0 >= time;
}

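/*
 * Sketching the algebra of vlv_c0_above(), with "mul" folding in the 100x
 * percentage scale and the optional 256x counter range, the comparison is:
 *
 *	(render_c0 + media_c0 deltas) * mul * VLV_CZ_CLOCK_TO_MILLI_SEC
 *		>= cz_clock delta * threshold * czclk_freq
 *
 * i.e. it returns true when the combined render+media C0 residency over the
 * evaluation interval meets the given threshold percentage.
 */
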
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
	vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
	dev_priv->rps.up_ei = dev_priv->rps.down_ei;
}

static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
		return 0;

	vlv_c0_read(dev_priv, &now);
	if (now.cz_clock == 0)
		return 0;

	if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
		if (!vlv_c0_above(dev_priv,
				  &dev_priv->rps.down_ei, &now,
				  dev_priv->rps.down_threshold))
			events |= GEN6_PM_RP_DOWN_THRESHOLD;
		dev_priv->rps.down_ei = now;
	}

	if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
		if (vlv_c0_above(dev_priv,
				 &dev_priv->rps.up_ei, &now,
				 dev_priv->rps.up_threshold))
			events |= GEN6_PM_RP_UP_THRESHOLD;
		dev_priv->rps.up_ei = now;
	}

	return events;
}

static bool any_waiters(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;

	for_each_engine(engine, dev_priv)
		if (intel_engine_has_waiter(engine))
			return true;

	return false;
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	bool client_boost;
	int new_delay, adj, min, max;
	u32 pm_iir;

	spin_lock_irq(&dev_priv->irq_lock);
	/* Speed up work cancellation during disabling rps interrupts. */
	if (!dev_priv->rps.interrupts_enabled) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	client_boost = dev_priv->rps.client_boost;
	dev_priv->rps.client_boost = false;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

	adj = dev_priv->rps.last_adj;
	new_delay = dev_priv->rps.cur_freq;
	min = dev_priv->rps.min_freq_softlimit;
	max = dev_priv->rps.max_freq_softlimit;

	if (client_boost) {
		new_delay = dev_priv->rps.max_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq - adj) {
			new_delay = dev_priv->rps.efficient_freq;
			adj = 0;
		}
	} else if (any_waiters(dev_priv)) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
	} else { /* unknown event */
		adj = 0;
	}

	dev_priv->rps.last_adj = adj;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay += adj;
	new_delay = clamp_t(int, new_delay, min, max);

	intel_set_rps(dev_priv, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}

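/*
 * rps.last_adj makes the frequency ramp exponential: each consecutive up-
 * or down-threshold interrupt doubles the step (1, 2, 4, ... on most parts;
 * 2, 4, 8, ... on CHV, whose frequency encoding only accepts even values),
 * while a boost, a pending waiter, or an unknown event resets the step to
 * zero. A steady workload thus converges quickly without a noisy one
 * oscillating wildly.
 */
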
/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely that the same row will go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(&dev_priv->irq_lock);

	mutex_unlock(&dev_priv->drm.struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
					       u32 iir)
{
	if (!HAS_L3_DPF(dev_priv))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev_priv);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		notify_ring(&dev_priv->engine[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(&dev_priv->engine[VCS]);
}

static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		notify_ring(&dev_priv->engine[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(&dev_priv->engine[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(&dev_priv->engine[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(dev_priv))
		ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
}

static __always_inline void
gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
{
	if (iir & (GT_RENDER_USER_INTERRUPT << test_shift))
		notify_ring(engine);
	if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift))
		tasklet_schedule(&engine->irq_tasklet);
}

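/*
 * On gen8+ each GT IIR packs the status bits for two engines into one
 * 32-bit register, so callers pass the per-engine shift: in
 * gen8_gt_irq_handler() below, gt_iir[0] is decoded once with
 * GEN8_RCS_IRQ_SHIFT and once with GEN8_BCS_IRQ_SHIFT.
 */
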
static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
				   u32 master_ctl,
				   u32 gt_iir[4])
{
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0));
		if (gt_iir[0]) {
			I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1));
		if (gt_iir[1]) {
			I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3));
		if (gt_iir[3]) {
			I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	if (master_ctl & GEN8_GT_PM_IRQ) {
		gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2));
		if (gt_iir[2] & dev_priv->pm_rps_events) {
			I915_WRITE_FW(GEN8_GT_IIR(2),
				      gt_iir[2] & dev_priv->pm_rps_events);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	return ret;
}

static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
				u32 gt_iir[4])
{
	if (gt_iir[0]) {
		gen8_cs_irq_handler(&dev_priv->engine[RCS],
				    gt_iir[0], GEN8_RCS_IRQ_SHIFT);
		gen8_cs_irq_handler(&dev_priv->engine[BCS],
				    gt_iir[0], GEN8_BCS_IRQ_SHIFT);
	}

	if (gt_iir[1]) {
		gen8_cs_irq_handler(&dev_priv->engine[VCS],
				    gt_iir[1], GEN8_VCS1_IRQ_SHIFT);
		gen8_cs_irq_handler(&dev_priv->engine[VCS2],
				    gt_iir[1], GEN8_VCS2_IRQ_SHIFT);
	}

	if (gt_iir[3])
		gen8_cs_irq_handler(&dev_priv->engine[VECS],
				    gt_iir[3], GEN8_VECS_IRQ_SHIFT);

	if (gt_iir[2] & dev_priv->pm_rps_events)
		gen6_rps_irq_handler(dev_priv, gt_iir[2]);
}

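/*
 * The ack/handle split above lets a top-level handler clear all four GT IIR
 * registers up front (with the master interrupt still disabled, via the raw
 * _FW accessors) and only then dispatch the cached gt_iir[] values; the
 * gen8 and cherryview top-level handlers share these two helpers.
 */
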
static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool pch_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
	case PORT_C:
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
	case PORT_D:
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
	}
}

/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
			       bool long_pulse_detect(enum port port, u32 val))
{
	enum port port;
	int i;

	for_each_hpd_pin(i) {
		if ((hpd[i] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(i);

		if (!intel_hpd_pin_to_port(i, &port))
			continue;

		if (long_pulse_detect(port, dig_hotplug_reg))
			*long_mask |= BIT(i);
	}

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg, *pin_mask);
}

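/*
 * Accumulation in practice (a sketch of how the SPT PCH path uses this):
 * the handler zeroes pin_mask/long_mask once, then calls
 * intel_get_hpd_pins() with the hpd_spt table and
 * spt_port_hotplug_long_detect for ports A-D, and again with
 * spt_port_hotplug2_long_detect for port E, OR-ing the results into the
 * same two masks before handing them to the hotplug code.
 */
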
static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

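/*
 * Both handlers wake the same gmbus_wait_queue: the wake-up is only a hint,
 * and each waiter re-checks its own status register to decide whether its
 * transfer actually completed, so sharing one queue between GMBUS and DP
 * AUX completions is sufficient.
 */
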
#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_DEBUG_KMS("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm,
								 pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif

static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	uint32_t res1, res2;

	if (INTEL_GEN(dev_priv) >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

1403c0d4
PZ
1582/* The RPS events need forcewake, so we add them to a work queue and mask their
1583 * IMR bits until the work is done. Other interrupts can be processed without
1584 * the work queue. */
1585static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
baf02a1f 1586{
a6706b45 1587 if (pm_iir & dev_priv->pm_rps_events) {
59cdb63d 1588 spin_lock(&dev_priv->irq_lock);
480c8033 1589 gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
d4d70aa5
ID
1590 if (dev_priv->rps.interrupts_enabled) {
1591 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
c33d247d 1592 schedule_work(&dev_priv->rps.work);
d4d70aa5 1593 }
59cdb63d 1594 spin_unlock(&dev_priv->irq_lock);
baf02a1f 1595 }
baf02a1f 1596
c9a9a268
ID
1597 if (INTEL_INFO(dev_priv)->gen >= 8)
1598 return;
1599
2d1fe073 1600 if (HAS_VEBOX(dev_priv)) {
1403c0d4 1601 if (pm_iir & PM_VEBOX_USER_INTERRUPT)
4a570db5 1602 notify_ring(&dev_priv->engine[VECS]);
12638c57 1603
aaecdf61
DV
1604 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
1605 DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
12638c57 1606 }
baf02a1f
BW
1607}
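/*
 * Illustrative sketch (not part of the driver): gen6_rps_irq_handler() above
 * is the usual top-half/bottom-half split for events that need extra
 * resources (here forcewake) -- mask the bits in IMR, stash the IIR
 * snapshot, and let a worker unmask once it is done. The bare shape of
 * that pattern, with hypothetical example_* names:
 */
#if 0	/* sketch only */
static void example_irq_top_half(struct example_dev *dev, u32 iir)
{
	spin_lock(&dev->irq_lock);
	example_mask_irq(dev, iir & dev->deferred_events); /* mask in IMR */
	dev->pending_iir |= iir & dev->deferred_events;    /* stash for worker */
	schedule_work(&dev->deferred_work);
	spin_unlock(&dev->irq_lock);
}

static void example_irq_bottom_half(struct work_struct *work)
{
	struct example_dev *dev = container_of(work, struct example_dev,
					       deferred_work);
	u32 iir;

	spin_lock_irq(&dev->irq_lock);
	iir = dev->pending_iir;
	dev->pending_iir = 0;
	example_unmask_irq(dev, iir);	/* safe: the worker holds the resource */
	spin_unlock_irq(&dev->irq_lock);

	example_process_events(dev, iir);
}
#endif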

static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	bool ret;

	ret = drm_handle_vblank(&dev_priv->drm, pipe);
	if (ret)
		intel_finish_page_flip_mmio(dev_priv, pipe);

	return ret;
}

static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv,
					u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	int pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!mask)
			continue;

		reg = PIPESTAT(pipe);
		mask |= PIPESTAT_INT_ENABLE_MASK;
		pipe_stats[pipe] = I915_READ(reg) & mask;

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
					PIPESTAT_INT_STATUS_MASK))
			I915_WRITE(reg, pipe_stats[pipe]);
	}
	spin_unlock(&dev_priv->irq_lock);
}

static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
					    u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
		    intel_pipe_handle_vblank(dev_priv, pipe))
			intel_check_page_flip(dev_priv, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
			intel_finish_page_flip_cs(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}

static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
{
	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

	if (hotplug_status)
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);

	return hotplug_status;
}

static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				 u32 hotplug_status)
{
	u32 pin_mask = 0, long_mask = 0;

	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

		if (hotplug_trigger) {
			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
					   hotplug_trigger, hpd_status_g4x,
					   i9xx_port_hotplug_long_detect);

			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}

		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev_priv);
	} else {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

		if (hotplug_trigger) {
			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
					   hotplug_trigger, hpd_status_i915,
					   i9xx_port_hotplug_long_detect);
			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}
	}
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 iir, gt_iir, pm_iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);
		iir = I915_READ(VLV_IIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
		 * bits this time around.
		 */
		I915_WRITE(VLV_MASTER_IER, 0);
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);

		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);

		I915_WRITE(VLV_IER, ier);
		I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
		POSTING_READ(VLV_MASTER_IER);

		if (gt_iir)
			snb_gt_irq_handler(dev_priv, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}
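/*
 * Illustrative sketch (not part of the driver): the "theory on interrupt
 * generation" comment above boils down to: the CPU interrupt is edge
 * triggered, so the handler forces the aggregate condition back to 0
 * (master off, IER cleared) before acking sources, then restores it so any
 * still-pending IIR bit produces a fresh 0->1 edge. Hypothetical example_*
 * names, not real registers:
 */
#if 0	/* sketch only */
static void example_edge_safe_irq(struct example_dev *dev)
{
	u32 ier;

	write_reg(dev, EXAMPLE_MASTER_IER, 0);	/* force the edge input to 0 */
	ier = read_reg(dev, EXAMPLE_IER);
	write_reg(dev, EXAMPLE_IER, 0);

	example_ack_and_handle_sources(dev);	/* may leave bits pending */

	write_reg(dev, EXAMPLE_IER, ier);	/* restore enables */
	write_reg(dev, EXAMPLE_MASTER_IER, EXAMPLE_MASTER_ENABLE);
	(void)read_reg(dev, EXAMPLE_MASTER_IER);	/* post the write */
}
#endif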

static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 master_ctl, iir;
		u32 gt_iir[4] = {};
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
		 * bits this time around.
		 */
		I915_WRITE(GEN8_MASTER_IRQ, 0);
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);

		gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);

		I915_WRITE(VLV_IER, ier);
		I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
		POSTING_READ(GEN8_MASTER_IRQ);

		gen8_gt_irq_handler(dev_priv, gt_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	/*
	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
	 * unless we touch the hotplug register, even if hotplug_trigger is
	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
	 * errors.
	 */
	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	if (!hotplug_trigger) {
		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
			PORTD_HOTPLUG_STATUS_MASK |
			PORTC_HOTPLUG_STATUS_MASK |
			PORTB_HOTPLUG_STATUS_MASK;
		dig_hotplug_reg &= ~mask;
	}

	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
	if (!hotplug_trigger)
		return;

	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   pch_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}
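/*
 * Illustrative sketch (not part of the driver): PCH_PORT_HOTPLUG is a
 * write-one-to-clear status register, so the read-modify-write above does
 * double duty -- it acks the PCH even when hotplug_trigger is zero (the
 * quirk the comment describes) while masking out status bits that must stay
 * pending. The generic write-1-to-clear pattern, with hypothetical names:
 */
#if 0	/* sketch only */
static u32 example_ack_w1c_status(struct example_dev *dev, u32 keep_mask)
{
	u32 status = read_reg(dev, EXAMPLE_STATUS);

	/* Writing a 1 clears that bit; writing 0 leaves it pending. */
	write_reg(dev, EXAMPLE_STATUS, status & ~keep_mask);

	return status;
}
#endif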

static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev_priv);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev_priv);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
}

static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
{
	u32 err_int = I915_READ(GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (IS_IVYBRIDGE(dev_priv))
				ivb_pipe_crc_irq_handler(dev_priv, pipe);
			else
				hsw_pipe_crc_irq_handler(dev_priv, pipe);
		}
	}

	I915_WRITE(GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
{
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);

	I915_WRITE(SERR_INT, serr_int);
}

static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev_priv);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev_priv);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev_priv);
}

static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
		~SDE_PORTE_HOTPLUG_SPT;
	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
	u32 pin_mask = 0, long_mask = 0;

	if (hotplug_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
				   dig_hotplug_reg, hpd_spt,
				   spt_port_hotplug_long_detect);
	}

	if (hotplug2_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
				   dig_hotplug_reg, hpd_spt,
				   spt_port_hotplug2_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev_priv);
}

static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);

	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   ilk_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}

static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev_priv);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe) &&
		    intel_pipe_handle_vblank(dev_priv, pipe))
			intel_check_page_flip(dev_priv, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE(pipe))
			intel_finish_page_flip_cs(dev_priv, pipe);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev_priv))
			cpt_irq_handler(dev_priv, pch_iir);
		else
			ibx_irq_handler(dev_priv, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT)
		ironlake_rps_change_irq_handler(dev_priv);
}

static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev_priv);

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev_priv);

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
		    intel_pipe_handle_vblank(dev_priv, pipe))
			intel_check_page_flip(dev_priv, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
			intel_finish_page_flip_cs(dev_priv, pipe);
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev_priv, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}

/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(dev_priv)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}

	/* Find, clear, then process each source of interrupt */

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
		if (INTEL_GEN(dev_priv) >= 6)
			snb_gt_irq_handler(dev_priv, gt_iir);
		else
			ilk_gt_irq_handler(dev_priv, gt_iir);
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
		if (INTEL_GEN(dev_priv) >= 7)
			ivb_display_irq_handler(dev_priv, de_iir);
		else
			ilk_display_irq_handler(dev_priv, de_iir);
	}

	if (INTEL_GEN(dev_priv) >= 6) {
		u32 pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir) {
			I915_WRITE(GEN6_PMIIR, pm_iir);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(dev_priv, pm_iir);
		}
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	if (!HAS_PCH_NOP(dev_priv)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}
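/*
 * Illustrative sketch (not part of the driver): the five-step recipe in the
 * comment before ironlake_irq_handler() generalizes to any IIR-style
 * controller -- disable the master, then for each domain find, clear, and
 * only then process the identity bits, and finally re-enable the master.
 * Hypothetical example_* names:
 */
#if 0	/* sketch only */
static irqreturn_t example_iir_irq(struct example_dev *dev)
{
	irqreturn_t ret = IRQ_NONE;
	u32 master = read_reg(dev, EXAMPLE_MASTER_IER);
	int i;

	write_reg(dev, EXAMPLE_MASTER_IER, master & ~EXAMPLE_MASTER_ENABLE);

	for (i = 0; i < dev->num_domains; i++) {	/* find sources */
		u32 iir = read_reg(dev, EXAMPLE_IIR(i));

		if (!iir)
			continue;

		write_reg(dev, EXAMPLE_IIR(i), iir);	/* clear before use */
		example_handle_domain(dev, i, iir);	/* then process */
		ret = IRQ_HANDLED;
	}

	write_reg(dev, EXAMPLE_MASTER_IER, master);	/* re-enable */
	return ret;
}
#endif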

static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   bxt_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}

static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
	irqreturn_t ret = IRQ_NONE;
	u32 iir;
	enum pipe pipe;

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		iir = I915_READ(GEN8_DE_MISC_IIR);
		if (iir) {
			I915_WRITE(GEN8_DE_MISC_IIR, iir);
			ret = IRQ_HANDLED;
			if (iir & GEN8_DE_MISC_GSE)
				intel_opregion_asle_intr(dev_priv);
			else
				DRM_ERROR("Unexpected DE Misc interrupt\n");
		} else {
			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
		}
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		iir = I915_READ(GEN8_DE_PORT_IIR);
		if (iir) {
			u32 tmp_mask;
			bool found = false;

			I915_WRITE(GEN8_DE_PORT_IIR, iir);
			ret = IRQ_HANDLED;

			tmp_mask = GEN8_AUX_CHANNEL_A;
			if (INTEL_INFO(dev_priv)->gen >= 9)
				tmp_mask |= GEN9_AUX_CHANNEL_B |
					    GEN9_AUX_CHANNEL_C |
					    GEN9_AUX_CHANNEL_D;

			if (iir & tmp_mask) {
				dp_aux_irq_handler(dev_priv);
				found = true;
			}

			if (IS_BROXTON(dev_priv)) {
				tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
				if (tmp_mask) {
					bxt_hpd_irq_handler(dev_priv, tmp_mask,
							    hpd_bxt);
					found = true;
				}
			} else if (IS_BROADWELL(dev_priv)) {
				tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
				if (tmp_mask) {
					ilk_hpd_irq_handler(dev_priv,
							    tmp_mask, hpd_bdw);
					found = true;
				}
			}

			if (IS_BROXTON(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
				gmbus_irq_handler(dev_priv);
				found = true;
			}

			if (!found)
				DRM_ERROR("Unexpected DE Port interrupt\n");
		} else {
			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
		}
	}

	for_each_pipe(dev_priv, pipe) {
		u32 flip_done, fault_errors;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
		if (!iir) {
			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
			continue;
		}

		ret = IRQ_HANDLED;
		I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);

		if (iir & GEN8_PIPE_VBLANK &&
		    intel_pipe_handle_vblank(dev_priv, pipe))
			intel_check_page_flip(dev_priv, pipe);

		flip_done = iir;
		if (INTEL_INFO(dev_priv)->gen >= 9)
			flip_done &= GEN9_PIPE_PLANE1_FLIP_DONE;
		else
			flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE;

		if (flip_done)
			intel_finish_page_flip_cs(dev_priv, pipe);

		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
			hsw_pipe_crc_irq_handler(dev_priv, pipe);

		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		fault_errors = iir;
		if (INTEL_INFO(dev_priv)->gen >= 9)
			fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		else
			fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;

		if (fault_errors)
			DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
				  pipe_name(pipe),
				  fault_errors);
	}

	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
	    master_ctl & GEN8_DE_PCH_IRQ) {
		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		iir = I915_READ(SDEIIR);
		if (iir) {
			I915_WRITE(SDEIIR, iir);
			ret = IRQ_HANDLED;

			if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
				spt_irq_handler(dev_priv, iir);
			else
				cpt_irq_handler(dev_priv, iir);
		} else {
			/*
			 * Like on previous PCH there seems to be something
			 * fishy going on with forwarding PCH interrupts.
			 */
			DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
		}
	}

	return ret;
}

static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 master_ctl;
	u32 gt_iir[4] = {};
	irqreturn_t ret;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
	if (!master_ctl)
		return IRQ_NONE;

	I915_WRITE_FW(GEN8_MASTER_IRQ, 0);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	/* Find, clear, then process each source of interrupt */
	ret = gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
	gen8_gt_irq_handler(dev_priv, gt_iir);
	ret |= gen8_de_irq_handler(dev_priv, master_ctl);

	I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ_FW(GEN8_MASTER_IRQ);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}
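/*
 * Illustrative sketch (not part of the driver): gen8_de_irq_handler() above
 * repeatedly cross-checks the master control register against the
 * per-domain IIR and complains when they disagree ("the master control
 * interrupt lied"). That consistency check is cheap and catches both
 * hardware and ack-ordering bugs. Sketched generically, hypothetical names:
 */
#if 0	/* sketch only */
static void example_check_domain(struct example_dev *dev, int domain)
{
	u32 iir = read_reg(dev, EXAMPLE_IIR(domain));

	if (!iir) {
		/* Master claimed this domain was pending, but IIR is clear. */
		pr_err("example: master control lied for domain %d\n", domain);
		return;
	}

	write_reg(dev, EXAMPLE_IIR(domain), iir);
	example_handle_domain(dev, domain, iir);
}
#endif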

static void i915_error_wake_up(struct drm_i915_private *dev_priv)
{
	/*
	 * Notify all waiters for GPU completion events that reset state has
	 * been changed, and that they need to restart their wait after
	 * checking for potential errors (and bail out to drop locks if there is
	 * a gpu reset pending so that i915_error_work_func can acquire them).
	 */

	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
	wake_up_all(&dev_priv->gpu_error.wait_queue);

	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
	wake_up_all(&dev_priv->pending_flip_queue);
}

/**
 * i915_reset_and_wakeup - do process context error handling work
 * @dev_priv: i915 device private
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
{
	struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
	int ret;

	kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);

	/*
	 * Note that there's only one work item which does gpu resets, so we
	 * need not worry about concurrent gpu resets potentially incrementing
	 * error->reset_counter twice. We only need to take care of another
	 * racing irq/hangcheck declaring the gpu dead for a second time. A
	 * quick check for that is good enough: schedule_work ensures the
	 * correct ordering between hang detection and this work item, and since
	 * the reset in-progress bit is only ever set by code outside of this
	 * work we don't need to worry about any other races.
	 */
	if (i915_reset_in_progress(&dev_priv->gpu_error)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);

		/*
		 * In most cases it's guaranteed that we get here with an RPM
		 * reference held, for example because there is a pending GPU
		 * request that won't finish until the reset is done. This
		 * isn't the case at least when we get here by doing a
		 * simulated reset via debugfs, so get an RPM reference.
		 */
		intel_runtime_pm_get(dev_priv);

		intel_prepare_reset(dev_priv);

		/*
		 * All state reset _must_ be completed before we update the
		 * reset counter, for otherwise waiters might miss the reset
		 * pending state and not properly drop locks, resulting in
		 * deadlocks with the reset work.
		 */
		ret = i915_reset(dev_priv);

		intel_finish_reset(dev_priv);

		intel_runtime_pm_put(dev_priv);

		if (ret == 0)
			kobject_uevent_env(kobj,
					   KOBJ_CHANGE, reset_done_event);

		/*
		 * Note: The wake_up also serves as a memory barrier so that
		 * waiters see the updated value of the reset counter atomic_t.
		 */
		wake_up_all(&dev_priv->gpu_error.reset_queue);
	}
}
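/*
 * Illustrative sketch (not part of the driver): the reset path above relies
 * on "update the shared flag first, then wake_up_all()", with the wake-up
 * doubling as the memory barrier that publishes the new value to waiters.
 * The bare pattern, with hypothetical example_* names:
 */
#if 0	/* sketch only */
static void example_declare_reset(struct example_error_state *e)
{
	atomic_or(EXAMPLE_RESET_IN_PROGRESS, &e->reset_counter);

	/* wake_up_all() provides the barrier pairing with the waiters' check */
	wake_up_all(&e->wait_queue);
}

static int example_wait(struct example_error_state *e,
			bool (*done)(void *), void *arg)
{
	/* Waiters bail out as soon as a reset is declared. */
	return wait_event_interruptible(e->wait_queue,
					done(arg) ||
					(atomic_read(&e->reset_counter) &
					 EXAMPLE_RESET_IN_PROGRESS));
}
#endif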

static void i915_report_and_clear_eir(struct drm_i915_private *dev_priv)
{
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	u32 eir = I915_READ(EIR);
	int pipe, i;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	i915_get_extra_instdone(dev_priv, instdone);

	if (IS_G4X(dev_priv)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev_priv)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(dev_priv, pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
		if (INTEL_GEN(dev_priv) < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}

/**
 * i915_handle_error - handle a gpu error
 * @dev_priv: i915 device private
 * @engine_mask: mask representing engines that are hung
 * @fmt: Error message format string
 *
 * Do some basic checking of register state at error time and
 * dump it to the syslog. Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs. Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_i915_private *dev_priv,
		       u32 engine_mask,
		       const char *fmt, ...)
{
	va_list args;
	char error_msg[80];

	va_start(args, fmt);
	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
	va_end(args);

	i915_capture_error_state(dev_priv, engine_mask, error_msg);
	i915_report_and_clear_eir(dev_priv);

	if (engine_mask) {
		atomic_or(I915_RESET_IN_PROGRESS_FLAG,
			  &dev_priv->gpu_error.reset_counter);

		/*
		 * Wakeup waiting processes so that the reset function
		 * i915_reset_and_wakeup doesn't deadlock trying to grab
		 * various locks. By bumping the reset counter first, the woken
		 * processes will see a reset in progress and back off,
		 * releasing their locks and then wait for the reset completion.
		 * We must do this for _all_ gpu waiters that might hold locks
		 * that the reset work needs to acquire.
		 *
		 * Note: The wake_up serves as the required memory barrier to
		 * ensure that the waiters see the updated value of the reset
		 * counter atomic_t.
		 */
		i915_error_wake_up(dev_priv);
	}

	i915_reset_and_wakeup(dev_priv);
}
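/*
 * Illustrative sketch (not part of the driver): i915_handle_error() is
 * variadic so callers can write
 *   i915_handle_error(dev_priv, 0, "Kicking stuck wait on %s", engine->name);
 * the va_list is flattened into a fixed on-stack buffer with vscnprintf(),
 * which truncates rather than overflows. The idiom on its own, with a
 * hypothetical example_* name:
 */
#if 0	/* sketch only */
static void example_log_error(const char *fmt, ...)
{
	va_list args;
	char msg[80];	/* vscnprintf() guarantees NUL termination */

	va_start(args, fmt);
	vscnprintf(msg, sizeof(msg), fmt, args);
	va_end(args);

	pr_err("example error: %s\n", msg);
}
#endif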

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_STATUS);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
						     DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_enable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_STATUS |
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
						     DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_disable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static bool
ring_idle(struct intel_engine_cs *engine, u32 seqno)
{
	return i915_seqno_passed(seqno,
				 READ_ONCE(engine->last_submitted_seqno));
}

static bool
ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr)
{
	if (INTEL_GEN(engine->i915) >= 8) {
		return (ipehr >> 23) == 0x1c;
	} else {
		ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
		return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
				 MI_SEMAPHORE_REGISTER);
	}
}

static struct intel_engine_cs *
semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
				 u64 offset)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct intel_engine_cs *signaller;

	if (INTEL_GEN(dev_priv) >= 8) {
		for_each_engine(signaller, dev_priv) {
			if (engine == signaller)
				continue;

			if (offset == signaller->semaphore.signal_ggtt[engine->id])
				return signaller;
		}
	} else {
		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;

		for_each_engine(signaller, dev_priv) {
			if (engine == signaller)
				continue;

			if (sync_bits == signaller->semaphore.mbox.wait[engine->id])
				return signaller;
		}
	}

	DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
		  engine->id, ipehr, offset);

	return NULL;
}

static struct intel_engine_cs *
semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u32 cmd, ipehr, head;
	u64 offset = 0;
	int i, backwards;

	/*
	 * This function does not support execlist mode - any attempt to
	 * proceed further into this function will result in a kernel panic
	 * when dereferencing ring->buffer, which is not set up in execlist
	 * mode.
	 *
	 * The correct way of doing it would be to derive the currently
	 * executing ring buffer from the current context, which is derived
	 * from the currently running request. Unfortunately, to get the
	 * current request we would have to grab the struct_mutex before doing
	 * anything else, which would be ill-advised since some other thread
	 * might have grabbed it already and managed to hang itself, causing
	 * the hang checker to deadlock.
	 *
	 * Therefore, this function does not support execlist mode in its
	 * current form. Just return NULL and move on.
	 */
	if (engine->buffer == NULL)
		return NULL;

	ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
	if (!ipehr_is_semaphore_wait(engine, ipehr))
		return NULL;

	/*
	 * HEAD is likely pointing to the dword after the actual command,
	 * so scan backwards until we find the MBOX. But limit it to just 3
	 * or 4 dwords depending on the semaphore wait command size.
	 * Note that we don't care about ACTHD here since that might
	 * point at a batch, and semaphores are always emitted into the
	 * ringbuffer itself.
	 */
	head = I915_READ_HEAD(engine) & HEAD_ADDR;
	backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;

	for (i = backwards; i; --i) {
		/*
		 * Be paranoid and presume the hw has gone off into the wild -
		 * our ring is smaller than what the hardware (and hence
		 * HEAD_ADDR) allows. Also handles wrap-around.
		 */
		head &= engine->buffer->size - 1;

		/* This here seems to blow up */
		cmd = ioread32(engine->buffer->virtual_start + head);
		if (cmd == ipehr)
			break;

		head -= 4;
	}

	if (!i)
		return NULL;

	*seqno = ioread32(engine->buffer->virtual_start + head + 4) + 1;
	if (INTEL_GEN(dev_priv) >= 8) {
		offset = ioread32(engine->buffer->virtual_start + head + 12);
		offset <<= 32;
		offset |= ioread32(engine->buffer->virtual_start + head + 8);
	}
	return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
}
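/*
 * Illustrative sketch (not part of the driver): the scan in
 * semaphore_waits_for() walks backwards from HEAD through a power-of-two
 * ring, relying on "head &= size - 1" to clamp out-of-range values and to
 * handle the wrap at offset 0 ("head -= 4" underflows, the mask folds it
 * back to the top). The same wrap-safe walk in isolation, with hypothetical
 * names; head and size are byte quantities, size a power of two:
 */
#if 0	/* sketch only */
static int example_find_dword_backwards(const u32 *ring, u32 size,
					u32 head, u32 needle, int limit)
{
	int i;

	for (i = limit; i; --i) {
		head &= size - 1;		/* clamp + wrap-around */
		if (ring[head / 4] == needle)	/* byte offset -> dword index */
			return head;
		head -= 4;			/* may underflow; mask fixes it */
	}

	return -1;				/* not found within the limit */
}
#endif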

static int semaphore_passed(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct intel_engine_cs *signaller;
	u32 seqno;

	engine->hangcheck.deadlock++;

	signaller = semaphore_waits_for(engine, &seqno);
	if (signaller == NULL)
		return -1;

	/* Prevent pathological recursion due to driver bugs */
	if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
		return -1;

	if (i915_seqno_passed(intel_engine_get_seqno(signaller), seqno))
		return 1;

	/* cursory check for an unkickable deadlock */
	if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
	    semaphore_passed(signaller) < 0)
		return -1;

	return 0;
}

static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;

	for_each_engine(engine, dev_priv)
		engine->hangcheck.deadlock = 0;
}

static bool subunits_stuck(struct intel_engine_cs *engine)
{
	u32 instdone[I915_NUM_INSTDONE_REG];
	bool stuck;
	int i;

	if (engine->id != RCS)
		return true;

	i915_get_extra_instdone(engine->i915, instdone);

	/* There might be unstable subunit states even when
	 * actual head is not moving. Filter out the unstable ones by
	 * accumulating the undone -> done transitions and only
	 * consider those as progress.
	 */
	stuck = true;
	for (i = 0; i < I915_NUM_INSTDONE_REG; i++) {
		const u32 tmp = instdone[i] | engine->hangcheck.instdone[i];

		if (tmp != engine->hangcheck.instdone[i])
			stuck = false;

		engine->hangcheck.instdone[i] |= tmp;
	}

	return stuck;
}

static enum intel_ring_hangcheck_action
head_stuck(struct intel_engine_cs *engine, u64 acthd)
{
	if (acthd != engine->hangcheck.acthd) {
		/* Clear subunit states on head movement */
		memset(engine->hangcheck.instdone, 0,
		       sizeof(engine->hangcheck.instdone));

		return HANGCHECK_ACTIVE;
	}

	if (!subunits_stuck(engine))
		return HANGCHECK_ACTIVE;

	return HANGCHECK_HUNG;
}

static enum intel_ring_hangcheck_action
ring_stuck(struct intel_engine_cs *engine, u64 acthd)
{
	struct drm_i915_private *dev_priv = engine->i915;
	enum intel_ring_hangcheck_action ha;
	u32 tmp;

	ha = head_stuck(engine, acthd);
	if (ha != HANGCHECK_HUNG)
		return ha;

	if (IS_GEN2(dev_priv))
		return HANGCHECK_HUNG;

	/* Is the chip hanging on a WAIT_FOR_EVENT?
	 * If so we can simply poke the RB_WAIT bit
	 * and break the hang. This should work on
	 * all but the second generation chipsets.
	 */
	tmp = I915_READ_CTL(engine);
	if (tmp & RING_WAIT) {
		i915_handle_error(dev_priv, 0,
				  "Kicking stuck wait on %s",
				  engine->name);
		I915_WRITE_CTL(engine, tmp);
		return HANGCHECK_KICK;
	}

	if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) {
		switch (semaphore_passed(engine)) {
		default:
			return HANGCHECK_HUNG;
		case 1:
			i915_handle_error(dev_priv, 0,
					  "Kicking stuck semaphore on %s",
					  engine->name);
			I915_WRITE_CTL(engine, tmp);
			return HANGCHECK_KICK;
		case 0:
			return HANGCHECK_WAIT;
		}
	}

	return HANGCHECK_HUNG;
}

static unsigned long kick_waiters(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	unsigned long irq_count = READ_ONCE(engine->breadcrumbs.irq_wakeups);

	if (engine->hangcheck.user_interrupts == irq_count &&
	    !test_and_set_bit(engine->id, &i915->gpu_error.missed_irq_rings)) {
		if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings))
			DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
				  engine->name);

		intel_engine_enable_fake_irq(engine);
	}

	return irq_count;
}

/*
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. We keep track of per-ring seqno progress and
 * if there is no progress, the hangcheck score for that ring is increased.
 * Further, acthd is inspected to see if the ring is stuck. In the stuck case
 * we kick the ring. If we see no progress on three subsequent calls
 * we assume the chip is wedged and try to fix it by resetting the chip.
 */
static void i915_hangcheck_elapsed(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv),
			     gpu_error.hangcheck_work.work);
	struct intel_engine_cs *engine;
	unsigned int hung = 0, stuck = 0;
	int busy_count = 0;
#define BUSY 1
#define KICK 5
#define HUNG 20
#define ACTIVE_DECAY 15

	if (!i915.enable_hangcheck)
		return;

	if (!READ_ONCE(dev_priv->gt.awake))
		return;

	/* As enabling the GPU requires fairly extensive mmio access,
	 * periodically arm the mmio checker to see if we are triggering
	 * any invalid access.
	 */
	intel_uncore_arm_unclaimed_mmio_detection(dev_priv);

	for_each_engine(engine, dev_priv) {
		bool busy = intel_engine_has_waiter(engine);
		u64 acthd;
		u32 seqno;
		unsigned user_interrupts;

		semaphore_clear_deadlocks(dev_priv);

		/* We don't strictly need an irq-barrier here, as we are not
		 * serving an interrupt request, but be paranoid in case the
		 * barrier has side-effects (such as preventing a broken
		 * cacheline snoop) and so be sure that we can see the seqno
		 * advance. If the seqno should stick, due to a stale
		 * cacheline, we would erroneously declare the GPU hung.
		 */
		if (engine->irq_seqno_barrier)
			engine->irq_seqno_barrier(engine);

		acthd = intel_ring_get_active_head(engine);
		seqno = intel_engine_get_seqno(engine);

		/* Reset stuck interrupts between batch advances */
		user_interrupts = 0;

		if (engine->hangcheck.seqno == seqno) {
			if (ring_idle(engine, seqno)) {
				engine->hangcheck.action = HANGCHECK_IDLE;
				if (busy) {
					/* Safeguard against driver failure */
					user_interrupts = kick_waiters(engine);
					engine->hangcheck.score += BUSY;
				}
			} else {
				/* We always increment the hangcheck score
				 * if the ring is busy and still processing
				 * the same request, so that no single request
				 * can run indefinitely (such as a chain of
				 * batches). The only time we do not increment
				 * the hangcheck score on this ring is if this
				 * ring is in a legitimate wait for another
				 * ring. In that case the waiting ring is a
				 * victim and we want to be sure we catch the
				 * right culprit. Then every time we do kick
				 * the ring, add a small increment to the
				 * score so that we can catch a batch that is
				 * being repeatedly kicked and so responsible
				 * for stalling the machine.
				 */
				engine->hangcheck.action = ring_stuck(engine,
								      acthd);

				switch (engine->hangcheck.action) {
				case HANGCHECK_IDLE:
				case HANGCHECK_WAIT:
					break;
				case HANGCHECK_ACTIVE:
					engine->hangcheck.score += BUSY;
					break;
				case HANGCHECK_KICK:
					engine->hangcheck.score += KICK;
					break;
				case HANGCHECK_HUNG:
					engine->hangcheck.score += HUNG;
					break;
				}
			}

			if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
				hung |= intel_engine_flag(engine);
				if (engine->hangcheck.action != HANGCHECK_HUNG)
					stuck |= intel_engine_flag(engine);
			}
		} else {
			engine->hangcheck.action = HANGCHECK_ACTIVE;

			/* Gradually reduce the count so that we catch DoS
			 * attempts across multiple batches.
			 */
			if (engine->hangcheck.score > 0)
				engine->hangcheck.score -= ACTIVE_DECAY;
			if (engine->hangcheck.score < 0)
				engine->hangcheck.score = 0;

			/* Clear head and subunit states on seqno movement */
			acthd = 0;

			memset(engine->hangcheck.instdone, 0,
			       sizeof(engine->hangcheck.instdone));
		}

		engine->hangcheck.seqno = seqno;
		engine->hangcheck.acthd = acthd;
		engine->hangcheck.user_interrupts = user_interrupts;
		busy_count += busy;
	}

	if (hung) {
		char msg[80];
		int len;

		/* If some rings hung but others were still busy, only
		 * blame the hanging rings in the synopsis.
		 */
		if (stuck != hung)
			hung &= ~stuck;
		len = scnprintf(msg, sizeof(msg),
				"%s on ", stuck == hung ? "No progress" : "Hang");
		for_each_engine_masked(engine, dev_priv, hung)
			len += scnprintf(msg + len, sizeof(msg) - len,
					 "%s, ", engine->name);
		msg[len-2] = '\0';

		return i915_handle_error(dev_priv, hung, msg);
	}

	/* Reset timer in case GPU hangs without another request being added */
	if (busy_count)
		i915_queue_hangcheck(dev_priv);
}
3225
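/*
 * Illustrative sketch (not part of the driver): how the hangcheck score
 * evolves under the BUSY/KICK/HUNG increments and ACTIVE_DECAY above. A
 * ring that keeps making progress decays back to zero, while one that is
 * stuck and repeatedly kicked accumulates KICK (5) per tick; assuming a
 * threshold of 31 (what HANGCHECK_SCORE_RING_HUNG was around this era of
 * the driver), it crosses the line on the seventh tick. Compiled out
 * deliberately via #if 0.
 */
#if 0
static int hangcheck_score_example(void)
{
	int score = 0, tick;

	for (tick = 0; tick < 7; tick++)
		score += 5;		/* KICK on every hangcheck tick */
	/* score == 35 >= 31: the ring would be declared hung */

	score -= 15;			/* one ACTIVE_DECAY on progress */
	if (score < 0)
		score = 0;

	return score;
}
#endif
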
static void ibx_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (HAS_PCH_NOP(dev))
		return;

	GEN5_IRQ_RESET(SDE);

	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
		I915_WRITE(SERR_INT, 0xffffffff);
}

/*
 * SDEIER is also touched by the interrupt handler to work around missed PCH
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
 * instead we unconditionally enable all PCH interrupt sources here, but then
 * only unmask them as needed with SDEIMR.
 *
 * This function needs to be called before interrupts are enabled.
 */
static void ibx_irq_pre_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (HAS_PCH_NOP(dev))
		return;

	WARN_ON(I915_READ(SDEIER) != 0);
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);
}

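/*
 * Illustrative sketch (not part of the driver): the "enable everything in
 * IER, gate with IMR" pattern the comment above describes. Once IER is
 * committed it must not change, so all later routing decisions happen in
 * the mask register. Hypothetical register model, compiled out via #if 0:
 */
#if 0
struct pch_irq_model {
	unsigned int ier;	/* committed once, before irqs are live */
	unsigned int imr;	/* the only knob touched afterwards */
};

static void pch_irq_setup(struct pch_irq_model *r, unsigned int wanted)
{
	r->ier = 0xffffffff;	/* unconditionally enable all sources */
	r->imr = ~wanted;	/* ...and unmask only the wanted ones */
}
#endif
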
static void gen5_gt_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	GEN5_IRQ_RESET(GT);
	if (INTEL_INFO(dev)->gen >= 6)
		GEN5_IRQ_RESET(GEN6_PM);
}

static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	if (IS_CHERRYVIEW(dev_priv))
		I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
	else
		I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);

	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(PIPESTAT(pipe),
			   PIPE_FIFO_UNDERRUN_STATUS |
			   PIPESTAT_INT_STATUS_MASK);
		dev_priv->pipestat_irq_mask[pipe] = 0;
	}

	GEN5_IRQ_RESET(VLV_);
	dev_priv->irq_mask = ~0;
}

static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
	u32 pipestat_mask;
	u32 enable_mask;
	enum pipe pipe;

	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
			PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);

	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
	if (IS_CHERRYVIEW(dev_priv))
		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;

	WARN_ON(dev_priv->irq_mask != ~0);

	dev_priv->irq_mask = ~enable_mask;

	GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
}

/* drm_dma.h hooks */
static void ironlake_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(HWSTAM, 0xffffffff);

	GEN5_IRQ_RESET(DE);
	if (IS_GEN7(dev))
		I915_WRITE(GEN7_ERR_INT, 0xffffffff);

	gen5_gt_irq_reset(dev);

	ibx_irq_reset(dev);
}

static void valleyview_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(VLV_MASTER_IER, 0);
	POSTING_READ(VLV_MASTER_IER);

	gen5_gt_irq_reset(dev);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
{
	GEN8_IRQ_RESET_NDX(GT, 0);
	GEN8_IRQ_RESET_NDX(GT, 1);
	GEN8_IRQ_RESET_NDX(GT, 2);
	GEN8_IRQ_RESET_NDX(GT, 3);
}

static void gen8_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);

	GEN5_IRQ_RESET(GEN8_DE_PORT_);
	GEN5_IRQ_RESET(GEN8_DE_MISC_);
	GEN5_IRQ_RESET(GEN8_PCU_);

	if (HAS_PCH_SPLIT(dev))
		ibx_irq_reset(dev);
}

void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
				     unsigned int pipe_mask)
{
	uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);
	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
				  dev_priv->de_irq_mask[pipe],
				  ~dev_priv->de_irq_mask[pipe] | extra_ier);
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
				     unsigned int pipe_mask)
{
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);
	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	synchronize_irq(dev_priv->drm.irq);
}

static void cherryview_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	GEN5_IRQ_RESET(GEN8_PCU_);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
				  const u32 hpd[HPD_NUM_PINS])
{
	struct intel_encoder *encoder;
	u32 enabled_irqs = 0;

	for_each_intel_encoder(&dev_priv->drm, encoder)
		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
			enabled_irqs |= hpd[encoder->hpd_pin];

	return enabled_irqs;
}

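/*
 * Illustrative sketch (not part of the driver): intel_hpd_enabled_irqs()
 * above is a table lookup folded over the enabled encoders. The standalone
 * fragment below shows the same fold with plain arrays; the pin names and
 * values are hypothetical. Compiled out via #if 0.
 */
#if 0
static unsigned int hpd_mask_example(void)
{
	enum { PIN_A, PIN_B, PIN_C, NUM_PINS };
	const unsigned int irq_for_pin[NUM_PINS] = { 0x1, 0x8, 0x10 };
	const int pin_enabled[NUM_PINS] = { 1, 0, 1 }; /* A and C enabled */
	unsigned int mask = 0;
	int pin;

	for (pin = 0; pin < NUM_PINS; pin++)
		if (pin_enabled[pin])
			mask |= irq_for_pin[pin];

	return mask; /* 0x11 for this hypothetical table */
}
#endif
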
static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, hotplug, enabled_irqs;

	if (HAS_PCH_IBX(dev_priv)) {
		hotplug_irqs = SDE_HOTPLUG_MASK;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
	} else {
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
	}

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec).
	 * The pulse duration bits are reserved on LPT+.
	 */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	/*
	 * When CPU and PCH are on the same package, port A
	 * HPD must be enabled in both north and south.
	 */
	if (HAS_PCH_LPT_LP(dev_priv))
		hotplug |= PORTA_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

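/*
 * Illustrative sketch (not part of the driver): the pulse-duration update
 * above is the classic read-modify-write of a multi-bit register field --
 * clear the field's mask first, then OR in the new value. Standalone
 * fragment with hypothetical field positions, compiled out via #if 0:
 */
#if 0
#define FIELD_MASK	(0x3u << 4)	/* hypothetical 2-bit field at bits 5:4 */
#define FIELD_2MS	(0x1u << 4)	/* hypothetical encoding for "2 ms" */

static unsigned int set_pulse_duration(unsigned int reg)
{
	reg &= ~FIELD_MASK;	/* clear the old duration, keep other bits */
	reg |= FIELD_2MS;	/* program the new duration */
	return reg;
}
#endif
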
static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, hotplug, enabled_irqs;

	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	/* Enable digital hotplug on the PCH */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE |
		PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);

	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
	hotplug |= PORTE_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
}

static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, hotplug, enabled_irqs;

	if (INTEL_GEN(dev_priv) >= 8) {
		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);

		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else if (INTEL_GEN(dev_priv) >= 7) {
		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else {
		hotplug_irqs = DE_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	}

	/*
	 * Enable digital hotplug on the CPU, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec).
	 * The pulse duration bits are reserved on HSW+.
	 */
	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);

	ibx_hpd_irq_setup(dev_priv);
}

static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, hotplug, enabled_irqs;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;

	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);

	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
		PORTA_HOTPLUG_ENABLE;

	DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
		      hotplug, enabled_irqs);
	hotplug &= ~BXT_DDI_HPD_INVERT_MASK;

	/*
	 * For BXT the invert bit has to be set based on the AOB design
	 * for the HPD detection logic; update it based on the VBT fields.
	 */
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
		hotplug |= BXT_DDIA_HPD_INVERT;
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
		hotplug |= BXT_DDIB_HPD_INVERT;
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
		hotplug |= BXT_DDIC_HPD_INVERT;

	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

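/*
 * Illustrative sketch (not part of the driver): the BXT invert-bit logic
 * above pairs each "is this pin's irq enabled" test with a "does the board
 * want it inverted" query. A compact table-driven version of the same idea,
 * with hypothetical names, compiled out via #if 0:
 */
#if 0
struct hpd_invert_rule {
	unsigned int irq_bit;	/* set if the DDI's hotplug irq is enabled */
	unsigned int invert_bit;/* bit to set when the board inverts HPD */
	int board_inverted;	/* stand-in for the VBT query */
};

static unsigned int apply_invert_rules(unsigned int hotplug,
				       const struct hpd_invert_rule *r,
				       int n, unsigned int enabled_irqs)
{
	int i;

	for (i = 0; i < n; i++)
		if ((enabled_irqs & r[i].irq_bit) && r[i].board_inverted)
			hotplug |= r[i].invert_bit;
	return hotplug;
}
#endif
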
static void ibx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 mask;

	if (HAS_PCH_NOP(dev))
		return;

	if (HAS_PCH_IBX(dev))
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
	else
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;

	gen5_assert_iir_is_zero(dev_priv, SDEIIR);
	I915_WRITE(SDEIMR, ~mask);
}

static void gen5_gt_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 pm_irqs, gt_irqs;

	pm_irqs = gt_irqs = 0;

	dev_priv->gt_irq_mask = ~0;
	if (HAS_L3_DPF(dev)) {
		/* L3 parity interrupt is always unmasked. */
		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
		gt_irqs |= GT_PARITY_ERROR(dev);
	}

	gt_irqs |= GT_RENDER_USER_INTERRUPT;
	if (IS_GEN5(dev)) {
		gt_irqs |= ILK_BSD_USER_INTERRUPT;
	} else {
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
	}

	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);

	if (INTEL_INFO(dev)->gen >= 6) {
		/*
		 * RPS interrupts will get enabled/disabled on demand when RPS
		 * itself is enabled/disabled.
		 */
		if (HAS_VEBOX(dev))
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;

		dev_priv->pm_irq_mask = 0xffffffff;
		GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
	}
}

static int ironlake_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 display_mask, extra_mask;

	if (INTEL_INFO(dev)->gen >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
				DE_PLANEB_FLIP_DONE_IVB |
				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
			      DE_DP_A_HOTPLUG_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
				DE_AUX_CHANNEL_A |
				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
				DE_POISON);
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
			      DE_DP_A_HOTPLUG);
	}

	dev_priv->irq_mask = ~display_mask;

	I915_WRITE(HWSTAM, 0xeffe);

	ibx_irq_pre_postinstall(dev);

	GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);

	gen5_gt_irq_postinstall(dev);

	ibx_irq_postinstall(dev);

	if (IS_IRONLAKE_M(dev)) {
		/* Enable PCU event interrupts
		 *
		 * spinlocking not required here for correctness since interrupt
		 * setup is guaranteed to run in single-threaded context. But we
		 * need it to make the assert_spin_locked happy. */
		spin_lock_irq(&dev_priv->irq_lock);
		ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
		spin_unlock_irq(&dev_priv->irq_lock);
	}

	return 0;
}

void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = true;

	if (intel_irqs_enabled(dev_priv)) {
		vlv_display_irq_reset(dev_priv);
		vlv_display_irq_postinstall(dev_priv);
	}
}

void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = false;

	if (intel_irqs_enabled(dev_priv))
		vlv_display_irq_reset(dev_priv);
}

static int valleyview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	gen5_gt_irq_postinstall(dev);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
	POSTING_READ(VLV_MASTER_IER);

	return 0;
}

static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
	/* These are interrupts we'll toggle with the ring mask register */
	uint32_t gt_interrupts[] = {
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
		0,
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
		};

	if (HAS_L3_DPF(dev_priv))
		gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	dev_priv->pm_irq_mask = 0xffffffff;
	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled.
	 */
	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}

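/*
 * Illustrative sketch (not part of the driver): gen8_gt_irq_postinstall()
 * packs two engines' interrupt bits into each 32-bit GT register by
 * shifting the same per-event bits to a per-engine offset. Standalone
 * fragment with hypothetical event bits and shifts, compiled out via #if 0:
 */
#if 0
#define EVT_USER	(1u << 0)	/* hypothetical "user interrupt" bit */
#define EVT_CTX_SWITCH	(1u << 8)	/* hypothetical "context switch" bit */
#define ENGINE0_SHIFT	0		/* low half of the register */
#define ENGINE1_SHIFT	16		/* high half of the register */

static unsigned int pack_gt_irqs(void)
{
	/* same events, replicated at each engine's offset: 0x01010101 */
	return (EVT_USER | EVT_CTX_SWITCH) << ENGINE0_SHIFT |
	       (EVT_USER | EVT_CTX_SWITCH) << ENGINE1_SHIFT;
}
#endif
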
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
	uint32_t de_pipe_enables;
	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
	u32 de_port_enables;
	u32 de_misc_masked = GEN8_DE_MISC_GSE;
	enum pipe pipe;

	if (INTEL_INFO(dev_priv)->gen >= 9) {
		de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
				  GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
				  GEN9_AUX_CHANNEL_D;
		if (IS_BROXTON(dev_priv))
			de_port_masked |= BXT_DE_PORT_GMBUS;
	} else {
		de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
				  GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
	}

	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
					   GEN8_PIPE_FIFO_UNDERRUN;

	de_port_enables = de_port_masked;
	if (IS_BROXTON(dev_priv))
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;

	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);

	GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
	GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
}

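/*
 * Illustrative sketch (not part of the driver): the *_masked vs *_enables
 * split above mirrors the usual IMR/IER pairing -- the enable register
 * turns a source on, the mask register decides whether it reaches the CPU.
 * A minimal model of that pairing with hypothetical bits, compiled out
 * via #if 0:
 */
#if 0
struct irq_regs {
	unsigned int ier;	/* sources enabled in hardware */
	unsigned int imr;	/* sources masked off from the CPU */
};

/* An event is reported only if it is enabled and not masked. */
static int irq_visible(const struct irq_regs *r, unsigned int bit)
{
	return (r->ier & bit) && !(r->imr & bit);
}
#endif
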
static int gen8_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (HAS_PCH_SPLIT(dev))
		ibx_irq_pre_postinstall(dev);

	gen8_gt_irq_postinstall(dev_priv);
	gen8_de_irq_postinstall(dev_priv);

	if (HAS_PCH_SPLIT(dev))
		ibx_irq_postinstall(dev);

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

static int cherryview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	gen8_gt_irq_postinstall(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

static void gen8_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!dev_priv)
		return;

	gen8_irq_reset(dev);
}

static void valleyview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!dev_priv)
		return;

	I915_WRITE(VLV_MASTER_IER, 0);
	POSTING_READ(VLV_MASTER_IER);

	gen5_gt_irq_reset(dev);

	I915_WRITE(HWSTAM, 0xffffffff);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void cherryview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!dev_priv)
		return;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	GEN5_IRQ_RESET(GEN8_PCU_);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void ironlake_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!dev_priv)
		return;

	ironlake_irq_reset(dev);
}

static void i8xx_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}

static int i8xx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i8xx_handle_vblank(struct drm_i915_private *dev_priv,
			       int plane, int pipe, u32 iir)
{
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev_priv, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		goto check_page_flip;

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the PendingFlip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ16(ISR) & flip_pending)
		goto check_page_flip;

	intel_finish_page_flip_cs(dev_priv, pipe);
	return true;

check_page_flip:
	intel_check_page_flip(dev_priv, pipe);
	return false;
}

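/*
 * Illustrative sketch (not part of the driver): the FlipDone detection
 * above compares a latched status (IIR) against the live status (ISR).
 * A flip is complete once the event has been latched but is no longer
 * live. Minimal model of that edge detector, compiled out via #if 0:
 */
#if 0
static int flip_completed(unsigned int iir, unsigned int isr,
			  unsigned int flip_pending)
{
	if (!(iir & flip_pending))	/* never latched: nothing happened */
		return 0;
	if (isr & flip_pending)		/* still pending in hardware */
		return 0;
	return 1;			/* latched and deasserted: done */
}
#endif
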
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u16 iir, new_iir;
	u32 pipe_stats[2];
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	irqreturn_t ret;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	ret = IRQ_NONE;
	iir = I915_READ16(IIR);
	if (iir == 0)
		goto out;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			i915_reg_t reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff)
				I915_WRITE(reg, pipe_stats[pipe]);
		}
		spin_unlock(&dev_priv->irq_lock);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(&dev_priv->engine[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev_priv))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i8xx_handle_vblank(dev_priv, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev_priv, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
		}

		iir = new_iir;
	}
	ret = IRQ_HANDLED;

out:
	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void i8xx_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}

static void i915_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i915_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_enable_asle_pipestat(dev_priv);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i915_handle_vblank(struct drm_i915_private *dev_priv,
			       int plane, int pipe, u32 iir)
{
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev_priv, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		goto check_page_flip;

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the PendingFlip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ(ISR) & flip_pending)
		goto check_page_flip;

	intel_finish_page_flip_cs(dev_priv, pipe);
	return true;

check_page_flip:
	intel_check_page_flip(dev_priv, pipe);
	return false;
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe, ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			i915_reg_t reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		/* Consume port. Then clear IIR or we'll miss events */
		if (I915_HAS_HOTPLUG(dev_priv) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
			if (hotplug_status)
				i9xx_hpd_irq_handler(dev_priv, hotplug_status);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(&dev_priv->engine[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev_priv))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev_priv, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev_priv, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev_priv);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

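/*
 * Illustrative sketch (not part of the driver): the read-IIR / ack / re-read
 * loop above exists because MSI only fires on IIR's zero-to-nonzero edge.
 * Minimal model of the loop shape; read_iir()/ack_iir()/handle() are
 * hypothetical stand-ins for the register accessors. Compiled out via #if 0.
 */
#if 0
static void msi_safe_irq_loop(unsigned int (*read_iir)(void),
			      void (*ack_iir)(unsigned int),
			      void (*handle)(unsigned int))
{
	unsigned int iir = read_iir();

	while (iir) {
		unsigned int new_iir;

		ack_iir(iir);		/* clear what we are about to handle */
		new_iir = read_iir();	/* catch bits that raced in */
		handle(iir);
		iir = new_iir;		/* loop until IIR drains to zero */
	}
}
#endif
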
static void i915_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

static void i965_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev_priv)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev_priv);

	return 0;
}

static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_en;

	assert_spin_locked(&dev_priv->irq_lock);

	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
	/*
	 * Programming the CRT detection parameters tends to generate a
	 * spurious hotplug event about three seconds later. So just do
	 * it once.
	 */
	if (IS_G4X(dev_priv))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	i915_hotplug_interrupt_update_locked(dev_priv,
					     HOTPLUG_INT_EN_MASK |
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
					     hotplug_en);
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	iir = I915_READ(IIR);

	for (;;) {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			i915_reg_t reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
			if (hotplug_status)
				i9xx_hpd_irq_handler(dev_priv, hotplug_status);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(&dev_priv->engine[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(&dev_priv->engine[VCS]);

		for_each_pipe(dev_priv, pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev_priv, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev_priv, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev_priv);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev_priv);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void i965_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	if (!dev_priv)
		return;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not set up the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;

	intel_hpd_init_work(dev_priv);

	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	dev_priv->rps.pm_intr_keep = 0;

	/*
	 * SNB and IVB will, while VLV and CHV may, hard hang on a looping
	 * batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
		dev_priv->rps.pm_intr_keep |= GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_INFO(dev_priv)->gen >= 8)
		dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;

	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
			  i915_hangcheck_elapsed);

	if (IS_GEN2(dev_priv)) {
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
	 */
	if (!IS_GEN2(dev_priv))
		dev->vblank_disable_immediate = true;

	dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		if (IS_BROXTON(dev))
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
		else if (HAS_PCH_SPT(dev) || HAS_PCH_KBP(dev))
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
		else
			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else {
		if (IS_GEN2(dev_priv)) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (IS_GEN3(dev_priv)) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
		}
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}

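/*
 * Illustrative sketch (not part of the driver): intel_irq_init() advertises
 * the hardware frame counter width via max_vblank_count so the DRM core can
 * handle wraparound. The fragment below shows the wrap-safe delta such a
 * counter permits when max_vblank_count is an all-ones mask (0xffffff or
 * 0xffffffff above). Compiled out via #if 0.
 */
#if 0
static unsigned int vblank_delta(unsigned int newer, unsigned int older,
				 unsigned int max_count /* e.g. 0xffffff */)
{
	/* Works across a single wraparound of the hardware counter. */
	return (newer - older) & max_count;
}
#endif
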
/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->pm.irqs_enabled = true;

	return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
}

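/*
 * Illustrative sketch (not part of the driver): the intended call order for
 * the two-stage approach described above, as it might appear in a driver
 * load path. Only the two intel_irq_* calls are real; the wrapper is
 * hypothetical. Compiled out via #if 0.
 */
#if 0
static int example_driver_load(struct drm_i915_private *dev_priv)
{
	int ret;

	intel_irq_init(dev_priv);		/* stage 1: vtables, work items */

	ret = intel_irq_install(dev_priv);	/* stage 2: hardware interrupt */
	if (ret)
		return ret;

	/* hotplug support is brought up separately, later in load */
	return 0;
}
#endif
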
/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	drm_irq_uninstall(&dev_priv->drm);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
	dev_priv->pm.irqs_enabled = false;
	synchronize_irq(dev_priv->drm.irq);
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->pm.irqs_enabled = true;
	dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
	dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
}
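
/*
 * Illustrative sketch (not part of the driver): how the two runtime-pm
 * helpers above pair up in a suspend/resume cycle. The suspend/resume
 * wrappers themselves are hypothetical. Compiled out via #if 0.
 */
#if 0
static void example_runtime_suspend(struct drm_i915_private *dev_priv)
{
	/* quiesce: no handler may run past this point */
	intel_runtime_pm_disable_interrupts(dev_priv);
	/* ... power down hardware ... */
}

static void example_runtime_resume(struct drm_i915_private *dev_priv)
{
	/* ... power up hardware ... */
	/* reprogram from scratch: preinstall + postinstall */
	intel_runtime_pm_enable_interrupts(dev_priv);
}
#endif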