/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
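
/*
 * Hotplug detect (HPD) tables: one entry per HPD pin, giving the
 * hotplug interrupt enable/status bit for that pin on a given platform
 * (IBX/CPT PCH, and the gen3/4, G4X and VLV GMCH variants).
 */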
static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
	u32 val = I915_READ(reg); \
	if (val) { \
		WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
		     (reg), val); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
	} \
} while (0)

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	POSTING_READ(GEN8_##type##_IER(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
	I915_WRITE(type##IMR, (imr_val)); \
	I915_WRITE(type##IER, (ier_val)); \
	POSTING_READ(type##IER); \
} while (0)
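
/*
 * Interrupt mask register (IMR) convention used throughout this file:
 * a set IMR bit masks the interrupt, so enabling an interrupt means
 * clearing its bit. The *_IRQ_RESET macros quiesce an interrupt unit
 * at preinstall/uninstall time; the *_IRQ_INIT macros program IMR/IER
 * at postinstall, after asserting that IIR is already clear.
 */
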
/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(dev_priv->pm.irqs_disabled))
		return;

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(dev_priv->pm.irqs_disabled))
		return;

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(dev_priv->pm.irqs_disabled))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(dev_priv->pm.irqs_disabled))
		return;

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
		POSTING_READ(GEN6_PMIMR);
	}
}

void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, mask);
}

void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}
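
/*
 * On IVB/HSW there is a single DE_ERR_INT_IVB enable bit shared by all
 * pipes, so the error interrupt may only be unmasked when no pipe has
 * FIFO underrun reporting disabled.
 */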
static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum pipe pipe;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}

/**
 * bdw_update_pm_irq - update GT interrupt 2
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 *
 * Copied from the snb function, updated with relevant register offsets
 */
static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(dev_priv->pm.irqs_disabled))
		return;

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN8_GT_IMR(2), dev_priv->pm_irq_mask);
		POSTING_READ(GEN8_GT_IMR(2));
	}
}

void bdw_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	bdw_update_pm_irq(dev_priv, mask, mask);
}

void bdw_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	bdw_update_pm_irq(dev_priv, mask, 0);
}

static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}
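
/*
 * On GMCH platforms (gen2-4, VLV) a FIFO underrun sets a PIPESTAT
 * status bit; no interrupt is enabled for it here, so underruns are
 * detected by polling that bit, logging, and clearing it (PIPESTAT
 * status bits are write-1-to-clear).
 */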
void i9xx_check_fifo_underruns(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	for_each_intel_crtc(dev, crtc) {
		u32 reg = PIPESTAT(crtc->pipe);
		u32 pipestat;

		if (crtc->cpu_fifo_underrun_disabled)
			continue;

		pipestat = I915_READ(reg) & 0xffff0000;
		if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
			continue;

		I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
		POSTING_READ(reg);

		DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
					     enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0xffff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if (enable) {
		I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
		POSTING_READ(reg);
	} else {
		if (pipestat & PIPE_FIFO_UNDERRUN_STATUS)
			DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
	}
}

static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						 enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}

static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	if (enable) {
		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

		if (!ivb_can_enable_err_int(dev))
			return;

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

		if (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
			DRM_ERROR("uncleared fifo underrun on pipe %c\n",
				  pipe_name(pipe));
		}
	}
}

static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	assert_spin_locked(&dev_priv->irq_lock);

	if (enable)
		dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
	else
		dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 uint32_t interrupt_mask,
					 uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(dev_priv->pm.irqs_disabled))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
#define ibx_enable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), 0)

static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

	if (enable)
		ibx_enable_display_interrupt(dev_priv, bit);
	else
		ibx_disable_display_interrupt(dev_priv, bit);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(SERR_INT,
			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

		if (!cpt_can_enable_serr_int(dev))
			return;

		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
	} else {
		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

		if (I915_READ(SERR_INT) & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
			DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
				  transcoder_name(pch_transcoder));
		}
	}
}

/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, due to the fact that there's just one interrupt
 * mask/enable bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
						    enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool ret;

	assert_spin_locked(&dev_priv->irq_lock);

	ret = !intel_crtc->cpu_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev))
		i9xx_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN8(dev))
		broadwell_set_fifo_underrun_reporting(dev, pipe, enable);

done:
	return ret;
}

bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	ret = __intel_set_cpu_fifo_underrun_reporting(dev, pipe, enable);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return ret;
}

static bool __cpu_fifo_underrun_reporting_enabled(struct drm_device *dev,
						  enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	return !intel_crtc->cpu_fifo_underrun_disabled;
}

/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's
 * just one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	/*
	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
	 * has only one pch transcoder A that all pipes can use. To avoid racy
	 * pch transcoder -> pipe lookups from interrupt code simply store the
	 * underrun statistics in crtc A. Since we never expose this anywhere
	 * nor use it outside of the fifo underrun code here using the "wrong"
	 * crtc on LPT won't cause issues.
	 */

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->pch_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}
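
/*
 * PIPESTAT register layout: interrupt enable bits occupy the high 16
 * bits and the corresponding status bits the low 16, so for most
 * events enable_mask == status_mask << 16. The VLV PSR and sprite
 * flip-done bits deviate from this pattern and are handled in
 * vlv_get_pipestat_enable_mask().
 */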
static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}

void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}
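
/*
 * Worked example for the gen3/4 vblank counter fixup below (numbers
 * are made up): htotal=100, hsync_start=90, vblank_start=75. The
 * hardware frame counter increments at the start of active, but the
 * cooked-up vblank counter should increment at the start of vblank,
 * which occurs at the start of hsync on the last active line. In
 * pixels that is vbl_start = 75 * 100 - (100 - 90) = 7490, so whenever
 * the pixel counter has already reached 7490 the frame counter lags by
 * one and we add 1 to compensate.
 */
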
/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct intel_crtc *intel_crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		const struct drm_display_mode *mode =
			&intel_crtc->config.adjusted_mode;

		htotal = mode->crtc_htotal;
		hsync_start = mode->crtc_hsync_start;
		vbl_start = mode->crtc_vblank_start;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	} else {
		enum transcoder cpu_transcoder = (enum transcoder) pipe;

		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
		hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1;
		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
		if ((I915_READ(PIPECONF(cpu_transcoder)) &
		     PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	}

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))

static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
	enum pipe pipe = crtc->pipe;
	int vtotal = mode->crtc_vtotal;
	int position;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev))
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * Scanline counter increments at leading edge of hsync, and
	 * it starts counting from vtotal-1 on the first active line.
	 * That means the scanline counter value is always one less
	 * than what we would expect. Ie. just after start of vblank,
	 * which also occurs at start of hsync (on the last active line),
	 * the scanline counter will read vblank_start-1.
	 */
	return (position + 1) % vtotal;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (!intel_crtc->active) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc,
						     &to_intel_crtc(crtc)->config.adjusted_mode);
}

static bool intel_hpd_irq_event(struct drm_device *dev,
				struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	if (old_status == connector->status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.id,
		      drm_get_connector_name(connector),
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->status));

	return true;
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	unsigned long irqflags;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)
		return;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 drm_get_connector_name(connector));
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      drm_get_connector_name(connector), intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_timer(&dev_priv->hotplug_reenable_timer,
			  jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}

static void intel_hpd_irq_uninstall(struct drm_i915_private *dev_priv)
{
	del_timer_sync(&dev_priv->hotplug_reenable_timer);
}
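
/*
 * Ironlake DRPS: on a memory interrupt the driver compares the
 * hardware busyness counters against the configured min/max averages
 * and steps the GPU frequency up or down. Note that ILK delay values
 * are inverted: a lower delay means a higher frequency, so max_delay
 * is the smallest delay value that will be programmed.
 */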
static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring);

	wake_up_all(&ring->irq_queue);
	i915_queue_hangcheck(dev);
}
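
/*
 * GEN6+ RPS: up/down threshold interrupts are accumulated in
 * rps.pm_iir with the PM interrupt masked, then serviced here.
 * rps.last_adj provides stepping momentum: repeated requests in the
 * same direction double the step size.
 */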
static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	u32 pm_iir;
	int new_delay, adj;

	spin_lock_irq(&dev_priv->irq_lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	if (IS_BROADWELL(dev_priv->dev))
		bdw_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	else {
		/* Make sure not to corrupt PMIMR state used by ringbuffer */
		snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	}
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	adj = dev_priv->rps.last_adj;
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else
			adj = 1;
		new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else
			adj = -1;
		new_delay = dev_priv->rps.cur_freq + adj;
	} else { /* unknown event */
		new_delay = dev_priv->rps.cur_freq;
	}

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay = clamp_t(int, new_delay,
			    dev_priv->rps.min_freq_softlimit,
			    dev_priv->rps.max_freq_softlimit);

	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;

	if (IS_VALLEYVIEW(dev_priv->dev))
		valleyview_set_rps(dev_priv->dev, new_delay);
	else
		gen6_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	unsigned long flags;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{

	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
		i915_handle_error(dev, false, "GT error interrupt 0x%08x",
				  gt_iir);
	}

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}

static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if ((pm_iir & dev_priv->pm_rps_events) == 0)
		return;

	spin_lock(&dev_priv->irq_lock);
	dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
	bdw_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
	spin_unlock(&dev_priv->irq_lock);

	queue_work(dev_priv->wq, &dev_priv->rps.work);
}

static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
				       struct drm_i915_private *dev_priv,
				       u32 master_ctl)
{
	u32 rcs, bcs, vcs;
	uint32_t tmp = 0;
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(0));
		if (tmp) {
			ret = IRQ_HANDLED;
			rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
			if (rcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[RCS]);
			if (bcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[BCS]);
			I915_WRITE(GEN8_GT_IIR(0), tmp);
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(1));
		if (tmp) {
			ret = IRQ_HANDLED;
			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[VCS]);
			vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[VCS2]);
			I915_WRITE(GEN8_GT_IIR(1), tmp);
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_PM_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(2));
		if (tmp & dev_priv->pm_rps_events) {
			ret = IRQ_HANDLED;
			gen8_rps_irq_handler(dev_priv, tmp);
			I915_WRITE(GEN8_GT_IIR(2),
				   tmp & dev_priv->pm_rps_events);
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(3));
		if (tmp) {
			ret = IRQ_HANDLED;
			vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[VECS]);
			I915_WRITE(GEN8_GT_IIR(3), tmp);
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	return ret;
}
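
/*
 * HPD interrupt storm detection: if one pin generates more than
 * HPD_STORM_THRESHOLD interrupts within HPD_STORM_DETECT_PERIOD ms it
 * is marked as stormy, masked from interrupt generation and handed
 * over to periodic polling until the reenable timer fires (see
 * i915_hotplug_work_func() and I915_REENABLE_HOTPLUG_DELAY above).
 */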
b543fb04
EE
1524#define HPD_STORM_DETECT_PERIOD 1000
1525#define HPD_STORM_THRESHOLD 5
1526
10a504de 1527static inline void intel_hpd_irq_handler(struct drm_device *dev,
22062dba
DV
1528 u32 hotplug_trigger,
1529 const u32 *hpd)
b543fb04 1530{
2d1013dd 1531 struct drm_i915_private *dev_priv = dev->dev_private;
b543fb04 1532 int i;
10a504de 1533 bool storm_detected = false;
b543fb04 1534
91d131d2
DV
1535 if (!hotplug_trigger)
1536 return;
1537
cc9bd499
ID
1538 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
1539 hotplug_trigger);
1540
b5ea2d56 1541 spin_lock(&dev_priv->irq_lock);
b543fb04 1542 for (i = 1; i < HPD_NUM_PINS; i++) {
821450c6 1543
3ff04a16
DV
1544 if (hpd[i] & hotplug_trigger &&
1545 dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
1546 /*
1547 * On GMCH platforms the interrupt mask bits only
1548 * prevent irq generation, not the setting of the
1549 * hotplug bits itself. So only WARN about unexpected
1550 * interrupts on saner platforms.
1551 */
1552 WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
1553 "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
1554 hotplug_trigger, i, hpd[i]);
1555
1556 continue;
1557 }
b8f102e8 1558
b543fb04
EE
1559 if (!(hpd[i] & hotplug_trigger) ||
1560 dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
1561 continue;
1562
bc5ead8c 1563 dev_priv->hpd_event_bits |= (1 << i);
b543fb04
EE
1564 if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
1565 dev_priv->hpd_stats[i].hpd_last_jiffies
1566 + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
1567 dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
1568 dev_priv->hpd_stats[i].hpd_cnt = 0;
b8f102e8 1569 DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
b543fb04
EE
1570 } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
1571 dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
142e2398 1572 dev_priv->hpd_event_bits &= ~(1 << i);
b543fb04 1573 DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
10a504de 1574 storm_detected = true;
b543fb04
EE
1575 } else {
1576 dev_priv->hpd_stats[i].hpd_cnt++;
b8f102e8
EE
1577 DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
1578 dev_priv->hpd_stats[i].hpd_cnt);
b543fb04
EE
1579 }
1580 }
1581
10a504de
DV
1582 if (storm_detected)
1583 dev_priv->display.hpd_irq_setup(dev);
b5ea2d56 1584 spin_unlock(&dev_priv->irq_lock);
5876fa0d 1585
645416f5
DV
1586 /*
1587 * Our hotplug handler can grab modeset locks (by calling down into the
1588 * fb helpers). Hence it must not be run on our own dev-priv->wq work
1589 * queue for otherwise the flush_work in the pageflip code will
1590 * deadlock.
1591 */
1592 schedule_work(&dev_priv->hotplug_work);
b543fb04
EE
1593}
1594
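/*
 * Illustrative sketch, assuming only what the loop above shows: storm
 * detection is a windowed event counter per pin. A self-contained model of
 * the same logic, with hypothetical names:
 */
#if 0
struct hpd_pin_model {
	unsigned long window_start;	/* jiffies when the window opened */
	int cnt;			/* events seen inside the window */
};

static bool hpd_storm_model(struct hpd_pin_model *pin, unsigned long now)
{
	if (!time_in_range(now, pin->window_start,
			   pin->window_start +
			   msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
		pin->window_start = now;	/* window went stale: restart */
		pin->cnt = 0;
		return false;
	}
	if (pin->cnt > HPD_STORM_THRESHOLD)	/* storm: caller masks the pin */
		return true;
	pin->cnt++;
	return false;
}
#endif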
515ac2bb
DV
1595static void gmbus_irq_handler(struct drm_device *dev)
1596{
2d1013dd 1597 struct drm_i915_private *dev_priv = dev->dev_private;
28c70f16 1598
28c70f16 1599 wake_up_all(&dev_priv->gmbus_wait_queue);
515ac2bb
DV
1600}
1601
ce99c256
DV
1602static void dp_aux_irq_handler(struct drm_device *dev)
1603{
2d1013dd 1604 struct drm_i915_private *dev_priv = dev->dev_private;
9ee32fea 1605
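	/* DP AUX waiters sleep on the GMBUS wait queue, hence this wakeup. */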
9ee32fea 1606 wake_up_all(&dev_priv->gmbus_wait_queue);
ce99c256
DV
1607}
1608
8bf1e9f1 1609#if defined(CONFIG_DEBUG_FS)
277de95e
DV
1610static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1611 uint32_t crc0, uint32_t crc1,
1612 uint32_t crc2, uint32_t crc3,
1613 uint32_t crc4)
8bf1e9f1
SH
1614{
1615 struct drm_i915_private *dev_priv = dev->dev_private;
1616 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1617 struct intel_pipe_crc_entry *entry;
ac2300d4 1618 int head, tail;
b2c88f5b 1619
d538bbdf
DL
1620 spin_lock(&pipe_crc->lock);
1621
0c912c79 1622 if (!pipe_crc->entries) {
d538bbdf 1623 spin_unlock(&pipe_crc->lock);
0c912c79
DL
1624 DRM_ERROR("spurious interrupt\n");
1625 return;
1626 }
1627
d538bbdf
DL
1628 head = pipe_crc->head;
1629 tail = pipe_crc->tail;
b2c88f5b
DL
1630
1631 if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
d538bbdf 1632 spin_unlock(&pipe_crc->lock);
b2c88f5b
DL
1633 DRM_ERROR("CRC buffer overflowing\n");
1634 return;
1635 }
1636
1637 entry = &pipe_crc->entries[head];
8bf1e9f1 1638
8bc5e955 1639 entry->frame = dev->driver->get_vblank_counter(dev, pipe);
eba94eb9
DV
1640 entry->crc[0] = crc0;
1641 entry->crc[1] = crc1;
1642 entry->crc[2] = crc2;
1643 entry->crc[3] = crc3;
1644 entry->crc[4] = crc4;
b2c88f5b
DL
1645
1646 head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
d538bbdf
DL
1647 pipe_crc->head = head;
1648
1649 spin_unlock(&pipe_crc->lock);
07144428
DL
1650
1651 wake_up_interruptible(&pipe_crc->wq);
8bf1e9f1 1652}
277de95e
DV
1653#else
1654static inline void
1655display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1656 uint32_t crc0, uint32_t crc1,
1657 uint32_t crc2, uint32_t crc3,
1658 uint32_t crc4) {}
1659#endif
1660
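/*
 * Illustrative sketch: the entries array above is a power-of-two ring buffer
 * in the linux/circ_buf.h style, with the irq handler as the sole producer.
 * The core push operation, using a hypothetical size constant:
 */
#if 0
#define CRC_RING_NR 128		/* power of two, so & works as modulo */

static bool crc_ring_push(struct intel_pipe_crc *pipe_crc,
			  const struct intel_pipe_crc_entry *e)
{
	int head = pipe_crc->head, tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, CRC_RING_NR) < 1)
		return false;			/* full: drop, as above */

	pipe_crc->entries[head] = *e;
	pipe_crc->head = (head + 1) & (CRC_RING_NR - 1);	/* wrap */
	return true;
}
#endif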
eba94eb9 1661
277de95e 1662static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
5a69b89f
DV
1663{
1664 struct drm_i915_private *dev_priv = dev->dev_private;
1665
277de95e
DV
1666 display_pipe_crc_irq_handler(dev, pipe,
1667 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1668 0, 0, 0, 0);
5a69b89f
DV
1669}
1670
277de95e 1671static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
eba94eb9
DV
1672{
1673 struct drm_i915_private *dev_priv = dev->dev_private;
1674
277de95e
DV
1675 display_pipe_crc_irq_handler(dev, pipe,
1676 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1677 I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1678 I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1679 I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1680 I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
eba94eb9 1681}
5b3a856b 1682
277de95e 1683static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
5b3a856b
DV
1684{
1685 struct drm_i915_private *dev_priv = dev->dev_private;
0b5c5ed0
DV
1686 uint32_t res1, res2;
1687
1688 if (INTEL_INFO(dev)->gen >= 3)
1689 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1690 else
1691 res1 = 0;
1692
1693 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
1694 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1695 else
1696 res2 = 0;
5b3a856b 1697
277de95e
DV
1698 display_pipe_crc_irq_handler(dev, pipe,
1699 I915_READ(PIPE_CRC_RES_RED(pipe)),
1700 I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1701 I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1702 res1, res2);
5b3a856b 1703}
8bf1e9f1 1704
1403c0d4
PZ
1705/* The RPS events need forcewake, so we add them to a work queue and mask their
1706 * IMR bits until the work is done. Other interrupts can be processed without
1707 * the work queue. */
1708static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
baf02a1f 1709{
a6706b45 1710 if (pm_iir & dev_priv->pm_rps_events) {
59cdb63d 1711 spin_lock(&dev_priv->irq_lock);
a6706b45
D
1712 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1713 snb_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
59cdb63d 1714 spin_unlock(&dev_priv->irq_lock);
2adbee62
DV
1715
1716 queue_work(dev_priv->wq, &dev_priv->rps.work);
baf02a1f 1717 }
baf02a1f 1718
1403c0d4
PZ
1719 if (HAS_VEBOX(dev_priv->dev)) {
1720 if (pm_iir & PM_VEBOX_USER_INTERRUPT)
1721 notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
12638c57 1722
1403c0d4 1723 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
58174462
MK
1724 i915_handle_error(dev_priv->dev, false,
1725 "VEBOX CS error interrupt 0x%08x",
1726 pm_iir);
1403c0d4 1727 }
12638c57 1728 }
baf02a1f
BW
1729}
1730
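/*
 * Illustrative sketch of the latch-mask-defer idiom above, used for
 * interrupts whose servicing must sleep (forcewake). All names below are
 * hypothetical:
 */
#if 0
static void slow_evt_irq(struct my_dev *md, u32 iir)
{
	spin_lock(&md->lock);
	md->pending |= iir & md->slow_mask;	/* remember what fired */
	mask_irqs(md, iir & md->slow_mask);	/* keep it quiet meanwhile */
	spin_unlock(&md->lock);

	queue_work(md->wq, &md->slow_work);	/* worker unmasks when done */
}
#endif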
8d7849db
VS
1731static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
1732{
1733 struct intel_crtc *crtc;
1734
1735 if (!drm_handle_vblank(dev, pipe))
1736 return false;
1737
1738 crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
1739 wake_up(&crtc->vbl_wait);
1740
1741 return true;
1742}
1743
c1874ed7
ID
1744static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
1745{
1746 struct drm_i915_private *dev_priv = dev->dev_private;
91d181dd 1747 u32 pipe_stats[I915_MAX_PIPES] = { };
c1874ed7
ID
1748 int pipe;
1749
58ead0d7 1750 spin_lock(&dev_priv->irq_lock);
c1874ed7 1751 for_each_pipe(pipe) {
91d181dd 1752 int reg;
bbb5eebf 1753 u32 mask, iir_bit = 0;
91d181dd 1754
bbb5eebf
DV
1755 /*
1756 * PIPESTAT bits get signalled even when the interrupt is
1757 * disabled with the mask bits, and some of the status bits do
1758 * not generate interrupts at all (like the underrun bit). Hence
1759 * we need to be careful that we only handle what we want to
1760 * handle.
1761 */
1762 mask = 0;
1763 if (__cpu_fifo_underrun_reporting_enabled(dev, pipe))
1764 mask |= PIPE_FIFO_UNDERRUN_STATUS;
1765
1766 switch (pipe) {
1767 case PIPE_A:
1768 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1769 break;
1770 case PIPE_B:
1771 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1772 break;
3278f67f
VS
1773 case PIPE_C:
1774 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1775 break;
bbb5eebf
DV
1776 }
1777 if (iir & iir_bit)
1778 mask |= dev_priv->pipestat_irq_mask[pipe];
1779
1780 if (!mask)
91d181dd
ID
1781 continue;
1782
1783 reg = PIPESTAT(pipe);
bbb5eebf
DV
1784 mask |= PIPESTAT_INT_ENABLE_MASK;
1785 pipe_stats[pipe] = I915_READ(reg) & mask;
c1874ed7
ID
1786
1787 /*
1788 * Clear the PIPE*STAT regs before the IIR
1789 */
91d181dd
ID
1790 if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
1791 PIPESTAT_INT_STATUS_MASK))
c1874ed7
ID
1792 I915_WRITE(reg, pipe_stats[pipe]);
1793 }
58ead0d7 1794 spin_unlock(&dev_priv->irq_lock);
c1874ed7
ID
1795
1796 for_each_pipe(pipe) {
1797 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
8d7849db 1798 intel_pipe_handle_vblank(dev, pipe);
c1874ed7 1799
579a9b0e 1800 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
c1874ed7
ID
1801 intel_prepare_page_flip(dev, pipe);
1802 intel_finish_page_flip(dev, pipe);
1803 }
1804
1805 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1806 i9xx_pipe_crc_irq_handler(dev, pipe);
1807
1808 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
1809 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
1810 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
1811 }
1812
1813 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1814 gmbus_irq_handler(dev);
1815}
1816
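/*
 * Note in code form, an assumption from the handling above: PIPESTAT packs
 * enable bits (high half) alongside write-one-to-clear status bits (low
 * half) in a single register, which is why the ack writes the sampled value
 * straight back:
 */
#if 0
	u32 val = I915_READ(PIPESTAT(pipe));
	I915_WRITE(PIPESTAT(pipe), val);	/* clears statuses, keeps enables */
#endif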
16c6c56b
VS
1817static void i9xx_hpd_irq_handler(struct drm_device *dev)
1818{
1819 struct drm_i915_private *dev_priv = dev->dev_private;
1820 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
1821
1822 if (IS_G4X(dev)) {
1823 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1824
1825 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_g4x);
1826 } else {
1827 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1828
1829 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
1830 }
1831
1832 if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
1833 hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1834 dp_aux_irq_handler(dev);
1835
1836 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1837 /*
1838 * Make sure hotplug status is cleared before we clear IIR, or else we
1839 * may miss hotplug events.
1840 */
1841 POSTING_READ(PORT_HOTPLUG_STAT);
1842}
1843
ff1f525e 1844static irqreturn_t valleyview_irq_handler(int irq, void *arg)
7e231dbe 1845{
45a83f84 1846 struct drm_device *dev = arg;
2d1013dd 1847 struct drm_i915_private *dev_priv = dev->dev_private;
7e231dbe
JB
1848 u32 iir, gt_iir, pm_iir;
1849 irqreturn_t ret = IRQ_NONE;
7e231dbe 1850
7e231dbe
JB
1851 while (true) {
1852 iir = I915_READ(VLV_IIR);
1853 gt_iir = I915_READ(GTIIR);
1854 pm_iir = I915_READ(GEN6_PMIIR);
1855
1856 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1857 goto out;
1858
1859 ret = IRQ_HANDLED;
1860
e7b4c6b1 1861 snb_gt_irq_handler(dev, dev_priv, gt_iir);
7e231dbe 1862
c1874ed7 1863 valleyview_pipestat_irq_handler(dev, iir);
31acc7f5 1864
7e231dbe 1865 /* Consume port. Then clear IIR or we'll miss events */
16c6c56b
VS
1866 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1867 i9xx_hpd_irq_handler(dev);
7e231dbe 1868
60611c13 1869 if (pm_iir)
d0ecd7e2 1870 gen6_rps_irq_handler(dev_priv, pm_iir);
7e231dbe
JB
1871
1872 I915_WRITE(GTIIR, gt_iir);
1873 I915_WRITE(GEN6_PMIIR, pm_iir);
1874 I915_WRITE(VLV_IIR, iir);
1875 }
1876
1877out:
1878 return ret;
1879}
1880
43f328d7
VS
1881static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1882{
45a83f84 1883 struct drm_device *dev = arg;
43f328d7
VS
1884 struct drm_i915_private *dev_priv = dev->dev_private;
1885 u32 master_ctl, iir;
1886 irqreturn_t ret = IRQ_NONE;
43f328d7 1887
8e5fd599
VS
1888 for (;;) {
1889 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1890 iir = I915_READ(VLV_IIR);
43f328d7 1891
8e5fd599
VS
1892 if (master_ctl == 0 && iir == 0)
1893 break;
43f328d7 1894
8e5fd599 1895 I915_WRITE(GEN8_MASTER_IRQ, 0);
43f328d7 1896
8e5fd599 1897 gen8_gt_irq_handler(dev, dev_priv, master_ctl);
43f328d7 1898
8e5fd599 1899 valleyview_pipestat_irq_handler(dev, iir);
43f328d7 1900
8e5fd599 1901 /* Consume port. Then clear IIR or we'll miss events */
3278f67f 1902 i9xx_hpd_irq_handler(dev);
43f328d7 1903
8e5fd599 1904 I915_WRITE(VLV_IIR, iir);
43f328d7 1905
8e5fd599
VS
1906 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
1907 POSTING_READ(GEN8_MASTER_IRQ);
43f328d7 1908
8e5fd599
VS
1909 ret = IRQ_HANDLED;
1910 }
3278f67f 1911
43f328d7
VS
1912 return ret;
1913}
1914
23e81d69 1915static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
776ad806 1916{
2d1013dd 1917 struct drm_i915_private *dev_priv = dev->dev_private;
9db4a9c7 1918 int pipe;
b543fb04 1919 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
776ad806 1920
91d131d2
DV
1921 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
1922
cfc33bf7
VS
1923 if (pch_iir & SDE_AUDIO_POWER_MASK) {
1924 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1925 SDE_AUDIO_POWER_SHIFT);
776ad806 1926 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
cfc33bf7
VS
1927 port_name(port));
1928 }
776ad806 1929
ce99c256
DV
1930 if (pch_iir & SDE_AUX_MASK)
1931 dp_aux_irq_handler(dev);
1932
776ad806 1933 if (pch_iir & SDE_GMBUS)
515ac2bb 1934 gmbus_irq_handler(dev);
776ad806
JB
1935
1936 if (pch_iir & SDE_AUDIO_HDCP_MASK)
1937 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1938
1939 if (pch_iir & SDE_AUDIO_TRANS_MASK)
1940 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1941
1942 if (pch_iir & SDE_POISON)
1943 DRM_ERROR("PCH poison interrupt\n");
1944
9db4a9c7
JB
1945 if (pch_iir & SDE_FDI_MASK)
1946 for_each_pipe(pipe)
1947 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
1948 pipe_name(pipe),
1949 I915_READ(FDI_RX_IIR(pipe)));
776ad806
JB
1950
1951 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1952 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1953
1954 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1955 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1956
776ad806 1957 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
8664281b
PZ
1958 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
1959 false))
fc2c807b 1960 DRM_ERROR("PCH transcoder A FIFO underrun\n");
8664281b
PZ
1961
1962 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1963 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
1964 false))
fc2c807b 1965 DRM_ERROR("PCH transcoder B FIFO underrun\n");
8664281b
PZ
1966}
1967
1968static void ivb_err_int_handler(struct drm_device *dev)
1969{
1970 struct drm_i915_private *dev_priv = dev->dev_private;
1971 u32 err_int = I915_READ(GEN7_ERR_INT);
5a69b89f 1972 enum pipe pipe;
8664281b 1973
de032bf4
PZ
1974 if (err_int & ERR_INT_POISON)
1975 DRM_ERROR("Poison interrupt\n");
1976
5a69b89f
DV
1977 for_each_pipe(pipe) {
1978 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
1979 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
1980 false))
fc2c807b
VS
1981 DRM_ERROR("Pipe %c FIFO underrun\n",
1982 pipe_name(pipe));
5a69b89f 1983 }
8bf1e9f1 1984
5a69b89f
DV
1985 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1986 if (IS_IVYBRIDGE(dev))
277de95e 1987 ivb_pipe_crc_irq_handler(dev, pipe);
5a69b89f 1988 else
277de95e 1989 hsw_pipe_crc_irq_handler(dev, pipe);
5a69b89f
DV
1990 }
1991 }
8bf1e9f1 1992
8664281b
PZ
1993 I915_WRITE(GEN7_ERR_INT, err_int);
1994}
1995
1996static void cpt_serr_int_handler(struct drm_device *dev)
1997{
1998 struct drm_i915_private *dev_priv = dev->dev_private;
1999 u32 serr_int = I915_READ(SERR_INT);
2000
de032bf4
PZ
2001 if (serr_int & SERR_INT_POISON)
2002 DRM_ERROR("PCH poison interrupt\n");
2003
8664281b
PZ
2004 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
2005 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
2006 false))
fc2c807b 2007 DRM_ERROR("PCH transcoder A FIFO underrun\n");
8664281b
PZ
2008
2009 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
2010 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
2011 false))
fc2c807b 2012 DRM_ERROR("PCH transcoder B FIFO underrun\n");
8664281b
PZ
2013
2014 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
2015 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
2016 false))
fc2c807b 2017 DRM_ERROR("PCH transcoder C FIFO underrun\n");
8664281b
PZ
2018
2019 I915_WRITE(SERR_INT, serr_int);
776ad806
JB
2020}
2021
23e81d69
AJ
2022static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
2023{
2d1013dd 2024 struct drm_i915_private *dev_priv = dev->dev_private;
23e81d69 2025 int pipe;
b543fb04 2026 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
23e81d69 2027
91d131d2
DV
2028 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
2029
cfc33bf7
VS
2030 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
2031 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
2032 SDE_AUDIO_POWER_SHIFT_CPT);
2033 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2034 port_name(port));
2035 }
23e81d69
AJ
2036
2037 if (pch_iir & SDE_AUX_MASK_CPT)
ce99c256 2038 dp_aux_irq_handler(dev);
23e81d69
AJ
2039
2040 if (pch_iir & SDE_GMBUS_CPT)
515ac2bb 2041 gmbus_irq_handler(dev);
23e81d69
AJ
2042
2043 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
2044 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
2045
2046 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
2047 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
2048
2049 if (pch_iir & SDE_FDI_MASK_CPT)
2050 for_each_pipe(pipe)
2051 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
2052 pipe_name(pipe),
2053 I915_READ(FDI_RX_IIR(pipe)));
8664281b
PZ
2054
2055 if (pch_iir & SDE_ERROR_CPT)
2056 cpt_serr_int_handler(dev);
23e81d69
AJ
2057}
2058
c008bc6e
PZ
2059static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
2060{
2061 struct drm_i915_private *dev_priv = dev->dev_private;
40da17c2 2062 enum pipe pipe;
c008bc6e
PZ
2063
2064 if (de_iir & DE_AUX_CHANNEL_A)
2065 dp_aux_irq_handler(dev);
2066
2067 if (de_iir & DE_GSE)
2068 intel_opregion_asle_intr(dev);
2069
c008bc6e
PZ
2070 if (de_iir & DE_POISON)
2071 DRM_ERROR("Poison interrupt\n");
2072
40da17c2
DV
2073 for_each_pipe(pipe) {
2074 if (de_iir & DE_PIPE_VBLANK(pipe))
8d7849db 2075 intel_pipe_handle_vblank(dev, pipe);
5b3a856b 2076
40da17c2
DV
2077 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2078 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
fc2c807b
VS
2079 DRM_ERROR("Pipe %c FIFO underrun\n",
2080 pipe_name(pipe));
5b3a856b 2081
40da17c2
DV
2082 if (de_iir & DE_PIPE_CRC_DONE(pipe))
2083 i9xx_pipe_crc_irq_handler(dev, pipe);
c008bc6e 2084
40da17c2
DV
2085 /* plane/pipes map 1:1 on ilk+ */
2086 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
2087 intel_prepare_page_flip(dev, pipe);
2088 intel_finish_page_flip_plane(dev, pipe);
2089 }
c008bc6e
PZ
2090 }
2091
2092 /* check event from PCH */
2093 if (de_iir & DE_PCH_EVENT) {
2094 u32 pch_iir = I915_READ(SDEIIR);
2095
2096 if (HAS_PCH_CPT(dev))
2097 cpt_irq_handler(dev, pch_iir);
2098 else
2099 ibx_irq_handler(dev, pch_iir);
2100
2101 /* should clear PCH hotplug event before clear CPU irq */
2102 I915_WRITE(SDEIIR, pch_iir);
2103 }
2104
2105 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
2106 ironlake_rps_change_irq_handler(dev);
2107}
2108
9719fb98
PZ
2109static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
2110{
2111 struct drm_i915_private *dev_priv = dev->dev_private;
07d27e20 2112 enum pipe pipe;
9719fb98
PZ
2113
2114 if (de_iir & DE_ERR_INT_IVB)
2115 ivb_err_int_handler(dev);
2116
2117 if (de_iir & DE_AUX_CHANNEL_A_IVB)
2118 dp_aux_irq_handler(dev);
2119
2120 if (de_iir & DE_GSE_IVB)
2121 intel_opregion_asle_intr(dev);
2122
07d27e20
DL
2123 for_each_pipe(pipe) {
2124 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
8d7849db 2125 intel_pipe_handle_vblank(dev, pipe);
40da17c2
DV
2126
2127 /* plane/pipes map 1:1 on ilk+ */
07d27e20
DL
2128 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
2129 intel_prepare_page_flip(dev, pipe);
2130 intel_finish_page_flip_plane(dev, pipe);
9719fb98
PZ
2131 }
2132 }
2133
2134 /* check event from PCH */
2135 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
2136 u32 pch_iir = I915_READ(SDEIIR);
2137
2138 cpt_irq_handler(dev, pch_iir);
2139
2140 /* clear PCH hotplug event before clear CPU irq */
2141 I915_WRITE(SDEIIR, pch_iir);
2142 }
2143}
2144
f1af8fc1 2145static irqreturn_t ironlake_irq_handler(int irq, void *arg)
b1f14ad0 2146{
45a83f84 2147 struct drm_device *dev = arg;
2d1013dd 2148 struct drm_i915_private *dev_priv = dev->dev_private;
f1af8fc1 2149 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
0e43406b 2150 irqreturn_t ret = IRQ_NONE;
b1f14ad0 2151
8664281b
PZ
2152 /* We get interrupts on unclaimed registers, so check for this before we
2153 * do any I915_{READ,WRITE}. */
907b28c5 2154 intel_uncore_check_errors(dev);
8664281b 2155
b1f14ad0
JB
2156 /* disable master interrupt before clearing iir */
2157 de_ier = I915_READ(DEIER);
2158 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
23a78516 2159 POSTING_READ(DEIER);
b1f14ad0 2160
44498aea
PZ
2161 /* Disable south interrupts. We'll only write to SDEIIR once, so further
2162 * interrupts will be stored on its back queue, and then we'll be
2163 * able to process them after we restore SDEIER (as soon as we restore
2164 * it, we'll get an interrupt if SDEIIR still has something to process
2165 * due to its back queue). */
ab5c608b
BW
2166 if (!HAS_PCH_NOP(dev)) {
2167 sde_ier = I915_READ(SDEIER);
2168 I915_WRITE(SDEIER, 0);
2169 POSTING_READ(SDEIER);
2170 }
44498aea 2171
b1f14ad0 2172 gt_iir = I915_READ(GTIIR);
0e43406b 2173 if (gt_iir) {
d8fc8a47 2174 if (INTEL_INFO(dev)->gen >= 6)
f1af8fc1 2175 snb_gt_irq_handler(dev, dev_priv, gt_iir);
d8fc8a47
PZ
2176 else
2177 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
0e43406b
CW
2178 I915_WRITE(GTIIR, gt_iir);
2179 ret = IRQ_HANDLED;
b1f14ad0
JB
2180 }
2181
0e43406b
CW
2182 de_iir = I915_READ(DEIIR);
2183 if (de_iir) {
f1af8fc1
PZ
2184 if (INTEL_INFO(dev)->gen >= 7)
2185 ivb_display_irq_handler(dev, de_iir);
2186 else
2187 ilk_display_irq_handler(dev, de_iir);
0e43406b
CW
2188 I915_WRITE(DEIIR, de_iir);
2189 ret = IRQ_HANDLED;
b1f14ad0
JB
2190 }
2191
f1af8fc1
PZ
2192 if (INTEL_INFO(dev)->gen >= 6) {
2193 u32 pm_iir = I915_READ(GEN6_PMIIR);
2194 if (pm_iir) {
1403c0d4 2195 gen6_rps_irq_handler(dev_priv, pm_iir);
f1af8fc1
PZ
2196 I915_WRITE(GEN6_PMIIR, pm_iir);
2197 ret = IRQ_HANDLED;
2198 }
0e43406b 2199 }
b1f14ad0 2200
b1f14ad0
JB
2201 I915_WRITE(DEIER, de_ier);
2202 POSTING_READ(DEIER);
ab5c608b
BW
2203 if (!HAS_PCH_NOP(dev)) {
2204 I915_WRITE(SDEIER, sde_ier);
2205 POSTING_READ(SDEIER);
2206 }
b1f14ad0
JB
2207
2208 return ret;
2209}
2210
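/*
 * Illustrative sketch of the SDEIER dance above: parking the south enable
 * register makes concurrently raised PCH interrupts queue up in SDEIIR
 * instead of being lost, and restoring it re-fires whatever queued.
 */
#if 0
	u32 saved_sde_ier = I915_READ(SDEIER);
	I915_WRITE(SDEIER, 0);			/* park south interrupts */
	POSTING_READ(SDEIER);

	/* ... read and ack SDEIIR exactly once ... */

	I915_WRITE(SDEIER, saved_sde_ier);	/* replay anything pending */
	POSTING_READ(SDEIER);
#endif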
abd58f01
BW
2211static irqreturn_t gen8_irq_handler(int irq, void *arg)
2212{
2213 struct drm_device *dev = arg;
2214 struct drm_i915_private *dev_priv = dev->dev_private;
2215 u32 master_ctl;
2216 irqreturn_t ret = IRQ_NONE;
2217 uint32_t tmp = 0;
c42664cc 2218 enum pipe pipe;
abd58f01 2219
abd58f01
BW
2220 master_ctl = I915_READ(GEN8_MASTER_IRQ);
2221 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2222 if (!master_ctl)
2223 return IRQ_NONE;
2224
2225 I915_WRITE(GEN8_MASTER_IRQ, 0);
2226 POSTING_READ(GEN8_MASTER_IRQ);
2227
2228 ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
2229
2230 if (master_ctl & GEN8_DE_MISC_IRQ) {
2231 tmp = I915_READ(GEN8_DE_MISC_IIR);
2232 if (tmp & GEN8_DE_MISC_GSE)
2233 intel_opregion_asle_intr(dev);
2234 else if (tmp)
2235 DRM_ERROR("Unexpected DE Misc interrupt\n");
2236 else
2237 DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2238
2239 if (tmp) {
2240 I915_WRITE(GEN8_DE_MISC_IIR, tmp);
2241 ret = IRQ_HANDLED;
2242 }
2243 }
2244
6d766f02
DV
2245 if (master_ctl & GEN8_DE_PORT_IRQ) {
2246 tmp = I915_READ(GEN8_DE_PORT_IIR);
2247 if (tmp & GEN8_AUX_CHANNEL_A)
2248 dp_aux_irq_handler(dev);
2249 else if (tmp)
2250 DRM_ERROR("Unexpected DE Port interrupt\n");
2251 else
2252 DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2253
2254 if (tmp) {
2255 I915_WRITE(GEN8_DE_PORT_IIR, tmp);
2256 ret = IRQ_HANDLED;
2257 }
2258 }
2259
c42664cc
DV
2260 for_each_pipe(pipe) {
2261 uint32_t pipe_iir;
abd58f01 2262
c42664cc
DV
2263 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2264 continue;
abd58f01 2265
c42664cc
DV
2266 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2267 if (pipe_iir & GEN8_PIPE_VBLANK)
8d7849db 2268 intel_pipe_handle_vblank(dev, pipe);
abd58f01 2269
d0e1f1cb 2270 if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) {
c42664cc
DV
2271 intel_prepare_page_flip(dev, pipe);
2272 intel_finish_page_flip_plane(dev, pipe);
abd58f01 2273 }
c42664cc 2274
0fbe7870
DV
2275 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2276 hsw_pipe_crc_irq_handler(dev, pipe);
2277
38d83c96
DV
2278 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
2279 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
2280 false))
fc2c807b
VS
2281 DRM_ERROR("Pipe %c FIFO underrun\n",
2282 pipe_name(pipe));
38d83c96
DV
2283 }
2284
30100f2b
DV
2285 if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
2286 DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
2287 pipe_name(pipe),
2288 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
2289 }
c42664cc
DV
2290
2291 if (pipe_iir) {
2292 ret = IRQ_HANDLED;
2293 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
2294 } else
abd58f01
BW
2295 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2296 }
2297
92d03a80
DV
2298 if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
2299 /*
2300 * FIXME(BDW): Assume for now that the new interrupt handling
2301 * scheme also closed the SDE interrupt handling race we've seen
2302 * on older pch-split platforms. But this needs testing.
2303 */
2304 u32 pch_iir = I915_READ(SDEIIR);
2305
2306 cpt_irq_handler(dev, pch_iir);
2307
2308 if (pch_iir) {
2309 I915_WRITE(SDEIIR, pch_iir);
2310 ret = IRQ_HANDLED;
2311 }
2312 }
2313
abd58f01
BW
2314 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2315 POSTING_READ(GEN8_MASTER_IRQ);
2316
2317 return ret;
2318}
2319
17e1df07
DV
2320static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2321 bool reset_completed)
2322{
2323 struct intel_ring_buffer *ring;
2324 int i;
2325
2326 /*
2327 * Notify all waiters for GPU completion events that reset state has
2328 * been changed, and that they need to restart their wait after
2329 * checking for potential errors (and bail out to drop locks if there is
2330 * a gpu reset pending so that i915_error_work_func can acquire them).
2331 */
2332
2333 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2334 for_each_ring(ring, dev_priv, i)
2335 wake_up_all(&ring->irq_queue);
2336
2337 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2338 wake_up_all(&dev_priv->pending_flip_queue);
2339
2340 /*
2341 * Signal tasks blocked in i915_gem_wait_for_error that the pending
2342 * reset state is cleared.
2343 */
2344 if (reset_completed)
2345 wake_up_all(&dev_priv->gpu_error.reset_queue);
2346}
2347
8a905236
JB
2348/**
2349 * i915_error_work_func - do process context error handling work
2350 * @work: work struct
2351 *
2352 * Fire an error uevent so userspace can see that a hang or error
2353 * was detected.
2354 */
2355static void i915_error_work_func(struct work_struct *work)
2356{
1f83fee0
DV
2357 struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
2358 work);
2d1013dd
JN
2359 struct drm_i915_private *dev_priv =
2360 container_of(error, struct drm_i915_private, gpu_error);
8a905236 2361 struct drm_device *dev = dev_priv->dev;
cce723ed
BW
2362 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2363 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2364 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
17e1df07 2365 int ret;
8a905236 2366
5bdebb18 2367 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
f316a42c 2368
7db0ba24
DV
2369 /*
2370 * Note that there's only one work item which does gpu resets, so we
2371 * need not worry about concurrent gpu resets potentially incrementing
2372 * error->reset_counter twice. We only need to take care of another
2373 * racing irq/hangcheck declaring the gpu dead for a second time. A
2374 * quick check for that is good enough: schedule_work ensures the
2375 * correct ordering between hang detection and this work item, and since
2376 * the reset in-progress bit is only ever set by code outside of this
2377 * work, we don't need to worry about any other races.
2378 */
2379 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
f803aa55 2380 DRM_DEBUG_DRIVER("resetting chip\n");
5bdebb18 2381 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
7db0ba24 2382 reset_event);
1f83fee0 2383
f454c694
ID
2384 /*
2385 * In most cases it's guaranteed that we get here with an RPM
2386 * reference held, for example because there is a pending GPU
2387 * request that won't finish until the reset is done. This
2388 * isn't the case at least when we get here by doing a
2389 * simulated reset via debugfs, so get an RPM reference.
2390 */
2391 intel_runtime_pm_get(dev_priv);
17e1df07
DV
2392 /*
2393 * All state reset _must_ be completed before we update the
2394 * reset counter, for otherwise waiters might miss the reset
2395 * pending state and not properly drop locks, resulting in
2396 * deadlocks with the reset work.
2397 */
f69061be
DV
2398 ret = i915_reset(dev);
2399
17e1df07
DV
2400 intel_display_handle_reset(dev);
2401
f454c694
ID
2402 intel_runtime_pm_put(dev_priv);
2403
f69061be
DV
2404 if (ret == 0) {
2405 /*
2406 * After all the gem state is reset, increment the reset
2407 * counter and wake up everyone waiting for the reset to
2408 * complete.
2409 *
2410 * Since unlock operations are a one-sided barrier only,
2411 * we need to insert a barrier here to order any seqno
2412 * updates before
2413 * the counter increment.
2414 */
2415 smp_mb__before_atomic_inc();
2416 atomic_inc(&dev_priv->gpu_error.reset_counter);
2417
5bdebb18 2418 kobject_uevent_env(&dev->primary->kdev->kobj,
f69061be 2419 KOBJ_CHANGE, reset_done_event);
1f83fee0 2420 } else {
2ac0f450 2421 atomic_set_mask(I915_WEDGED, &error->reset_counter);
f316a42c 2422 }
1f83fee0 2423
17e1df07
DV
2424 /*
2425 * Note: The wake_up also serves as a memory barrier so that
2426 * waiters see the update value of the reset counter atomic_t.
2427 */
2428 i915_error_wake_up(dev_priv, true);
f316a42c 2429 }
8a905236
JB
2430}
2431
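/*
 * Illustrative sketch of the reset_counter protocol implied above (an
 * assumption inferred from the i915_reset_in_progress() and
 * i915_terminally_wedged() usage, not spelled out here): the low bit flags
 * a reset in flight, so a single atomic_inc both clears that flag and bumps
 * the generation count that waiters compare against.
 */
#if 0
	/* hang detection: */
	atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG, &counter);	/* counter odd */

	/* reset worker, on success: */
	atomic_inc(&counter);		/* counter even again, generation + 1 */

	/* unrecoverable failure: */
	atomic_set_mask(I915_WEDGED, &counter);
#endif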
35aed2e6 2432static void i915_report_and_clear_eir(struct drm_device *dev)
8a905236
JB
2433{
2434 struct drm_i915_private *dev_priv = dev->dev_private;
bd9854f9 2435 uint32_t instdone[I915_NUM_INSTDONE_REG];
8a905236 2436 u32 eir = I915_READ(EIR);
050ee91f 2437 int pipe, i;
8a905236 2438
35aed2e6
CW
2439 if (!eir)
2440 return;
8a905236 2441
a70491cc 2442 pr_err("render error detected, EIR: 0x%08x\n", eir);
8a905236 2443
bd9854f9
BW
2444 i915_get_extra_instdone(dev, instdone);
2445
8a905236
JB
2446 if (IS_G4X(dev)) {
2447 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2448 u32 ipeir = I915_READ(IPEIR_I965);
2449
a70491cc
JP
2450 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2451 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
050ee91f
BW
2452 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2453 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
a70491cc 2454 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
a70491cc 2455 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
8a905236 2456 I915_WRITE(IPEIR_I965, ipeir);
3143a2bf 2457 POSTING_READ(IPEIR_I965);
8a905236
JB
2458 }
2459 if (eir & GM45_ERROR_PAGE_TABLE) {
2460 u32 pgtbl_err = I915_READ(PGTBL_ER);
a70491cc
JP
2461 pr_err("page table error\n");
2462 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
8a905236 2463 I915_WRITE(PGTBL_ER, pgtbl_err);
3143a2bf 2464 POSTING_READ(PGTBL_ER);
8a905236
JB
2465 }
2466 }
2467
a6c45cf0 2468 if (!IS_GEN2(dev)) {
8a905236
JB
2469 if (eir & I915_ERROR_PAGE_TABLE) {
2470 u32 pgtbl_err = I915_READ(PGTBL_ER);
a70491cc
JP
2471 pr_err("page table error\n");
2472 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
8a905236 2473 I915_WRITE(PGTBL_ER, pgtbl_err);
3143a2bf 2474 POSTING_READ(PGTBL_ER);
8a905236
JB
2475 }
2476 }
2477
2478 if (eir & I915_ERROR_MEMORY_REFRESH) {
a70491cc 2479 pr_err("memory refresh error:\n");
9db4a9c7 2480 for_each_pipe(pipe)
a70491cc 2481 pr_err("pipe %c stat: 0x%08x\n",
9db4a9c7 2482 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
8a905236
JB
2483 /* pipestat has already been acked */
2484 }
2485 if (eir & I915_ERROR_INSTRUCTION) {
a70491cc
JP
2486 pr_err("instruction error\n");
2487 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
050ee91f
BW
2488 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2489 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
a6c45cf0 2490 if (INTEL_INFO(dev)->gen < 4) {
8a905236
JB
2491 u32 ipeir = I915_READ(IPEIR);
2492
a70491cc
JP
2493 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
2494 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
a70491cc 2495 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
8a905236 2496 I915_WRITE(IPEIR, ipeir);
3143a2bf 2497 POSTING_READ(IPEIR);
8a905236
JB
2498 } else {
2499 u32 ipeir = I915_READ(IPEIR_I965);
2500
a70491cc
JP
2501 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2502 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
a70491cc 2503 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
a70491cc 2504 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
8a905236 2505 I915_WRITE(IPEIR_I965, ipeir);
3143a2bf 2506 POSTING_READ(IPEIR_I965);
8a905236
JB
2507 }
2508 }
2509
2510 I915_WRITE(EIR, eir);
3143a2bf 2511 POSTING_READ(EIR);
8a905236
JB
2512 eir = I915_READ(EIR);
2513 if (eir) {
2514 /*
2515 * some errors might have become stuck,
2516 * mask them.
2517 */
2518 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2519 I915_WRITE(EMR, I915_READ(EMR) | eir);
2520 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2521 }
35aed2e6
CW
2522}
2523
2524/**
2525 * i915_handle_error - handle an error interrupt
2526 * @dev: drm device
2527 *
2528 * Do some basic checking of register state at error interrupt time and
2529 * dump it to the syslog. Also call i915_capture_error_state() to make
2530 * sure we get a record and make it available in debugfs. Fire a uevent
2531 * so userspace knows something bad happened (should trigger collection
2532 * of a ring dump etc.).
2533 */
58174462
MK
2534void i915_handle_error(struct drm_device *dev, bool wedged,
2535 const char *fmt, ...)
35aed2e6
CW
2536{
2537 struct drm_i915_private *dev_priv = dev->dev_private;
58174462
MK
2538 va_list args;
2539 char error_msg[80];
35aed2e6 2540
58174462
MK
2541 va_start(args, fmt);
2542 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2543 va_end(args);
2544
2545 i915_capture_error_state(dev, wedged, error_msg);
35aed2e6 2546 i915_report_and_clear_eir(dev);
8a905236 2547
ba1234d1 2548 if (wedged) {
f69061be
DV
2549 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
2550 &dev_priv->gpu_error.reset_counter);
ba1234d1 2551
11ed50ec 2552 /*
17e1df07
DV
2553 * Wakeup waiting processes so that the reset work function
2554 * i915_error_work_func doesn't deadlock trying to grab various
2555 * locks. By bumping the reset counter first, the woken
2556 * processes will see a reset in progress and back off,
2557 * releasing their locks and then wait for the reset completion.
2558 * We must do this for _all_ gpu waiters that might hold locks
2559 * that the reset work needs to acquire.
2560 *
2561 * Note: The wake_up serves as the required memory barrier to
2562 * ensure that the waiters see the updated value of the reset
2563 * counter atomic_t.
11ed50ec 2564 */
17e1df07 2565 i915_error_wake_up(dev_priv, false);
11ed50ec
BG
2566 }
2567
122f46ba
DV
2568 /*
2569 * Our reset work can grab modeset locks (since it needs to reset the
2570 * state of outstanding pageflips). Hence it must not be run on our own
2571 * dev-priv->wq work queue for otherwise the flush_work in the pageflip
2572 * code will deadlock.
2573 */
2574 schedule_work(&dev_priv->gpu_error.work);
8a905236
JB
2575}
2576
21ad8330 2577static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
4e5359cd 2578{
2d1013dd 2579 struct drm_i915_private *dev_priv = dev->dev_private;
4e5359cd
SF
2580 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
2581 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
05394f39 2582 struct drm_i915_gem_object *obj;
4e5359cd
SF
2583 struct intel_unpin_work *work;
2584 unsigned long flags;
2585 bool stall_detected;
2586
2587 /* Ignore early vblank irqs */
2588 if (intel_crtc == NULL)
2589 return;
2590
2591 spin_lock_irqsave(&dev->event_lock, flags);
2592 work = intel_crtc->unpin_work;
2593
e7d841ca
CW
2594 if (work == NULL ||
2595 atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
2596 !work->enable_stall_check) {
4e5359cd
SF
2597 /* Either the pending flip IRQ arrived, or we're too early. Don't check */
2598 spin_unlock_irqrestore(&dev->event_lock, flags);
2599 return;
2600 }
2601
2602 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
05394f39 2603 obj = work->pending_flip_obj;
a6c45cf0 2604 if (INTEL_INFO(dev)->gen >= 4) {
9db4a9c7 2605 int dspsurf = DSPSURF(intel_crtc->plane);
446f2545 2606 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
f343c5f6 2607 i915_gem_obj_ggtt_offset(obj);
4e5359cd 2608 } else {
9db4a9c7 2609 int dspaddr = DSPADDR(intel_crtc->plane);
f343c5f6 2610 stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
f4510a27
MR
2611 crtc->y * crtc->primary->fb->pitches[0] +
2612 crtc->x * crtc->primary->fb->bits_per_pixel/8);
4e5359cd
SF
2613 }
2614
2615 spin_unlock_irqrestore(&dev->event_lock, flags);
2616
2617 if (stall_detected) {
2618 DRM_DEBUG_DRIVER("Pageflip stall detected\n");
2619 intel_prepare_page_flip(dev, intel_crtc->plane);
2620 }
2621}
2622
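/*
 * Illustrative sketch: the check above asks "is the plane already scanning
 * out of the buffer we queued?" - if so, the flip completed but its
 * interrupt was missed. With hypothetical helper names:
 */
#if 0
	stall_detected = current_scanout_offset(plane) ==
			 flip_target_offset(obj);
	if (stall_detected)
		intel_prepare_page_flip(dev, plane);	/* complete by hand */
#endif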
42f52ef8
KP
2623/* Called from drm generic code, passed 'crtc' which
2624 * we use as a pipe index
2625 */
f71d4af4 2626static int i915_enable_vblank(struct drm_device *dev, int pipe)
0a3e67a4 2627{
2d1013dd 2628 struct drm_i915_private *dev_priv = dev->dev_private;
e9d21d7f 2629 unsigned long irqflags;
71e0ffa5 2630
5eddb70b 2631 if (!i915_pipe_enabled(dev, pipe))
71e0ffa5 2632 return -EINVAL;
0a3e67a4 2633
1ec14ad3 2634 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
f796cf8f 2635 if (INTEL_INFO(dev)->gen >= 4)
7c463586 2636 i915_enable_pipestat(dev_priv, pipe,
755e9019 2637 PIPE_START_VBLANK_INTERRUPT_STATUS);
e9d21d7f 2638 else
7c463586 2639 i915_enable_pipestat(dev_priv, pipe,
755e9019 2640 PIPE_VBLANK_INTERRUPT_STATUS);
8692d00e
CW
2641
2642 /* maintain vblank delivery even in deep C-states */
3d13ef2e 2643 if (INTEL_INFO(dev)->gen == 3)
6b26c86d 2644 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
1ec14ad3 2645 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
8692d00e 2646
0a3e67a4
JB
2647 return 0;
2648}
2649
f71d4af4 2650static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
f796cf8f 2651{
2d1013dd 2652 struct drm_i915_private *dev_priv = dev->dev_private;
f796cf8f 2653 unsigned long irqflags;
b518421f 2654 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
40da17c2 2655 DE_PIPE_VBLANK(pipe);
f796cf8f
JB
2656
2657 if (!i915_pipe_enabled(dev, pipe))
2658 return -EINVAL;
2659
2660 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
b518421f 2661 ironlake_enable_display_irq(dev_priv, bit);
b1f14ad0
JB
2662 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2663
2664 return 0;
2665}
2666
7e231dbe
JB
2667static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2668{
2d1013dd 2669 struct drm_i915_private *dev_priv = dev->dev_private;
7e231dbe 2670 unsigned long irqflags;
7e231dbe
JB
2671
2672 if (!i915_pipe_enabled(dev, pipe))
2673 return -EINVAL;
2674
2675 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
31acc7f5 2676 i915_enable_pipestat(dev_priv, pipe,
755e9019 2677 PIPE_START_VBLANK_INTERRUPT_STATUS);
7e231dbe
JB
2678 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2679
2680 return 0;
2681}
2682
abd58f01
BW
2683static int gen8_enable_vblank(struct drm_device *dev, int pipe)
2684{
2685 struct drm_i915_private *dev_priv = dev->dev_private;
2686 unsigned long irqflags;
abd58f01
BW
2687
2688 if (!i915_pipe_enabled(dev, pipe))
2689 return -EINVAL;
2690
2691 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
7167d7c6
DV
2692 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
2693 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2694 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
abd58f01
BW
2695 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2696 return 0;
2697}
2698
42f52ef8
KP
2699/* Called from drm generic code, passed 'crtc' which
2700 * we use as a pipe index
2701 */
f71d4af4 2702static void i915_disable_vblank(struct drm_device *dev, int pipe)
0a3e67a4 2703{
2d1013dd 2704 struct drm_i915_private *dev_priv = dev->dev_private;
e9d21d7f 2705 unsigned long irqflags;
0a3e67a4 2706
1ec14ad3 2707 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3d13ef2e 2708 if (INTEL_INFO(dev)->gen == 3)
6b26c86d 2709 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
8692d00e 2710
f796cf8f 2711 i915_disable_pipestat(dev_priv, pipe,
755e9019
ID
2712 PIPE_VBLANK_INTERRUPT_STATUS |
2713 PIPE_START_VBLANK_INTERRUPT_STATUS);
f796cf8f
JB
2714 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2715}
2716
f71d4af4 2717static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
f796cf8f 2718{
2d1013dd 2719 struct drm_i915_private *dev_priv = dev->dev_private;
f796cf8f 2720 unsigned long irqflags;
b518421f 2721 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
40da17c2 2722 DE_PIPE_VBLANK(pipe);
f796cf8f
JB
2723
2724 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
b518421f 2725 ironlake_disable_display_irq(dev_priv, bit);
b1f14ad0
JB
2726 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2727}
2728
7e231dbe
JB
2729static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
2730{
2d1013dd 2731 struct drm_i915_private *dev_priv = dev->dev_private;
7e231dbe 2732 unsigned long irqflags;
7e231dbe
JB
2733
2734 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
31acc7f5 2735 i915_disable_pipestat(dev_priv, pipe,
755e9019 2736 PIPE_START_VBLANK_INTERRUPT_STATUS);
7e231dbe
JB
2737 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2738}
2739
abd58f01
BW
2740static void gen8_disable_vblank(struct drm_device *dev, int pipe)
2741{
2742 struct drm_i915_private *dev_priv = dev->dev_private;
2743 unsigned long irqflags;
abd58f01
BW
2744
2745 if (!i915_pipe_enabled(dev, pipe))
2746 return;
2747
2748 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
7167d7c6
DV
2749 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
2750 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2751 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
abd58f01
BW
2752 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2753}
2754
893eead0
CW
2755static u32
2756ring_last_seqno(struct intel_ring_buffer *ring)
852835f3 2757{
893eead0
CW
2758 return list_entry(ring->request_list.prev,
2759 struct drm_i915_gem_request, list)->seqno;
2760}
2761
9107e9d2
CW
2762static bool
2763ring_idle(struct intel_ring_buffer *ring, u32 seqno)
2764{
2765 return (list_empty(&ring->request_list) ||
2766 i915_seqno_passed(seqno, ring_last_seqno(ring)));
f65d9421
BG
2767}
2768
a028c4b0
DV
2769static bool
2770ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
2771{
2772 if (INTEL_INFO(dev)->gen >= 8) {
2773 /*
2774 * FIXME: gen8 semaphore support - currently we don't emit
2775 * semaphores on bdw anyway, but this needs to be addressed when
2776 * we merge that code.
2777 */
2778 return false;
2779 } else {
2780 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2781 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2782 MI_SEMAPHORE_REGISTER);
2783 }
2784}
2785
921d42ea
DV
2786static struct intel_ring_buffer *
2787semaphore_wait_to_signaller_ring(struct intel_ring_buffer *ring, u32 ipehr)
2788{
2789 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2790 struct intel_ring_buffer *signaller;
2791 int i;
2792
2793 if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
2794 /*
2795 * FIXME: gen8 semaphore support - currently we don't emit
2796 * semaphores on bdw anyway, but this needs to be addressed when
2797 * we merge that code.
2798 */
2799 return NULL;
2800 } else {
2801 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
2802
2803 for_each_ring(signaller, dev_priv, i) {
2804 if (ring == signaller)
2805 continue;
2806
ebc348b2 2807 if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
921d42ea
DV
2808 return signaller;
2809 }
2810 }
2811
2812 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x\n",
2813 ring->id, ipehr);
2814
2815 return NULL;
2816}
2817
6274f212
CW
2818static struct intel_ring_buffer *
2819semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
a24a11e6
CW
2820{
2821 struct drm_i915_private *dev_priv = ring->dev->dev_private;
88fe429d
DV
2822 u32 cmd, ipehr, head;
2823 int i;
a24a11e6
CW
2824
2825 ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
a028c4b0 2826 if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
6274f212 2827 return NULL;
a24a11e6 2828
88fe429d
DV
2829 /*
2830 * HEAD is likely pointing to the dword after the actual command,
2831 * so scan backwards until we find the MBOX. But limit it to just 3
2832 * dwords. Note that we don't care about ACTHD here since that might
2833 * point at a batch, and semaphores are always emitted into the
2834 * ringbuffer itself.
a24a11e6 2835 */
88fe429d
DV
2836 head = I915_READ_HEAD(ring) & HEAD_ADDR;
2837
2838 for (i = 4; i; --i) {
2839 /*
2840 * Be paranoid and presume the hw has gone off into the wild -
2841 * our ring is smaller than what the hardware (and hence
2842 * HEAD_ADDR) allows. Also handles wrap-around.
2843 */
2844 head &= ring->size - 1;
2845
2846 /* This here seems to blow up */
2847 cmd = ioread32(ring->virtual_start + head);
a24a11e6
CW
2848 if (cmd == ipehr)
2849 break;
2850
88fe429d
DV
2851 head -= 4;
2852 }
a24a11e6 2853
88fe429d
DV
2854 if (!i)
2855 return NULL;
a24a11e6 2856
88fe429d 2857 *seqno = ioread32(ring->virtual_start + head + 4) + 1;
921d42ea 2858 return semaphore_wait_to_signaller_ring(ring, ipehr);
a24a11e6
CW
2859}
2860
6274f212
CW
2861static int semaphore_passed(struct intel_ring_buffer *ring)
2862{
2863 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2864 struct intel_ring_buffer *signaller;
2865 u32 seqno, ctl;
2866
2867 ring->hangcheck.deadlock = true;
2868
2869 signaller = semaphore_waits_for(ring, &seqno);
2870 if (signaller == NULL || signaller->hangcheck.deadlock)
2871 return -1;
2872
2873 /* cursory check for an unkickable deadlock */
2874 ctl = I915_READ_CTL(signaller);
2875 if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
2876 return -1;
2877
2878 return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno);
2879}
2880
2881static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2882{
2883 struct intel_ring_buffer *ring;
2884 int i;
2885
2886 for_each_ring(ring, dev_priv, i)
2887 ring->hangcheck.deadlock = false;
2888}
2889
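/*
 * Illustrative sketch: semaphore_passed() above walks the "A waits on B"
 * chain depth-first, with hangcheck.deadlock as the visited mark that
 * semaphore_clear_deadlocks() resets each hangcheck pass; revisiting a
 * marked ring means the waits form a cycle. Simplified model with
 * hypothetical names:
 */
#if 0
static int waits_deadlock(struct ring_model *r)
{
	struct ring_model *next;

	r->visited = true;
	next = ring_waited_on(r);	/* NULL if r isn't waiting */
	if (!next)
		return 0;		/* chain terminates: no deadlock */
	if (next->visited)
		return 1;		/* cycle: unkickable deadlock */
	return waits_deadlock(next);
}
#endif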
ad8beaea 2890static enum intel_ring_hangcheck_action
50877445 2891ring_stuck(struct intel_ring_buffer *ring, u64 acthd)
1ec14ad3
CW
2892{
2893 struct drm_device *dev = ring->dev;
2894 struct drm_i915_private *dev_priv = dev->dev_private;
9107e9d2
CW
2895 u32 tmp;
2896
6274f212 2897 if (ring->hangcheck.acthd != acthd)
f2f4d82f 2898 return HANGCHECK_ACTIVE;
6274f212 2899
9107e9d2 2900 if (IS_GEN2(dev))
f2f4d82f 2901 return HANGCHECK_HUNG;
9107e9d2
CW
2902
2903 /* Is the chip hanging on a WAIT_FOR_EVENT?
2904 * If so we can simply poke the RB_WAIT bit
2905 * and break the hang. This should work on
2906 * all but the second generation chipsets.
2907 */
2908 tmp = I915_READ_CTL(ring);
1ec14ad3 2909 if (tmp & RING_WAIT) {
58174462
MK
2910 i915_handle_error(dev, false,
2911 "Kicking stuck wait on %s",
2912 ring->name);
1ec14ad3 2913 I915_WRITE_CTL(ring, tmp);
f2f4d82f 2914 return HANGCHECK_KICK;
6274f212
CW
2915 }
2916
2917 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
2918 switch (semaphore_passed(ring)) {
2919 default:
f2f4d82f 2920 return HANGCHECK_HUNG;
6274f212 2921 case 1:
58174462
MK
2922 i915_handle_error(dev, false,
2923 "Kicking stuck semaphore on %s",
2924 ring->name);
6274f212 2925 I915_WRITE_CTL(ring, tmp);
f2f4d82f 2926 return HANGCHECK_KICK;
6274f212 2927 case 0:
f2f4d82f 2928 return HANGCHECK_WAIT;
6274f212 2929 }
9107e9d2 2930 }
ed5cbb03 2931
f2f4d82f 2932 return HANGCHECK_HUNG;
ed5cbb03
MK
2933}
2934
f65d9421
BG
2935/**
2936 * This is called when the chip hasn't reported back with completed
05407ff8
MK
2937 * batchbuffers in a long time. We keep track of per-ring seqno progress and
2938 * if there is no progress, the hangcheck score for that ring is increased.
2939 * Further, acthd is inspected to see if the ring is stuck. In the stuck case
2940 * we kick the ring. If we see no progress on three subsequent calls
2941 * we assume the chip is wedged and try to fix it by resetting the chip.
f65d9421 2942 */
a658b5d2 2943static void i915_hangcheck_elapsed(unsigned long data)
f65d9421
BG
2944{
2945 struct drm_device *dev = (struct drm_device *)data;
2d1013dd 2946 struct drm_i915_private *dev_priv = dev->dev_private;
b4519513 2947 struct intel_ring_buffer *ring;
b4519513 2948 int i;
05407ff8 2949 int busy_count = 0, rings_hung = 0;
9107e9d2
CW
2950 bool stuck[I915_NUM_RINGS] = { 0 };
2951#define BUSY 1
2952#define KICK 5
2953#define HUNG 20
893eead0 2954
d330a953 2955 if (!i915.enable_hangcheck)
3e0dc6b0
BW
2956 return;
2957
b4519513 2958 for_each_ring(ring, dev_priv, i) {
50877445
CW
2959 u64 acthd;
2960 u32 seqno;
9107e9d2 2961 bool busy = true;
05407ff8 2962
6274f212
CW
2963 semaphore_clear_deadlocks(dev_priv);
2964
05407ff8
MK
2965 seqno = ring->get_seqno(ring, false);
2966 acthd = intel_ring_get_active_head(ring);
b4519513 2967
9107e9d2
CW
2968 if (ring->hangcheck.seqno == seqno) {
2969 if (ring_idle(ring, seqno)) {
da661464
MK
2970 ring->hangcheck.action = HANGCHECK_IDLE;
2971
9107e9d2
CW
2972 if (waitqueue_active(&ring->irq_queue)) {
2973 /* Issue a wake-up to catch stuck h/w. */
094f9a54 2974 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
f4adcd24
DV
2975 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
2976 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2977 ring->name);
2978 else
2979 DRM_INFO("Fake missed irq on %s\n",
2980 ring->name);
094f9a54
CW
2981 wake_up_all(&ring->irq_queue);
2982 }
2983 /* Safeguard against driver failure */
2984 ring->hangcheck.score += BUSY;
9107e9d2
CW
2985 } else
2986 busy = false;
05407ff8 2987 } else {
6274f212
CW
2988 /* We always increment the hangcheck score
2989 * if the ring is busy and still processing
2990 * the same request, so that no single request
2991 * can run indefinitely (such as a chain of
2992 * batches). The only time we do not increment
2993 * the hangcheck score on this ring is if this
2994 * ring is in a legitimate wait for another
2995 * ring. In that case the waiting ring is a
2996 * victim and we want to be sure we catch the
2997 * right culprit. Then every time we do kick
2998 * the ring, add a small increment to the
2999 * score so that we can catch a batch that is
3000 * being repeatedly kicked and so responsible
3001 * for stalling the machine.
3002 */
ad8beaea
MK
3003 ring->hangcheck.action = ring_stuck(ring,
3004 acthd);
3005
3006 switch (ring->hangcheck.action) {
da661464 3007 case HANGCHECK_IDLE:
f2f4d82f 3008 case HANGCHECK_WAIT:
6274f212 3009 break;
f2f4d82f 3010 case HANGCHECK_ACTIVE:
ea04cb31 3011 ring->hangcheck.score += BUSY;
6274f212 3012 break;
f2f4d82f 3013 case HANGCHECK_KICK:
ea04cb31 3014 ring->hangcheck.score += KICK;
6274f212 3015 break;
f2f4d82f 3016 case HANGCHECK_HUNG:
ea04cb31 3017 ring->hangcheck.score += HUNG;
6274f212
CW
3018 stuck[i] = true;
3019 break;
3020 }
05407ff8 3021 }
9107e9d2 3022 } else {
da661464
MK
3023 ring->hangcheck.action = HANGCHECK_ACTIVE;
3024
9107e9d2
CW
3025 /* Gradually reduce the count so that we catch DoS
3026 * attempts across multiple batches.
3027 */
3028 if (ring->hangcheck.score > 0)
3029 ring->hangcheck.score--;
d1e61e7f
CW
3030 }
3031
05407ff8
MK
3032 ring->hangcheck.seqno = seqno;
3033 ring->hangcheck.acthd = acthd;
9107e9d2 3034 busy_count += busy;
893eead0 3035 }
b9201c14 3036
92cab734 3037 for_each_ring(ring, dev_priv, i) {
b6b0fac0 3038 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
b8d88d1d
DV
3039 DRM_INFO("%s on %s\n",
3040 stuck[i] ? "stuck" : "no progress",
3041 ring->name);
a43adf07 3042 rings_hung++;
92cab734
MK
3043 }
3044 }
3045
05407ff8 3046 if (rings_hung)
58174462 3047 return i915_handle_error(dev, true, "Ring hung");
f65d9421 3048
05407ff8
MK
3049 if (busy_count)
3050 /* Reset timer case chip hangs without another request
3051 * being added */
10cd45b6
MK
3052 i915_queue_hangcheck(dev);
3053}
3054
3055void i915_queue_hangcheck(struct drm_device *dev)
3056{
3057 struct drm_i915_private *dev_priv = dev->dev_private;
d330a953 3058 if (!i915.enable_hangcheck)
10cd45b6
MK
3059 return;
3060
3061 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
3062 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
f65d9421
BG
3063}
3064
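/*
 * Illustrative sketch of the scoring above: a leaky per-ring counter,
 * charged with BUSY/KICK/HUNG on stalls and drained by one on progress, so
 * isolated hiccups are forgiven while repeat offenders accumulate. The
 * firing threshold (HANGCHECK_SCORE_RING_HUNG) is defined elsewhere;
 * declare_hung() is a hypothetical stand-in for the i915_handle_error path.
 */
#if 0
static void score_tick(struct ring_score *s, bool progressed, int penalty)
{
	if (progressed) {
		if (s->score > 0)
			s->score--;	/* slow decay catches repeated DoS */
		return;
	}
	s->score += penalty;		/* BUSY, KICK or HUNG */
	if (s->score >= HANGCHECK_SCORE_RING_HUNG)
		declare_hung(s);
}
#endif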
1c69eb42 3065static void ibx_irq_reset(struct drm_device *dev)
91738a95
PZ
3066{
3067 struct drm_i915_private *dev_priv = dev->dev_private;
3068
3069 if (HAS_PCH_NOP(dev))
3070 return;
3071
f86f3fb0 3072 GEN5_IRQ_RESET(SDE);
105b122e
PZ
3073
3074 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3075 I915_WRITE(SERR_INT, 0xffffffff);
622364b6 3076}
105b122e 3077
622364b6
PZ
3078/*
3079 * SDEIER is also touched by the interrupt handler to work around missed PCH
3080 * interrupts. Hence we can't update it after the interrupt handler is enabled -
3081 * instead we unconditionally enable all PCH interrupt sources here, but then
3082 * only unmask them as needed with SDEIMR.
3083 *
3084 * This function needs to be called before interrupts are enabled.
3085 */
3086static void ibx_irq_pre_postinstall(struct drm_device *dev)
3087{
3088 struct drm_i915_private *dev_priv = dev->dev_private;
3089
3090 if (HAS_PCH_NOP(dev))
3091 return;
3092
3093 WARN_ON(I915_READ(SDEIER) != 0);
91738a95
PZ
3094 I915_WRITE(SDEIER, 0xffffffff);
3095 POSTING_READ(SDEIER);
3096}
3097
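/*
 * Note in code form: "enable everything in IER once, gate with IMR
 * afterwards" means runtime code never races the interrupt handler on
 * SDEIER. Sketch, with wanted_bits hypothetical:
 */
#if 0
	I915_WRITE(SDEIER, 0xffffffff);		/* all sources armed, once */
	I915_WRITE(SDEIMR, ~wanted_bits);	/* delivery gated per bit */
	POSTING_READ(SDEIMR);
#endif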
7c4d664e 3098static void gen5_gt_irq_reset(struct drm_device *dev)
d18ea1b5
DV
3099{
3100 struct drm_i915_private *dev_priv = dev->dev_private;
3101
f86f3fb0 3102 GEN5_IRQ_RESET(GT);
a9d356a6 3103 if (INTEL_INFO(dev)->gen >= 6)
f86f3fb0 3104 GEN5_IRQ_RESET(GEN6_PM);
d18ea1b5
DV
3105}
3106
1da177e4
LT
3107/* drm_dma.h hooks
3108*/
be30b29f 3109static void ironlake_irq_reset(struct drm_device *dev)
036a4a7d 3110{
2d1013dd 3111 struct drm_i915_private *dev_priv = dev->dev_private;
036a4a7d 3112
0c841212 3113 I915_WRITE(HWSTAM, 0xffffffff);
bdfcdb63 3114
f86f3fb0 3115 GEN5_IRQ_RESET(DE);
c6d954c1
PZ
3116 if (IS_GEN7(dev))
3117 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
036a4a7d 3118
7c4d664e 3119 gen5_gt_irq_reset(dev);
c650156a 3120
1c69eb42 3121 ibx_irq_reset(dev);
7d99163d 3122}
c650156a 3123
be30b29f
PZ
3124static void ironlake_irq_preinstall(struct drm_device *dev)
3125{
be30b29f 3126 ironlake_irq_reset(dev);
7d99163d
BW
3127}
3128
7e231dbe
JB
3129static void valleyview_irq_preinstall(struct drm_device *dev)
3130{
2d1013dd 3131 struct drm_i915_private *dev_priv = dev->dev_private;
7e231dbe
JB
3132 int pipe;
3133
7e231dbe
JB
3134 /* VLV magic */
3135 I915_WRITE(VLV_IMR, 0);
3136 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
3137 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
3138 I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
3139
7e231dbe
JB
3140 /* and GT */
3141 I915_WRITE(GTIIR, I915_READ(GTIIR));
3142 I915_WRITE(GTIIR, I915_READ(GTIIR));
d18ea1b5 3143
7c4d664e 3144 gen5_gt_irq_reset(dev);
7e231dbe
JB
3145
3146 I915_WRITE(DPINVGTT, 0xff);
3147
3148 I915_WRITE(PORT_HOTPLUG_EN, 0);
3149 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3150 for_each_pipe(pipe)
3151 I915_WRITE(PIPESTAT(pipe), 0xffff);
3152 I915_WRITE(VLV_IIR, 0xffffffff);
3153 I915_WRITE(VLV_IMR, 0xffffffff);
3154 I915_WRITE(VLV_IER, 0x0);
3155 POSTING_READ(VLV_IER);
3156}
3157
823f6b38 3158static void gen8_irq_reset(struct drm_device *dev)
abd58f01
BW
3159{
3160 struct drm_i915_private *dev_priv = dev->dev_private;
3161 int pipe;
3162
abd58f01
BW
3163 I915_WRITE(GEN8_MASTER_IRQ, 0);
3164 POSTING_READ(GEN8_MASTER_IRQ);
3165
f86f3fb0
PZ
3166 GEN8_IRQ_RESET_NDX(GT, 0);
3167 GEN8_IRQ_RESET_NDX(GT, 1);
3168 GEN8_IRQ_RESET_NDX(GT, 2);
3169 GEN8_IRQ_RESET_NDX(GT, 3);
abd58f01 3170
823f6b38 3171 for_each_pipe(pipe)
f86f3fb0 3172 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
abd58f01 3173
f86f3fb0
PZ
3174 GEN5_IRQ_RESET(GEN8_DE_PORT_);
3175 GEN5_IRQ_RESET(GEN8_DE_MISC_);
3176 GEN5_IRQ_RESET(GEN8_PCU_);
abd58f01 3177
1c69eb42 3178 ibx_irq_reset(dev);
abd58f01 3179}
09f2344d 3180
823f6b38
PZ
3181static void gen8_irq_preinstall(struct drm_device *dev)
3182{
3183 gen8_irq_reset(dev);
abd58f01
BW
3184}
3185
43f328d7
VS
3186static void cherryview_irq_preinstall(struct drm_device *dev)
3187{
3188 struct drm_i915_private *dev_priv = dev->dev_private;
3189 int pipe;
3190
3191 I915_WRITE(GEN8_MASTER_IRQ, 0);
3192 POSTING_READ(GEN8_MASTER_IRQ);
3193
3194 GEN8_IRQ_RESET_NDX(GT, 0);
3195 GEN8_IRQ_RESET_NDX(GT, 1);
3196 GEN8_IRQ_RESET_NDX(GT, 2);
3197 GEN8_IRQ_RESET_NDX(GT, 3);
3198
3199 GEN5_IRQ_RESET(GEN8_PCU_);
3200
3201 POSTING_READ(GEN8_PCU_IIR);
3202
3203 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3204
3205 I915_WRITE(PORT_HOTPLUG_EN, 0);
3206 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3207
3208 for_each_pipe(pipe)
3209 I915_WRITE(PIPESTAT(pipe), 0xffff);
3210
3211 I915_WRITE(VLV_IMR, 0xffffffff);
3212 I915_WRITE(VLV_IER, 0x0);
3213 I915_WRITE(VLV_IIR, 0xffffffff);
3214 POSTING_READ(VLV_IIR);
3215}
3216
82a28bcf 3217static void ibx_hpd_irq_setup(struct drm_device *dev)
7fe0b973 3218{
2d1013dd 3219 struct drm_i915_private *dev_priv = dev->dev_private;
82a28bcf
DV
3220 struct drm_mode_config *mode_config = &dev->mode_config;
3221 struct intel_encoder *intel_encoder;
fee884ed 3222 u32 hotplug_irqs, hotplug, enabled_irqs = 0;
82a28bcf
DV
3223
3224 if (HAS_PCH_IBX(dev)) {
fee884ed 3225 hotplug_irqs = SDE_HOTPLUG_MASK;
82a28bcf 3226 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
cd569aed 3227 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
fee884ed 3228 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
82a28bcf 3229 } else {
fee884ed 3230 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
82a28bcf 3231 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
cd569aed 3232 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
fee884ed 3233 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
82a28bcf 3234 }
7fe0b973 3235
fee884ed 3236 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
82a28bcf
DV
3237
3238 /*
3239 * Enable digital hotplug on the PCH, and configure the DP short pulse
3240 * duration to 2ms (which is the minimum in the DisplayPort spec)
3241 *
3242 * This register is the same on all known PCH chips.
3243 */
7fe0b973
KP
3244 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3245 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3246 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3247 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3248 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3249 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3250}
3251
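
The tail of ibx_hpd_irq_setup() is a classic read-modify-write: each port's pulse-duration field is cleared before the enable bit and the new 2ms value are OR-ed in, so stale duration bits cannot combine into an undefined setting. The same pattern factored into a single-port helper (a sketch; the helper name and parameters are assumptions, not driver API):

static void hpd_program_port_pulse(struct drm_i915_private *dev_priv,
				   u32 enable_bit, u32 duration_mask,
				   u32 duration_val)
{
	u32 hotplug = I915_READ(PCH_PORT_HOTPLUG);

	hotplug &= ~duration_mask;		/* drop the stale field */
	hotplug |= enable_bit | duration_val;	/* enable + new duration */
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

Calling it as hpd_program_port_pulse(dev_priv, PORTB_HOTPLUG_ENABLE, PORTB_PULSE_DURATION_MASK, PORTB_PULSE_DURATION_2ms) would reproduce the port B lines above.
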
d46da437
PZ
3252static void ibx_irq_postinstall(struct drm_device *dev)
3253{
2d1013dd 3254 struct drm_i915_private *dev_priv = dev->dev_private;
82a28bcf 3255 u32 mask;
e5868a31 3256
692a04cf
DV
3257 if (HAS_PCH_NOP(dev))
3258 return;
3259
105b122e 3260 if (HAS_PCH_IBX(dev))
5c673b60 3261 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
105b122e 3262 else
5c673b60 3263 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
8664281b 3264
337ba017 3265 GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
d46da437 3266 I915_WRITE(SDEIMR, ~mask);
d46da437
PZ
3267}
3268
0a9a8c91
DV
3269static void gen5_gt_irq_postinstall(struct drm_device *dev)
3270{
3271 struct drm_i915_private *dev_priv = dev->dev_private;
3272 u32 pm_irqs, gt_irqs;
3273
3274 pm_irqs = gt_irqs = 0;
3275
3276 dev_priv->gt_irq_mask = ~0;
040d2baa 3277 if (HAS_L3_DPF(dev)) {
0a9a8c91 3278 /* L3 parity interrupt is always unmasked. */
35a85ac6
BW
3279 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3280 gt_irqs |= GT_PARITY_ERROR(dev);
0a9a8c91
DV
3281 }
3282
3283 gt_irqs |= GT_RENDER_USER_INTERRUPT;
3284 if (IS_GEN5(dev)) {
3285 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
3286 ILK_BSD_USER_INTERRUPT;
3287 } else {
3288 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3289 }
3290
35079899 3291 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
0a9a8c91
DV
3292
3293 if (INTEL_INFO(dev)->gen >= 6) {
a6706b45 3294 pm_irqs |= dev_priv->pm_rps_events;
0a9a8c91
DV
3295
3296 if (HAS_VEBOX(dev))
3297 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3298
605cd25b 3299 dev_priv->pm_irq_mask = 0xffffffff;
35079899 3300 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
0a9a8c91
DV
3301 }
3302}
3303
f71d4af4 3304static int ironlake_irq_postinstall(struct drm_device *dev)
036a4a7d 3305{
4bc9d430 3306 unsigned long irqflags;
2d1013dd 3307 struct drm_i915_private *dev_priv = dev->dev_private;
8e76f8dc
PZ
3308 u32 display_mask, extra_mask;
3309
3310 if (INTEL_INFO(dev)->gen >= 7) {
3311 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3312 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3313 DE_PLANEB_FLIP_DONE_IVB |
5c673b60 3314 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
8e76f8dc 3315 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
5c673b60 3316 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
8e76f8dc
PZ
3317 } else {
3318 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3319 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
5b3a856b 3320 DE_AUX_CHANNEL_A |
5b3a856b
DV
3321 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3322 DE_POISON);
5c673b60
DV
3323 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3324 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
8e76f8dc 3325 }
036a4a7d 3326
1ec14ad3 3327 dev_priv->irq_mask = ~display_mask;
036a4a7d 3328
0c841212
PZ
3329 I915_WRITE(HWSTAM, 0xeffe);
3330
622364b6
PZ
3331 ibx_irq_pre_postinstall(dev);
3332
35079899 3333 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
036a4a7d 3334
0a9a8c91 3335 gen5_gt_irq_postinstall(dev);
036a4a7d 3336
d46da437 3337 ibx_irq_postinstall(dev);
7fe0b973 3338
f97108d1 3339 if (IS_IRONLAKE_M(dev)) {
6005ce42
DV
3340 /* Enable PCU event interrupts
3341 *
3342 * Spinlocking is not required here for correctness since interrupt
4bc9d430
DV
3343 * setup is guaranteed to run in single-threaded context. But we
3344 * need it to make the assert_spin_locked check happy. */
3345 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
f97108d1 3346 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
4bc9d430 3347 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
f97108d1
JB
3348 }
3349
036a4a7d
ZW
3350 return 0;
3351}
3352
f8b79e58
ID
3353static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
3354{
3355 u32 pipestat_mask;
3356 u32 iir_mask;
3357
3358 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3359 PIPE_FIFO_UNDERRUN_STATUS;
3360
3361 I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
3362 I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
3363 POSTING_READ(PIPESTAT(PIPE_A));
3364
3365 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3366 PIPE_CRC_DONE_INTERRUPT_STATUS;
3367
3368 i915_enable_pipestat(dev_priv, PIPE_A, pipestat_mask |
3369 PIPE_GMBUS_INTERRUPT_STATUS);
3370 i915_enable_pipestat(dev_priv, PIPE_B, pipestat_mask);
3371
3372 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3373 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3374 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3375 dev_priv->irq_mask &= ~iir_mask;
3376
3377 I915_WRITE(VLV_IIR, iir_mask);
3378 I915_WRITE(VLV_IIR, iir_mask);
3379 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3380 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3381 POSTING_READ(VLV_IER);
3382}
3383
3384static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3385{
3386 u32 pipestat_mask;
3387 u32 iir_mask;
3388
3389 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3390 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
6c7fba04 3391 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
f8b79e58
ID
3392
3393 dev_priv->irq_mask |= iir_mask;
3394 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3395 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3396 I915_WRITE(VLV_IIR, iir_mask);
3397 I915_WRITE(VLV_IIR, iir_mask);
3398 POSTING_READ(VLV_IIR);
3399
3400 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3401 PIPE_CRC_DONE_INTERRUPT_STATUS;
3402
3403 i915_disable_pipestat(dev_priv, PIPE_A, pipestat_mask |
3404 PIPE_GMBUS_INTERRUPT_STATUS);
3405 i915_disable_pipestat(dev_priv, PIPE_B, pipestat_mask);
3406
3407 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3408 PIPE_FIFO_UNDERRUN_STATUS;
3409 I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
3410 I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
3411 POSTING_READ(PIPESTAT(PIPE_A));
3412}
3413
3414void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3415{
3416 assert_spin_locked(&dev_priv->irq_lock);
3417
3418 if (dev_priv->display_irqs_enabled)
3419 return;
3420
3421 dev_priv->display_irqs_enabled = true;
3422
3423 if (dev_priv->dev->irq_enabled)
3424 valleyview_display_irqs_install(dev_priv);
3425}
3426
3427void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3428{
3429 assert_spin_locked(&dev_priv->irq_lock);
3430
3431 if (!dev_priv->display_irqs_enabled)
3432 return;
3433
3434 dev_priv->display_irqs_enabled = false;
3435
3436 if (dev_priv->dev->irq_enabled)
3437 valleyview_display_irqs_uninstall(dev_priv);
3438}
3439
7e231dbe
JB
3440static int valleyview_irq_postinstall(struct drm_device *dev)
3441{
2d1013dd 3442 struct drm_i915_private *dev_priv = dev->dev_private;
b79480ba 3443 unsigned long irqflags;
7e231dbe 3444
f8b79e58 3445 dev_priv->irq_mask = ~0;
7e231dbe 3446
20afbda2
DV
3447 I915_WRITE(PORT_HOTPLUG_EN, 0);
3448 POSTING_READ(PORT_HOTPLUG_EN);
3449
7e231dbe 3450 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
f8b79e58 3451 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
7e231dbe 3452 I915_WRITE(VLV_IIR, 0xffffffff);
7e231dbe
JB
3453 POSTING_READ(VLV_IER);
3454
b79480ba
DV
3455 /* Interrupt setup is already guaranteed to be single-threaded; this is
3456 * just to make the assert_spin_locked check happy. */
3457 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
f8b79e58
ID
3458 if (dev_priv->display_irqs_enabled)
3459 valleyview_display_irqs_install(dev_priv);
b79480ba 3460 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
31acc7f5 3461
7e231dbe
JB
3462 I915_WRITE(VLV_IIR, 0xffffffff);
3463 I915_WRITE(VLV_IIR, 0xffffffff);
3464
0a9a8c91 3465 gen5_gt_irq_postinstall(dev);
7e231dbe
JB
3466
3467 /* ack & enable invalid PTE error interrupts */
3468#if 0 /* FIXME: add support to irq handler for checking these bits */
3469 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3470 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
3471#endif
3472
3473 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
20afbda2
DV
3474
3475 return 0;
3476}
3477
abd58f01
BW
3478static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3479{
3480 int i;
3481
3482 /* These are interrupts we'll toggle with the ring mask register */
3483 uint32_t gt_interrupts[] = {
3484 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3485 GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
3486 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3487 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3488 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3489 0,
3490 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3491 };
3492
337ba017 3493 for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++)
35079899 3494 GEN8_IRQ_INIT_NDX(GT, i, ~gt_interrupts[i], gt_interrupts[i]);
0961021a
BW
3495
3496 dev_priv->pm_irq_mask = 0xffffffff;
abd58f01
BW
3497}
3498
3499static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3500{
3501 struct drm_device *dev = dev_priv->dev;
d0e1f1cb 3502 uint32_t de_pipe_masked = GEN8_PIPE_PRIMARY_FLIP_DONE |
13b3a0a7 3503 GEN8_PIPE_CDCLK_CRC_DONE |
13b3a0a7 3504 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
5c673b60
DV
3505 uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3506 GEN8_PIPE_FIFO_UNDERRUN;
abd58f01 3507 int pipe;
13b3a0a7
DV
3508 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3509 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3510 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
abd58f01 3511
337ba017 3512 for_each_pipe(pipe)
35079899
PZ
3513 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, dev_priv->de_irq_mask[pipe],
3514 de_pipe_enables);
abd58f01 3515
35079899 3516 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A);
abd58f01
BW
3517}
3518
3519static int gen8_irq_postinstall(struct drm_device *dev)
3520{
3521 struct drm_i915_private *dev_priv = dev->dev_private;
3522
622364b6
PZ
3523 ibx_irq_pre_postinstall(dev);
3524
abd58f01
BW
3525 gen8_gt_irq_postinstall(dev_priv);
3526 gen8_de_irq_postinstall(dev_priv);
3527
3528 ibx_irq_postinstall(dev);
3529
3530 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
3531 POSTING_READ(GEN8_MASTER_IRQ);
3532
3533 return 0;
3534}
3535
43f328d7
VS
3536static int cherryview_irq_postinstall(struct drm_device *dev)
3537{
3538 struct drm_i915_private *dev_priv = dev->dev_private;
3539 u32 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
3540 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
43f328d7 3541 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3278f67f
VS
3542 I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3543 u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV |
3544 PIPE_CRC_DONE_INTERRUPT_STATUS;
43f328d7
VS
3545 unsigned long irqflags;
3546 int pipe;
3547
3548 /*
3549 * Leave vblank interrupts masked initially; the enable/disable
3550 * vblank hooks will toggle them based on usage.
3551 */
3278f67f 3552 dev_priv->irq_mask = ~enable_mask;
43f328d7
VS
3553
3554 for_each_pipe(pipe)
3555 I915_WRITE(PIPESTAT(pipe), 0xffff);
3556
3557 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3278f67f 3558 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
43f328d7
VS
3559 for_each_pipe(pipe)
3560 i915_enable_pipestat(dev_priv, pipe, pipestat_enable);
3561 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3562
3563 I915_WRITE(VLV_IIR, 0xffffffff);
3564 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3565 I915_WRITE(VLV_IER, enable_mask);
3566
3567 gen8_gt_irq_postinstall(dev_priv);
3568
3569 I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
3570 POSTING_READ(GEN8_MASTER_IRQ);
3571
3572 return 0;
3573}
3574
abd58f01
BW
3575static void gen8_irq_uninstall(struct drm_device *dev)
3576{
3577 struct drm_i915_private *dev_priv = dev->dev_private;
abd58f01
BW
3578
3579 if (!dev_priv)
3580 return;
3581
d4eb6b10 3582 intel_hpd_irq_uninstall(dev_priv);
abd58f01 3583
823f6b38 3584 gen8_irq_reset(dev);
abd58f01
BW
3585}
3586
7e231dbe
JB
3587static void valleyview_irq_uninstall(struct drm_device *dev)
3588{
2d1013dd 3589 struct drm_i915_private *dev_priv = dev->dev_private;
f8b79e58 3590 unsigned long irqflags;
7e231dbe
JB
3591 int pipe;
3592
3593 if (!dev_priv)
3594 return;
3595
843d0e7d
ID
3596 I915_WRITE(VLV_MASTER_IER, 0);
3597
3ca1cced 3598 intel_hpd_irq_uninstall(dev_priv);
ac4c16c5 3599
7e231dbe
JB
3600 for_each_pipe(pipe)
3601 I915_WRITE(PIPESTAT(pipe), 0xffff);
3602
3603 I915_WRITE(HWSTAM, 0xffffffff);
3604 I915_WRITE(PORT_HOTPLUG_EN, 0);
3605 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
f8b79e58
ID
3606
3607 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3608 if (dev_priv->display_irqs_enabled)
3609 valleyview_display_irqs_uninstall(dev_priv);
3610 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3611
3612 dev_priv->irq_mask = 0;
3613
7e231dbe
JB
3614 I915_WRITE(VLV_IIR, 0xffffffff);
3615 I915_WRITE(VLV_IMR, 0xffffffff);
3616 I915_WRITE(VLV_IER, 0x0);
3617 POSTING_READ(VLV_IER);
3618}
3619
43f328d7
VS
3620static void cherryview_irq_uninstall(struct drm_device *dev)
3621{
3622 struct drm_i915_private *dev_priv = dev->dev_private;
3623 int pipe;
3624
3625 if (!dev_priv)
3626 return;
3627
3628 I915_WRITE(GEN8_MASTER_IRQ, 0);
3629 POSTING_READ(GEN8_MASTER_IRQ);
3630
3631#define GEN8_IRQ_FINI_NDX(type, which) \
3632do { \
3633 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
3634 I915_WRITE(GEN8_##type##_IER(which), 0); \
3635 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
3636 POSTING_READ(GEN8_##type##_IIR(which)); \
3637 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
3638} while (0)
3639
3640#define GEN8_IRQ_FINI(type) \
3641do { \
3642 I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
3643 I915_WRITE(GEN8_##type##_IER, 0); \
3644 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
3645 POSTING_READ(GEN8_##type##_IIR); \
3646 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
3647} while (0)
3648
3649 GEN8_IRQ_FINI_NDX(GT, 0);
3650 GEN8_IRQ_FINI_NDX(GT, 1);
3651 GEN8_IRQ_FINI_NDX(GT, 2);
3652 GEN8_IRQ_FINI_NDX(GT, 3);
3653
3654 GEN8_IRQ_FINI(PCU);
3655
3656#undef GEN8_IRQ_FINI
3657#undef GEN8_IRQ_FINI_NDX
3658
3659 I915_WRITE(PORT_HOTPLUG_EN, 0);
3660 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3661
3662 for_each_pipe(pipe)
3663 I915_WRITE(PIPESTAT(pipe), 0xffff);
3664
3665 I915_WRITE(VLV_IMR, 0xffffffff);
3666 I915_WRITE(VLV_IER, 0x0);
3667 I915_WRITE(VLV_IIR, 0xffffffff);
3668 POSTING_READ(VLV_IIR);
3669}
3670
f71d4af4 3671static void ironlake_irq_uninstall(struct drm_device *dev)
036a4a7d 3672{
2d1013dd 3673 struct drm_i915_private *dev_priv = dev->dev_private;
4697995b
JB
3674
3675 if (!dev_priv)
3676 return;
3677
3ca1cced 3678 intel_hpd_irq_uninstall(dev_priv);
ac4c16c5 3679
be30b29f 3680 ironlake_irq_reset(dev);
036a4a7d
ZW
3681}
3682
a266c7d5 3683static void i8xx_irq_preinstall(struct drm_device * dev)
1da177e4 3684{
2d1013dd 3685 struct drm_i915_private *dev_priv = dev->dev_private;
9db4a9c7 3686 int pipe;
91e3738e 3687
9db4a9c7
JB
3688 for_each_pipe(pipe)
3689 I915_WRITE(PIPESTAT(pipe), 0);
a266c7d5
CW
3690 I915_WRITE16(IMR, 0xffff);
3691 I915_WRITE16(IER, 0x0);
3692 POSTING_READ16(IER);
c2798b19
CW
3693}
3694
3695static int i8xx_irq_postinstall(struct drm_device *dev)
3696{
2d1013dd 3697 struct drm_i915_private *dev_priv = dev->dev_private;
379ef82d 3698 unsigned long irqflags;
c2798b19 3699
c2798b19
CW
3700 I915_WRITE16(EMR,
3701 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3702
3703 /* Unmask the interrupts that we always want on. */
3704 dev_priv->irq_mask =
3705 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3706 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3707 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3708 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3709 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3710 I915_WRITE16(IMR, dev_priv->irq_mask);
3711
3712 I915_WRITE16(IER,
3713 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3714 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3715 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
3716 I915_USER_INTERRUPT);
3717 POSTING_READ16(IER);
3718
379ef82d
DV
3719 /* Interrupt setup is already guaranteed to be single-threaded; this is
3720 * just to make the assert_spin_locked check happy. */
3721 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
755e9019
ID
3722 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3723 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
379ef82d
DV
3724 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3725
c2798b19
CW
3726 return 0;
3727}
3728
90a72f87
VS
3729/*
3730 * Returns true when a page flip has completed.
3731 */
3732static bool i8xx_handle_vblank(struct drm_device *dev,
1f1c2e24 3733 int plane, int pipe, u32 iir)
90a72f87 3734{
2d1013dd 3735 struct drm_i915_private *dev_priv = dev->dev_private;
1f1c2e24 3736 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
90a72f87 3737
8d7849db 3738 if (!intel_pipe_handle_vblank(dev, pipe))
90a72f87
VS
3739 return false;
3740
3741 if ((iir & flip_pending) == 0)
3742 return false;
3743
1f1c2e24 3744 intel_prepare_page_flip(dev, plane);
90a72f87
VS
3745
3746 /* We detect FlipDone by looking for the change in PendingFlip from '1'
3747 * to '0' on the following vblank, i.e. IIR has the PendingFlip bit
3748 * asserted following the MI_DISPLAY_FLIP, but the ISR bit is deasserted, hence
3749 * the flip is completed (no longer pending). Since this doesn't raise
3750 * an interrupt per se, we watch for the change at vblank.
3751 */
3752 if (I915_READ16(ISR) & flip_pending)
3753 return false;
3754
3755 intel_finish_page_flip(dev, pipe);
3756
3757 return true;
3758}
3759
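
The ISR-versus-IIR trick that i8xx_handle_vblank() relies on is easy to model: ISR mirrors the live PendingFlip state and drops when the flip completes, while IIR stays latched from the MI_DISPLAY_FLIP. A toy model of the completion test (names invented here, not driver API):

#include <stdbool.h>
#include <stdint.h>

struct flip_regs {
	uint32_t isr;	/* live status: set while the flip is still pending */
	uint32_t iir;	/* latched: set once the flip was queued */
};

/* Checked at vblank: latched but no longer live means the flip finished. */
static bool flip_done(const struct flip_regs *r, uint32_t pending_bit)
{
	return (r->iir & pending_bit) && !(r->isr & pending_bit);
}
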
ff1f525e 3760static irqreturn_t i8xx_irq_handler(int irq, void *arg)
c2798b19 3761{
45a83f84 3762 struct drm_device *dev = arg;
2d1013dd 3763 struct drm_i915_private *dev_priv = dev->dev_private;
c2798b19
CW
3764 u16 iir, new_iir;
3765 u32 pipe_stats[2];
3766 unsigned long irqflags;
c2798b19
CW
3767 int pipe;
3768 u16 flip_mask =
3769 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3770 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3771
c2798b19
CW
3772 iir = I915_READ16(IIR);
3773 if (iir == 0)
3774 return IRQ_NONE;
3775
3776 while (iir & ~flip_mask) {
3777 /* Can't rely on pipestat interrupt bit in iir as it might
3778 * have been cleared after the pipestat interrupt was received.
3779 * It doesn't set the bit in iir again, but it still produces
3780 * interrupts (for non-MSI).
3781 */
3782 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3783 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
58174462
MK
3784 i915_handle_error(dev, false,
3785 "Command parser error, iir 0x%08x",
3786 iir);
c2798b19
CW
3787
3788 for_each_pipe(pipe) {
3789 int reg = PIPESTAT(pipe);
3790 pipe_stats[pipe] = I915_READ(reg);
3791
3792 /*
3793 * Clear the PIPE*STAT regs before the IIR
3794 */
2d9d2b0b 3795 if (pipe_stats[pipe] & 0x8000ffff)
c2798b19 3796 I915_WRITE(reg, pipe_stats[pipe]);
c2798b19
CW
3797 }
3798 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3799
3800 I915_WRITE16(IIR, iir & ~flip_mask);
3801 new_iir = I915_READ16(IIR); /* Flush posted writes */
3802
d05c617e 3803 i915_update_dri1_breadcrumb(dev);
c2798b19
CW
3804
3805 if (iir & I915_USER_INTERRUPT)
3806 notify_ring(dev, &dev_priv->ring[RCS]);
3807
4356d586 3808 for_each_pipe(pipe) {
1f1c2e24 3809 int plane = pipe;
3a77c4c4 3810 if (HAS_FBC(dev))
1f1c2e24
VS
3811 plane = !plane;
3812
4356d586 3813 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
1f1c2e24
VS
3814 i8xx_handle_vblank(dev, plane, pipe, iir))
3815 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
c2798b19 3816
4356d586 3817 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
277de95e 3818 i9xx_pipe_crc_irq_handler(dev, pipe);
2d9d2b0b
VS
3819
3820 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
3821 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
fc2c807b 3822 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
4356d586 3823 }
c2798b19
CW
3824
3825 iir = new_iir;
3826 }
3827
3828 return IRQ_HANDLED;
3829}
3830
3831static void i8xx_irq_uninstall(struct drm_device * dev)
3832{
2d1013dd 3833 struct drm_i915_private *dev_priv = dev->dev_private;
c2798b19
CW
3834 int pipe;
3835
c2798b19
CW
3836 for_each_pipe(pipe) {
3837 /* Clear enable bits; then clear status bits */
3838 I915_WRITE(PIPESTAT(pipe), 0);
3839 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3840 }
3841 I915_WRITE16(IMR, 0xffff);
3842 I915_WRITE16(IER, 0x0);
3843 I915_WRITE16(IIR, I915_READ16(IIR));
3844}
3845
a266c7d5
CW
3846static void i915_irq_preinstall(struct drm_device * dev)
3847{
2d1013dd 3848 struct drm_i915_private *dev_priv = dev->dev_private;
a266c7d5
CW
3849 int pipe;
3850
a266c7d5
CW
3851 if (I915_HAS_HOTPLUG(dev)) {
3852 I915_WRITE(PORT_HOTPLUG_EN, 0);
3853 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3854 }
3855
00d98ebd 3856 I915_WRITE16(HWSTAM, 0xeffe);
a266c7d5
CW
3857 for_each_pipe(pipe)
3858 I915_WRITE(PIPESTAT(pipe), 0);
3859 I915_WRITE(IMR, 0xffffffff);
3860 I915_WRITE(IER, 0x0);
3861 POSTING_READ(IER);
3862}
3863
3864static int i915_irq_postinstall(struct drm_device *dev)
3865{
2d1013dd 3866 struct drm_i915_private *dev_priv = dev->dev_private;
38bde180 3867 u32 enable_mask;
379ef82d 3868 unsigned long irqflags;
a266c7d5 3869
38bde180
CW
3870 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3871
3872 /* Unmask the interrupts that we always want on. */
3873 dev_priv->irq_mask =
3874 ~(I915_ASLE_INTERRUPT |
3875 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3876 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3877 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3878 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3879 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3880
3881 enable_mask =
3882 I915_ASLE_INTERRUPT |
3883 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3884 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3885 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
3886 I915_USER_INTERRUPT;
3887
a266c7d5 3888 if (I915_HAS_HOTPLUG(dev)) {
20afbda2
DV
3889 I915_WRITE(PORT_HOTPLUG_EN, 0);
3890 POSTING_READ(PORT_HOTPLUG_EN);
3891
a266c7d5
CW
3892 /* Enable in IER... */
3893 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3894 /* and unmask in IMR */
3895 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3896 }
3897
a266c7d5
CW
3898 I915_WRITE(IMR, dev_priv->irq_mask);
3899 I915_WRITE(IER, enable_mask);
3900 POSTING_READ(IER);
3901
f49e38dd 3902 i915_enable_asle_pipestat(dev);
20afbda2 3903
379ef82d
DV
3904 /* Interrupt setup is already guaranteed to be single-threaded; this is
3905 * just to make the assert_spin_locked check happy. */
3906 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
755e9019
ID
3907 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3908 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
379ef82d
DV
3909 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3910
20afbda2
DV
3911 return 0;
3912}
3913
90a72f87
VS
3914/*
3915 * Returns true when a page flip has completed.
3916 */
3917static bool i915_handle_vblank(struct drm_device *dev,
3918 int plane, int pipe, u32 iir)
3919{
2d1013dd 3920 struct drm_i915_private *dev_priv = dev->dev_private;
90a72f87
VS
3921 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3922
8d7849db 3923 if (!intel_pipe_handle_vblank(dev, pipe))
90a72f87
VS
3924 return false;
3925
3926 if ((iir & flip_pending) == 0)
3927 return false;
3928
3929 intel_prepare_page_flip(dev, plane);
3930
3931 /* We detect FlipDone by looking for the change in PendingFlip from '1'
3932 * to '0' on the following vblank, i.e. IIR has the PendingFlip bit
3933 * asserted following the MI_DISPLAY_FLIP, but the ISR bit is deasserted, hence
3934 * the flip is completed (no longer pending). Since this doesn't raise
3935 * an interrupt per se, we watch for the change at vblank.
3936 */
3937 if (I915_READ(ISR) & flip_pending)
3938 return false;
3939
3940 intel_finish_page_flip(dev, pipe);
3941
3942 return true;
3943}
3944
ff1f525e 3945static irqreturn_t i915_irq_handler(int irq, void *arg)
a266c7d5 3946{
45a83f84 3947 struct drm_device *dev = arg;
2d1013dd 3948 struct drm_i915_private *dev_priv = dev->dev_private;
8291ee90 3949 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
a266c7d5 3950 unsigned long irqflags;
38bde180
CW
3951 u32 flip_mask =
3952 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3953 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
38bde180 3954 int pipe, ret = IRQ_NONE;
a266c7d5 3955
a266c7d5 3956 iir = I915_READ(IIR);
38bde180
CW
3957 do {
3958 bool irq_received = (iir & ~flip_mask) != 0;
8291ee90 3959 bool blc_event = false;
a266c7d5
CW
3960
3961 /* Can't rely on pipestat interrupt bit in iir as it might
3962 * have been cleared after the pipestat interrupt was received.
3963 * It doesn't set the bit in iir again, but it still produces
3964 * interrupts (for non-MSI).
3965 */
3966 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3967 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
58174462
MK
3968 i915_handle_error(dev, false,
3969 "Command parser error, iir 0x%08x",
3970 iir);
a266c7d5
CW
3971
3972 for_each_pipe(pipe) {
3973 int reg = PIPESTAT(pipe);
3974 pipe_stats[pipe] = I915_READ(reg);
3975
38bde180 3976 /* Clear the PIPE*STAT regs before the IIR */
a266c7d5 3977 if (pipe_stats[pipe] & 0x8000ffff) {
a266c7d5 3978 I915_WRITE(reg, pipe_stats[pipe]);
38bde180 3979 irq_received = true;
a266c7d5
CW
3980 }
3981 }
3982 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3983
3984 if (!irq_received)
3985 break;
3986
a266c7d5 3987 /* Consume port. Then clear IIR or we'll miss events */
16c6c56b
VS
3988 if (I915_HAS_HOTPLUG(dev) &&
3989 iir & I915_DISPLAY_PORT_INTERRUPT)
3990 i9xx_hpd_irq_handler(dev);
a266c7d5 3991
38bde180 3992 I915_WRITE(IIR, iir & ~flip_mask);
a266c7d5
CW
3993 new_iir = I915_READ(IIR); /* Flush posted writes */
3994
a266c7d5
CW
3995 if (iir & I915_USER_INTERRUPT)
3996 notify_ring(dev, &dev_priv->ring[RCS]);
a266c7d5 3997
a266c7d5 3998 for_each_pipe(pipe) {
38bde180 3999 int plane = pipe;
3a77c4c4 4000 if (HAS_FBC(dev))
38bde180 4001 plane = !plane;
90a72f87 4002
8291ee90 4003 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
90a72f87
VS
4004 i915_handle_vblank(dev, plane, pipe, iir))
4005 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
a266c7d5
CW
4006
4007 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4008 blc_event = true;
4356d586
DV
4009
4010 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
277de95e 4011 i9xx_pipe_crc_irq_handler(dev, pipe);
2d9d2b0b
VS
4012
4013 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
4014 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
fc2c807b 4015 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
a266c7d5
CW
4016 }
4017
a266c7d5
CW
4018 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4019 intel_opregion_asle_intr(dev);
4020
4021 /* With MSI, interrupts are only generated when iir
4022 * transitions from zero to nonzero. If another bit got
4023 * set while we were handling the existing iir bits, then
4024 * we would never get another interrupt.
4025 *
4026 * This is fine on non-MSI as well, as if we hit this path
4027 * we avoid exiting the interrupt handler only to generate
4028 * another one.
4029 *
4030 * Note that for MSI this could cause a stray interrupt report
4031 * if an interrupt landed in the time between writing IIR and
4032 * the posting read. This should be rare enough to never
4033 * trigger the 99% of 100,000 interrupts test for disabling
4034 * stray interrupts.
4035 */
38bde180 4036 ret = IRQ_HANDLED;
a266c7d5 4037 iir = new_iir;
38bde180 4038 } while (iir & ~flip_mask);
a266c7d5 4039
d05c617e 4040 i915_update_dri1_breadcrumb(dev);
8291ee90 4041
a266c7d5
CW
4042 return ret;
4043}
4044
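
The MSI caveat spelled out in the comment above deserves a standalone sketch: MSI raises a message only on IIR's zero-to-nonzero transition, so a handler that acks once and returns can strand a bit that latched during servicing. Re-reading IIR after the clearing write and looping until it reads zero avoids that. A self-contained model, with function pointers standing in for the real register accessors (names assumed):

#include <stdint.h>

static void service_iir(uint32_t (*read_iir)(void),
			void (*write_iir)(uint32_t),
			void (*handle_events)(uint32_t))
{
	uint32_t iir = read_iir();

	while (iir) {
		uint32_t new_iir;

		write_iir(iir);		/* ack what we observed */
		new_iir = read_iir();	/* flush posted write, catch latecomers */
		handle_events(iir);
		iir = new_iir;		/* loop until nothing is latched */
	}
}
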
4045static void i915_irq_uninstall(struct drm_device * dev)
4046{
2d1013dd 4047 struct drm_i915_private *dev_priv = dev->dev_private;
a266c7d5
CW
4048 int pipe;
4049
3ca1cced 4050 intel_hpd_irq_uninstall(dev_priv);
ac4c16c5 4051
a266c7d5
CW
4052 if (I915_HAS_HOTPLUG(dev)) {
4053 I915_WRITE(PORT_HOTPLUG_EN, 0);
4054 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4055 }
4056
00d98ebd 4057 I915_WRITE16(HWSTAM, 0xffff);
55b39755
CW
4058 for_each_pipe(pipe) {
4059 /* Clear enable bits; then clear status bits */
a266c7d5 4060 I915_WRITE(PIPESTAT(pipe), 0);
55b39755
CW
4061 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4062 }
a266c7d5
CW
4063 I915_WRITE(IMR, 0xffffffff);
4064 I915_WRITE(IER, 0x0);
4065
a266c7d5
CW
4066 I915_WRITE(IIR, I915_READ(IIR));
4067}
4068
4069static void i965_irq_preinstall(struct drm_device * dev)
4070{
2d1013dd 4071 struct drm_i915_private *dev_priv = dev->dev_private;
a266c7d5
CW
4072 int pipe;
4073
adca4730
CW
4074 I915_WRITE(PORT_HOTPLUG_EN, 0);
4075 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
a266c7d5
CW
4076
4077 I915_WRITE(HWSTAM, 0xeffe);
4078 for_each_pipe(pipe)
4079 I915_WRITE(PIPESTAT(pipe), 0);
4080 I915_WRITE(IMR, 0xffffffff);
4081 I915_WRITE(IER, 0x0);
4082 POSTING_READ(IER);
4083}
4084
4085static int i965_irq_postinstall(struct drm_device *dev)
4086{
2d1013dd 4087 struct drm_i915_private *dev_priv = dev->dev_private;
bbba0a97 4088 u32 enable_mask;
a266c7d5 4089 u32 error_mask;
b79480ba 4090 unsigned long irqflags;
a266c7d5 4091
a266c7d5 4092 /* Unmask the interrupts that we always want on. */
bbba0a97 4093 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
adca4730 4094 I915_DISPLAY_PORT_INTERRUPT |
bbba0a97
CW
4095 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4096 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4097 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4098 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4099 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4100
4101 enable_mask = ~dev_priv->irq_mask;
21ad8330
VS
4102 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4103 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
bbba0a97
CW
4104 enable_mask |= I915_USER_INTERRUPT;
4105
4106 if (IS_G4X(dev))
4107 enable_mask |= I915_BSD_USER_INTERRUPT;
a266c7d5 4108
b79480ba
DV
4109 /* Interrupt setup is already guaranteed to be single-threaded; this is
4110 * just to make the assert_spin_locked check happy. */
4111 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
755e9019
ID
4112 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4113 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4114 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
b79480ba 4115 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
a266c7d5 4116
a266c7d5
CW
4117 /*
4118 * Enable some error detection, note the instruction error mask
4119 * bit is reserved, so we leave it masked.
4120 */
4121 if (IS_G4X(dev)) {
4122 error_mask = ~(GM45_ERROR_PAGE_TABLE |
4123 GM45_ERROR_MEM_PRIV |
4124 GM45_ERROR_CP_PRIV |
4125 I915_ERROR_MEMORY_REFRESH);
4126 } else {
4127 error_mask = ~(I915_ERROR_PAGE_TABLE |
4128 I915_ERROR_MEMORY_REFRESH);
4129 }
4130 I915_WRITE(EMR, error_mask);
4131
4132 I915_WRITE(IMR, dev_priv->irq_mask);
4133 I915_WRITE(IER, enable_mask);
4134 POSTING_READ(IER);
4135
20afbda2
DV
4136 I915_WRITE(PORT_HOTPLUG_EN, 0);
4137 POSTING_READ(PORT_HOTPLUG_EN);
4138
f49e38dd 4139 i915_enable_asle_pipestat(dev);
20afbda2
DV
4140
4141 return 0;
4142}
4143
bac56d5b 4144static void i915_hpd_irq_setup(struct drm_device *dev)
20afbda2 4145{
2d1013dd 4146 struct drm_i915_private *dev_priv = dev->dev_private;
e5868a31 4147 struct drm_mode_config *mode_config = &dev->mode_config;
cd569aed 4148 struct intel_encoder *intel_encoder;
20afbda2
DV
4149 u32 hotplug_en;
4150
b5ea2d56
DV
4151 assert_spin_locked(&dev_priv->irq_lock);
4152
bac56d5b
EE
4153 if (I915_HAS_HOTPLUG(dev)) {
4154 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
4155 hotplug_en &= ~HOTPLUG_INT_EN_MASK;
4156 /* Note HDMI and DP share hotplug bits */
e5868a31 4157 /* enable bits are the same for all generations */
cd569aed
EE
4158 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
4159 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
4160 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
bac56d5b
EE
4161 /* Programming the CRT detection parameters tends
4162 * to generate a spurious hotplug event about three
4163 * seconds later. So just do it once.
4164 */
4165 if (IS_G4X(dev))
4166 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
85fc95ba 4167 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
bac56d5b 4168 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
a266c7d5 4169
bac56d5b
EE
4170 /* Ignore TV since it's buggy */
4171 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
4172 }
a266c7d5
CW
4173}
4174
ff1f525e 4175static irqreturn_t i965_irq_handler(int irq, void *arg)
a266c7d5 4176{
45a83f84 4177 struct drm_device *dev = arg;
2d1013dd 4178 struct drm_i915_private *dev_priv = dev->dev_private;
a266c7d5
CW
4179 u32 iir, new_iir;
4180 u32 pipe_stats[I915_MAX_PIPES];
a266c7d5 4181 unsigned long irqflags;
a266c7d5 4182 int ret = IRQ_NONE, pipe;
21ad8330
VS
4183 u32 flip_mask =
4184 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4185 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
a266c7d5 4186
a266c7d5
CW
4187 iir = I915_READ(IIR);
4188
a266c7d5 4189 for (;;) {
501e01d7 4190 bool irq_received = (iir & ~flip_mask) != 0;
2c8ba29f
CW
4191 bool blc_event = false;
4192
a266c7d5
CW
4193 /* Can't rely on pipestat interrupt bit in iir as it might
4194 * have been cleared after the pipestat interrupt was received.
4195 * It doesn't set the bit in iir again, but it still produces
4196 * interrupts (for non-MSI).
4197 */
4198 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
4199 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
58174462
MK
4200 i915_handle_error(dev, false,
4201 "Command parser error, iir 0x%08x",
4202 iir);
a266c7d5
CW
4203
4204 for_each_pipe(pipe) {
4205 int reg = PIPESTAT(pipe);
4206 pipe_stats[pipe] = I915_READ(reg);
4207
4208 /*
4209 * Clear the PIPE*STAT regs before the IIR
4210 */
4211 if (pipe_stats[pipe] & 0x8000ffff) {
a266c7d5 4212 I915_WRITE(reg, pipe_stats[pipe]);
501e01d7 4213 irq_received = true;
a266c7d5
CW
4214 }
4215 }
4216 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
4217
4218 if (!irq_received)
4219 break;
4220
4221 ret = IRQ_HANDLED;
4222
4223 /* Consume port. Then clear IIR or we'll miss events */
16c6c56b
VS
4224 if (iir & I915_DISPLAY_PORT_INTERRUPT)
4225 i9xx_hpd_irq_handler(dev);
a266c7d5 4226
21ad8330 4227 I915_WRITE(IIR, iir & ~flip_mask);
a266c7d5
CW
4228 new_iir = I915_READ(IIR); /* Flush posted writes */
4229
a266c7d5
CW
4230 if (iir & I915_USER_INTERRUPT)
4231 notify_ring(dev, &dev_priv->ring[RCS]);
4232 if (iir & I915_BSD_USER_INTERRUPT)
4233 notify_ring(dev, &dev_priv->ring[VCS]);
4234
a266c7d5 4235 for_each_pipe(pipe) {
2c8ba29f 4236 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
90a72f87
VS
4237 i915_handle_vblank(dev, pipe, pipe, iir))
4238 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
a266c7d5
CW
4239
4240 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4241 blc_event = true;
4356d586
DV
4242
4243 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
277de95e 4244 i9xx_pipe_crc_irq_handler(dev, pipe);
a266c7d5 4245
2d9d2b0b
VS
4246 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
4247 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
fc2c807b 4248 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
2d9d2b0b 4249 }
a266c7d5
CW
4250
4251 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4252 intel_opregion_asle_intr(dev);
4253
515ac2bb
DV
4254 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
4255 gmbus_irq_handler(dev);
4256
a266c7d5
CW
4257 /* With MSI, interrupts are only generated when iir
4258 * transitions from zero to nonzero. If another bit got
4259 * set while we were handling the existing iir bits, then
4260 * we would never get another interrupt.
4261 *
4262 * This is fine on non-MSI as well, as if we hit this path
4263 * we avoid exiting the interrupt handler only to generate
4264 * another one.
4265 *
4266 * Note that for MSI this could cause a stray interrupt report
4267 * if an interrupt landed in the time between writing IIR and
4268 * the posting read. This should be rare enough to never
4269 * trigger the 99% of 100,000 interrupts test for disabling
4270 * stray interrupts.
4271 */
4272 iir = new_iir;
4273 }
4274
d05c617e 4275 i915_update_dri1_breadcrumb(dev);
2c8ba29f 4276
a266c7d5
CW
4277 return ret;
4278}
4279
4280static void i965_irq_uninstall(struct drm_device * dev)
4281{
2d1013dd 4282 struct drm_i915_private *dev_priv = dev->dev_private;
a266c7d5
CW
4283 int pipe;
4284
4285 if (!dev_priv)
4286 return;
4287
3ca1cced 4288 intel_hpd_irq_uninstall(dev_priv);
ac4c16c5 4289
adca4730
CW
4290 I915_WRITE(PORT_HOTPLUG_EN, 0);
4291 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
a266c7d5
CW
4292
4293 I915_WRITE(HWSTAM, 0xffffffff);
4294 for_each_pipe(pipe)
4295 I915_WRITE(PIPESTAT(pipe), 0);
4296 I915_WRITE(IMR, 0xffffffff);
4297 I915_WRITE(IER, 0x0);
4298
4299 for_each_pipe(pipe)
4300 I915_WRITE(PIPESTAT(pipe),
4301 I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
4302 I915_WRITE(IIR, I915_READ(IIR));
4303}
4304
3ca1cced 4305static void intel_hpd_irq_reenable(unsigned long data)
ac4c16c5 4306{
2d1013dd 4307 struct drm_i915_private *dev_priv = (struct drm_i915_private *)data;
ac4c16c5
EE
4308 struct drm_device *dev = dev_priv->dev;
4309 struct drm_mode_config *mode_config = &dev->mode_config;
4310 unsigned long irqflags;
4311 int i;
4312
4313 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
4314 for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
4315 struct drm_connector *connector;
4316
4317 if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
4318 continue;
4319
4320 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
4321
4322 list_for_each_entry(connector, &mode_config->connector_list, head) {
4323 struct intel_connector *intel_connector = to_intel_connector(connector);
4324
4325 if (intel_connector->encoder->hpd_pin == i) {
4326 if (connector->polled != intel_connector->polled)
4327 DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
4328 drm_get_connector_name(connector));
4329 connector->polled = intel_connector->polled;
4330 if (!connector->polled)
4331 connector->polled = DRM_CONNECTOR_POLL_HPD;
4332 }
4333 }
4334 }
4335 if (dev_priv->display.hpd_irq_setup)
4336 dev_priv->display.hpd_irq_setup(dev);
4337 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
4338}
4339
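
intel_hpd_irq_reenable() is the tail end of hotplug-storm mitigation: pins that fired too often were parked as HPD_DISABLED elsewhere and their connectors demoted to polling; this timer promotes them back. A minimal sketch of the detection half, with the threshold as an assumed constant (the real driver keeps its own counters, detection period, and an intermediate mark-disabled state):

enum ex_hpd_state { EX_HPD_ENABLED, EX_HPD_DISABLED };

#define EX_HPD_STORM_THRESHOLD 5	/* assumed for illustration */

struct ex_hpd_pin {
	enum ex_hpd_state state;
	int events_this_period;
};

/* Per hotplug interrupt: park a noisy pin so a flaky line cannot
 * livelock the IRQ path; a deferred timer re-enables it later. */
static void ex_hpd_irq_event(struct ex_hpd_pin *pin)
{
	if (pin->state != EX_HPD_ENABLED)
		return;
	if (++pin->events_this_period > EX_HPD_STORM_THRESHOLD)
		pin->state = EX_HPD_DISABLED;	/* fall back to polling */
}
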
f71d4af4
JB
4340void intel_irq_init(struct drm_device *dev)
4341{
8b2e326d
CW
4342 struct drm_i915_private *dev_priv = dev->dev_private;
4343
4344 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
99584db3 4345 INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
c6a828d3 4346 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
a4da4fa4 4347 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
8b2e326d 4348
a6706b45
D
4349 /* Let's track the enabled rps events */
4350 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4351
99584db3
DV
4352 setup_timer(&dev_priv->gpu_error.hangcheck_timer,
4353 i915_hangcheck_elapsed,
61bac78e 4354 (unsigned long) dev);
3ca1cced 4355 setup_timer(&dev_priv->hotplug_reenable_timer, intel_hpd_irq_reenable,
ac4c16c5 4356 (unsigned long) dev_priv);
61bac78e 4357
97a19a24 4358 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
9ee32fea 4359
4cdb83ec
VS
4360 if (IS_GEN2(dev)) {
4361 dev->max_vblank_count = 0;
4362 dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
4363 } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
f71d4af4
JB
4364 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4365 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
391f75e2
VS
4366 } else {
4367 dev->driver->get_vblank_counter = i915_get_vblank_counter;
4368 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
f71d4af4
JB
4369 }
4370
c2baf4b7 4371 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
c3613de9 4372 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
c2baf4b7
VS
4373 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4374 }
f71d4af4 4375
43f328d7
VS
4376 if (IS_CHERRYVIEW(dev)) {
4377 dev->driver->irq_handler = cherryview_irq_handler;
4378 dev->driver->irq_preinstall = cherryview_irq_preinstall;
4379 dev->driver->irq_postinstall = cherryview_irq_postinstall;
4380 dev->driver->irq_uninstall = cherryview_irq_uninstall;
4381 dev->driver->enable_vblank = valleyview_enable_vblank;
4382 dev->driver->disable_vblank = valleyview_disable_vblank;
4383 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4384 } else if (IS_VALLEYVIEW(dev)) {
7e231dbe
JB
4385 dev->driver->irq_handler = valleyview_irq_handler;
4386 dev->driver->irq_preinstall = valleyview_irq_preinstall;
4387 dev->driver->irq_postinstall = valleyview_irq_postinstall;
4388 dev->driver->irq_uninstall = valleyview_irq_uninstall;
4389 dev->driver->enable_vblank = valleyview_enable_vblank;
4390 dev->driver->disable_vblank = valleyview_disable_vblank;
fa00abe0 4391 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
abd58f01
BW
4392 } else if (IS_GEN8(dev)) {
4393 dev->driver->irq_handler = gen8_irq_handler;
4394 dev->driver->irq_preinstall = gen8_irq_preinstall;
4395 dev->driver->irq_postinstall = gen8_irq_postinstall;
4396 dev->driver->irq_uninstall = gen8_irq_uninstall;
4397 dev->driver->enable_vblank = gen8_enable_vblank;
4398 dev->driver->disable_vblank = gen8_disable_vblank;
4399 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
f71d4af4
JB
4400 } else if (HAS_PCH_SPLIT(dev)) {
4401 dev->driver->irq_handler = ironlake_irq_handler;
4402 dev->driver->irq_preinstall = ironlake_irq_preinstall;
4403 dev->driver->irq_postinstall = ironlake_irq_postinstall;
4404 dev->driver->irq_uninstall = ironlake_irq_uninstall;
4405 dev->driver->enable_vblank = ironlake_enable_vblank;
4406 dev->driver->disable_vblank = ironlake_disable_vblank;
82a28bcf 4407 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
f71d4af4 4408 } else {
c2798b19
CW
4409 if (INTEL_INFO(dev)->gen == 2) {
4410 dev->driver->irq_preinstall = i8xx_irq_preinstall;
4411 dev->driver->irq_postinstall = i8xx_irq_postinstall;
4412 dev->driver->irq_handler = i8xx_irq_handler;
4413 dev->driver->irq_uninstall = i8xx_irq_uninstall;
a266c7d5
CW
4414 } else if (INTEL_INFO(dev)->gen == 3) {
4415 dev->driver->irq_preinstall = i915_irq_preinstall;
4416 dev->driver->irq_postinstall = i915_irq_postinstall;
4417 dev->driver->irq_uninstall = i915_irq_uninstall;
4418 dev->driver->irq_handler = i915_irq_handler;
20afbda2 4419 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
c2798b19 4420 } else {
a266c7d5
CW
4421 dev->driver->irq_preinstall = i965_irq_preinstall;
4422 dev->driver->irq_postinstall = i965_irq_postinstall;
4423 dev->driver->irq_uninstall = i965_irq_uninstall;
4424 dev->driver->irq_handler = i965_irq_handler;
bac56d5b 4425 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
c2798b19 4426 }
f71d4af4
JB
4427 dev->driver->enable_vblank = i915_enable_vblank;
4428 dev->driver->disable_vblank = i915_disable_vblank;
4429 }
4430}
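
The per-generation get_vblank_counter choice above also fixes the counter width the DRM core has to assume: gen2 advertises no usable counter at all (max_vblank_count = 0), G4X and gen5+ expose a full 32 bits, and everything in between only 24. The wrap-safe delta the core effectively computes looks like this (a sketch; the helper name is invented):

#include <stdint.h>

/* Wrap-safe frame-count delta for an N-bit hardware counter,
 * e.g. width 24 for i915_get_vblank_counter, 32 for gm45. */
static uint32_t vblank_delta(uint32_t cur, uint32_t last, unsigned int width)
{
	uint32_t mask = (width < 32) ? ((1u << width) - 1) : 0xffffffffu;

	return (cur - last) & mask;
}
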
20afbda2
DV
4431
4432void intel_hpd_init(struct drm_device *dev)
4433{
4434 struct drm_i915_private *dev_priv = dev->dev_private;
821450c6
EE
4435 struct drm_mode_config *mode_config = &dev->mode_config;
4436 struct drm_connector *connector;
b5ea2d56 4437 unsigned long irqflags;
821450c6 4438 int i;
20afbda2 4439
821450c6
EE
4440 for (i = 1; i < HPD_NUM_PINS; i++) {
4441 dev_priv->hpd_stats[i].hpd_cnt = 0;
4442 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
4443 }
4444 list_for_each_entry(connector, &mode_config->connector_list, head) {
4445 struct intel_connector *intel_connector = to_intel_connector(connector);
4446 connector->polled = intel_connector->polled;
4447 if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
4448 connector->polled = DRM_CONNECTOR_POLL_HPD;
4449 }
b5ea2d56
DV
4450
4451 /* Interrupt setup is already guaranteed to be single-threaded; this is
4452 * just to make the assert_spin_locked checks happy. */
4453 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
20afbda2
DV
4454 if (dev_priv->display.hpd_irq_setup)
4455 dev_priv->display.hpd_irq_setup(dev);
b5ea2d56 4456 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
20afbda2 4457}
c67a470b 4458
5d584b2e 4459/* Disable interrupts so we can allow runtime PM. */
730488b2 4460void intel_runtime_pm_disable_interrupts(struct drm_device *dev)
c67a470b
PZ
4461{
4462 struct drm_i915_private *dev_priv = dev->dev_private;
c67a470b 4463
730488b2 4464 dev->driver->irq_uninstall(dev);
5d584b2e 4465 dev_priv->pm.irqs_disabled = true;
c67a470b
PZ
4466}
4467
5d584b2e 4468/* Restore interrupts so we can recover from runtime PM. */
730488b2 4469void intel_runtime_pm_restore_interrupts(struct drm_device *dev)
c67a470b
PZ
4470{
4471 struct drm_i915_private *dev_priv = dev->dev_private;
c67a470b 4472
5d584b2e 4473 dev_priv->pm.irqs_disabled = false;
730488b2
PZ
4474 dev->driver->irq_preinstall(dev);
4475 dev->driver->irq_postinstall(dev);
c67a470b 4476}