/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)
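/*
 * Illustrative expansion: GEN8_IRQ_RESET_NDX(GT, 0) masks everything in
 * GEN8_GT_IMR(0), disables delivery via GEN8_GT_IER(0), and then writes
 * GEN8_GT_IIR(0) twice. The double IIR clear, with a posting read in
 * between, is what covers the "two queued events" case noted above: the
 * first write retires the currently latched event, the second retires
 * one that may have been queued behind it.
 */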
#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
	u32 val = I915_READ(reg); \
	if (val) { \
		WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
		     (reg), val); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
	} \
} while (0)

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	POSTING_READ(GEN8_##type##_IER(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
	I915_WRITE(type##IMR, (imr_val)); \
	I915_WRITE(type##IER, (ier_val)); \
	POSTING_READ(type##IER); \
} while (0)
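/*
 * Usage sketch (not a verbatim call site): a postinstall hook would do
 * something like
 *
 *	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
 *
 * which first asserts that GTIIR is zero (i.e. the preinstall reset
 * actually stuck), then programs GTIMR before enabling bits in GTIER,
 * so no interrupt can fire while the registers are half-programmed.
 */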
/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}
/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
		POSTING_READ(GEN6_PMIMR);
	}
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, mask);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}
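/*
 * Worked example of the interrupt_mask/enabled_irq_mask convention used
 * by snb_update_pm_irq() and friends: with interrupt_mask = 0x6 and
 * enabled_irq_mask = 0x2, new_val = (old & ~0x6) | (~0x2 & 0x6), so bit 1
 * ends up cleared in IMR (unmasked, i.e. enabled) and bit 2 ends up set
 * (masked, i.e. disabled), while every bit outside interrupt_mask keeps
 * its previous IMR state.
 */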
static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum pipe pipe;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}

/**
 * bdw_update_pm_irq - update GT interrupt 2
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 *
 * Copied from the snb function, updated with relevant register offsets
 */
static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN8_GT_IMR(2), dev_priv->pm_irq_mask);
		POSTING_READ(GEN8_GT_IMR(2));
	}
}

void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	bdw_update_pm_irq(dev_priv, mask, mask);
}

void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	bdw_update_pm_irq(dev_priv, mask, 0);
}

static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}
void i9xx_check_fifo_underruns(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	for_each_intel_crtc(dev, crtc) {
		u32 reg = PIPESTAT(crtc->pipe);
		u32 pipestat;

		if (crtc->cpu_fifo_underrun_disabled)
			continue;

		pipestat = I915_READ(reg) & 0xffff0000;
		if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
			continue;

		I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
		POSTING_READ(reg);

		DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
					     enum pipe pipe,
					     bool enable, bool old)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0xffff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
	POSTING_READ(reg);

	if (old && pipestat & PIPE_FIFO_UNDERRUN_STATUS)
		DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
}

static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						 enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}

static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe,
						  bool enable, bool old)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

		if (!ivb_can_enable_err_int(dev))
			return;

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

		if (old &&
		    I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
			DRM_ERROR("uncleared fifo underrun on pipe %c\n",
				  pipe_name(pipe));
		}
	}
}

static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	assert_spin_locked(&dev_priv->irq_lock);

	if (enable)
		dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
	else
		dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
}
/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 uint32_t interrupt_mask,
					 uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
#define ibx_enable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), 0)

static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

	if (enable)
		ibx_enable_display_interrupt(dev_priv, bit);
	else
		ibx_disable_display_interrupt(dev_priv, bit);
}
static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable, bool old)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(SERR_INT,
			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

		if (!cpt_can_enable_serr_int(dev))
			return;

		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
	} else {
		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

		if (old && I915_READ(SERR_INT) &
		    SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
			DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
				  transcoder_name(pch_transcoder));
		}
	}
}
/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, due to the fact that there's just one interrupt
 * mask/enable bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
						    enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool old;

	assert_spin_locked(&dev_priv->irq_lock);

	old = !intel_crtc->cpu_fifo_underrun_disabled;
	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev))
		i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
	else if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
	else if (IS_GEN8(dev))
		broadwell_set_fifo_underrun_reporting(dev, pipe, enable);

	return old;
}

bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	ret = __intel_set_cpu_fifo_underrun_reporting(dev, pipe, enable);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return ret;
}
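/*
 * Usage sketch (illustrative, not a verbatim call site): callers typically
 * disable reporting around an operation known to underrun and then restore
 * the previous state, which is why the function returns it:
 *
 *	bool old = intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
 *	... underrun-prone work ...
 *	intel_set_cpu_fifo_underrun_reporting(dev, pipe, old);
 */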
static bool __cpu_fifo_underrun_reporting_enabled(struct drm_device *dev,
						  enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	return !intel_crtc->cpu_fifo_underrun_disabled;
}

/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's
 * just one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool old;

	/*
	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
	 * has only one pch transcoder A that all pipes can use. To avoid racy
	 * pch transcoder -> pipe lookups from interrupt code simply store the
	 * underrun statistics in crtc A. Since we never expose this anywhere
	 * nor use it outside of the fifo underrun code here using the "wrong"
	 * crtc on LPT won't cause issues.
	 */

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	old = !intel_crtc->pch_fifo_underrun_disabled;
	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable, old);

	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return old;
}
static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}
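/*
 * Worked example: for status_mask = PIPE_CRC_DONE_INTERRUPT_STATUS the
 * enable bit really is just the status bit shifted into the high half
 * (status_mask << 16). The sprite flip done bits are the exception
 * special-cased above, since their enable bits don't sit exactly 16 bits
 * above the corresponding status bits on VLV.
 */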
void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev: drm device
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}
/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */
static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct intel_crtc *intel_crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		const struct drm_display_mode *mode =
			&intel_crtc->config.adjusted_mode;

		htotal = mode->crtc_htotal;
		hsync_start = mode->crtc_hsync_start;
		vbl_start = mode->crtc_vblank_start;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	} else {
		enum transcoder cpu_transcoder = (enum transcoder) pipe;

		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
		hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1;
		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
		if ((I915_READ(PIPECONF(cpu_transcoder)) &
		     PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	}

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
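/*
 * Numeric example of the counter cook-up above (made-up mode): with
 * htotal = 100, hsync_start = 92 and vblank_start = 5, vbl_start becomes
 * 5 * 100 - (100 - 92) = 492 pixels. If the hardware frame counter reads
 * N and the pixel counter reads >= 492 we are already past the start of
 * vblank for frame N, so N + 1 is reported; below 492 we still report N.
 */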
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))

static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev))
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (!intel_crtc->active) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}
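/*
 * Worked example of the normalization above (fictitious mode): with
 * vtotal = 10, vbl_start = 8 and vbl_end = 10, a raw position of 9
 * (inside vblank) becomes 9 - 10 = -1, counting up towards 0 at vbl_end,
 * while a raw position of 2 (in active video) stays 2 + 10 - 10 = 2,
 * i.e. positive lines since the end of vblank, which is the convention
 * the DRM timestamping helper expects.
 */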
int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags, crtc,
						     &to_intel_crtc(crtc)->config.adjusted_mode);
}

static bool intel_hpd_irq_event(struct drm_device *dev,
				struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	if (old_status == connector->status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.id,
		      connector->name,
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->status));

	return true;
}
static void i915_digport_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, dig_port_work);
	unsigned long irqflags;
	u32 long_port_mask, short_port_mask;
	struct intel_digital_port *intel_dig_port;
	int i, ret;
	u32 old_bits = 0;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	long_port_mask = dev_priv->long_hpd_port_mask;
	dev_priv->long_hpd_port_mask = 0;
	short_port_mask = dev_priv->short_hpd_port_mask;
	dev_priv->short_hpd_port_mask = 0;
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	for (i = 0; i < I915_MAX_PORTS; i++) {
		bool valid = false;
		bool long_hpd = false;
		intel_dig_port = dev_priv->hpd_irq_port[i];
		if (!intel_dig_port || !intel_dig_port->hpd_pulse)
			continue;

		if (long_port_mask & (1 << i)) {
			valid = true;
			long_hpd = true;
		} else if (short_port_mask & (1 << i))
			valid = true;

		if (valid) {
			ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
			if (ret == true) {
				/* if we get true fallback to old school hpd */
				old_bits |= (1 << intel_dig_port->base.hpd_pin);
			}
		}
	}

	if (old_bits) {
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		dev_priv->hpd_event_bits |= old_bits;
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
		schedule_work(&dev_priv->hotplug_work);
	}
}
/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	unsigned long irqflags;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 connector->name);
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      connector->name, intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_timer(&dev_priv->hotplug_reenable_timer,
			  jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}

static void intel_hpd_irq_uninstall(struct drm_i915_private *dev_priv)
{
	del_timer_sync(&dev_priv->hotplug_reenable_timer);
}
static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);
}
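/*
 * Note on the clamping above: the ILK delay values are inverted with
 * respect to frequency, i.e. ips.max_delay is the numerically smallest
 * delay (highest p-state) and ips.min_delay the largest, which is why
 * "busy" decrements cur_delay and clamps with '<' against max_delay.
 */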
static void notify_ring(struct drm_device *dev,
			struct intel_engine_cs *ring)
{
	if (!intel_ring_initialized(ring))
		return;

	trace_i915_gem_request_complete(ring);

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		intel_notify_mmio_flip(ring);

	wake_up_all(&ring->irq_queue);
	i915_queue_hangcheck(dev);
}

static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
			    struct intel_rps_ei *rps_ei)
{
	u32 cz_ts, cz_freq_khz;
	u32 render_count, media_count;
	u32 elapsed_render, elapsed_media, elapsed_time;
	u32 residency = 0;

	cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);

	render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
	media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);

	if (rps_ei->cz_clock == 0) {
		rps_ei->cz_clock = cz_ts;
		rps_ei->render_c0 = render_count;
		rps_ei->media_c0 = media_count;

		return dev_priv->rps.cur_freq;
	}

	elapsed_time = cz_ts - rps_ei->cz_clock;
	rps_ei->cz_clock = cz_ts;

	elapsed_render = render_count - rps_ei->render_c0;
	rps_ei->render_c0 = render_count;

	elapsed_media = media_count - rps_ei->media_c0;
	rps_ei->media_c0 = media_count;

	/* Convert all the counters into common unit of milli sec */
	elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
	elapsed_render /= cz_freq_khz;
	elapsed_media /= cz_freq_khz;

	/*
	 * Calculate overall C0 residency percentage
	 * only if elapsed time is non zero
	 */
	if (elapsed_time)
		residency = (max(elapsed_render, elapsed_media) * 100) /
			    elapsed_time;

	return residency;
}
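/*
 * Units example for the conversion above (illustrative numbers): the CZ
 * timestamp is divided by VLV_CZ_CLOCK_TO_MILLI_SEC to get milliseconds,
 * and the C0 counters by cz_freq_khz (ticks per millisecond) to land in
 * the same unit. If the render well was then in C0 for 8 ms of a 10 ms EI
 * window and media for 2 ms, the reported residency is
 * max(8, 2) * 100 / 10 = 80 (percent).
 */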
/**
 * vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU
 * busy-ness calculated from C0 counters of render & media power wells
 * @dev_priv: DRM device private
 *
 */
static int vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
{
	u32 residency_C0_up = 0, residency_C0_down = 0;
	int new_delay, adj;

	dev_priv->rps.ei_interrupt_count++;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (dev_priv->rps.up_ei.cz_clock == 0) {
		vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
		vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
		return dev_priv->rps.cur_freq;
	}

	/*
	 * To down throttle, C0 residency should be less than down threshold
	 * for continuous EI intervals. So calculate down EI counters
	 * once in VLV_INT_COUNT_FOR_DOWN_EI
	 */
	if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {
		dev_priv->rps.ei_interrupt_count = 0;

		residency_C0_down = vlv_c0_residency(dev_priv,
						     &dev_priv->rps.down_ei);
	} else {
		residency_C0_up = vlv_c0_residency(dev_priv,
						   &dev_priv->rps.up_ei);
	}

	new_delay = dev_priv->rps.cur_freq;

	adj = dev_priv->rps.last_adj;
	/* C0 residency is greater than UP threshold. Increase Frequency */
	if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else
			adj = 1;

		if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
			new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;

	} else if (!dev_priv->rps.ei_interrupt_count &&
		   (residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
		if (adj < 0)
			adj *= 2;
		else
			adj = -1;
		/*
		 * This means, C0 residency is less than down threshold over
		 * a period of VLV_INT_COUNT_FOR_DOWN_EI. So, reduce the freq
		 */
		if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
			new_delay = dev_priv->rps.cur_freq + adj;
	}

	return new_delay;
}
static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	u32 pm_iir;
	int new_delay, adj;

	spin_lock_irq(&dev_priv->irq_lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	if (INTEL_INFO(dev_priv->dev)->gen >= 8)
		gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	else {
		/* Make sure not to corrupt PMIMR state used by ringbuffer */
		gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	}
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	adj = dev_priv->rps.last_adj;
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
		new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;
	} else { /* unknown event */
		new_delay = dev_priv->rps.cur_freq;
	}

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay = clamp_t(int, new_delay,
			    dev_priv->rps.min_freq_softlimit,
			    dev_priv->rps.max_freq_softlimit);

	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;

	if (IS_VALLEYVIEW(dev_priv->dev))
		valleyview_set_rps(dev_priv->dev, new_delay);
	else
		gen6_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}
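/*
 * Example of the last_adj behaviour above: consecutive up-threshold
 * interrupts ramp the step exponentially (+1, +2, +4, ...; +2, +4, +8 on
 * CHV to keep the encoding even), while a down event resets or inverts
 * the ramp. The clamp_t() at the end keeps the result inside the sysfs
 * softlimits whatever the ramp produced.
 */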
/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	unsigned long flags;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}
static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
		i915_handle_error(dev, false, "GT error interrupt 0x%08x",
				  gt_iir);
	}

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}

static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if ((pm_iir & dev_priv->pm_rps_events) == 0)
		return;

	spin_lock(&dev_priv->irq_lock);
	dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
	gen8_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
	spin_unlock(&dev_priv->irq_lock);

	queue_work(dev_priv->wq, &dev_priv->rps.work);
}
static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
				       struct drm_i915_private *dev_priv,
				       u32 master_ctl)
{
	u32 rcs, bcs, vcs;
	uint32_t tmp = 0;
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(0));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(0), tmp);
			ret = IRQ_HANDLED;
			rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
			if (rcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[RCS]);
			if (bcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[BCS]);
			if ((rcs | bcs) & GT_CONTEXT_SWITCH_INTERRUPT)
				DRM_DEBUG_DRIVER("TODO: Context switch\n");
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(1));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(1), tmp);
			ret = IRQ_HANDLED;
			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[VCS]);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				DRM_DEBUG_DRIVER("TODO: Context switch\n");
			vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[VCS2]);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				DRM_DEBUG_DRIVER("TODO: Context switch\n");
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_PM_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(2));
		if (tmp & dev_priv->pm_rps_events) {
			I915_WRITE(GEN8_GT_IIR(2),
				   tmp & dev_priv->pm_rps_events);
			ret = IRQ_HANDLED;
			gen8_rps_irq_handler(dev_priv, tmp);
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(3));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(3), tmp);
			ret = IRQ_HANDLED;
			vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[VECS]);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				DRM_DEBUG_DRIVER("TODO: Context switch\n");
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	return ret;
}
#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

/* Shift values below correspond to the PORT{B,C,D}_HOTPLUG_LONG_DETECT
 * bit positions in SHOTPLUG_CTL (ilk) and PORT_HOTPLUG_STAT (g4x). */
static int ilk_port_to_hotplug_shift(enum port port)
{
	switch (port) {
	case PORT_A:
	case PORT_E:
	default:
		return -1;
	case PORT_B:
		return 0;
	case PORT_C:
		return 8;
	case PORT_D:
		return 16;
	}
}

static int g4x_port_to_hotplug_shift(enum port port)
{
	switch (port) {
	case PORT_A:
	case PORT_E:
	default:
		return -1;
	case PORT_B:
		return 17;
	case PORT_C:
		return 19;
	case PORT_D:
		return 21;
	}
}

static inline enum port get_port_from_pin(enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_B:
		return PORT_B;
	case HPD_PORT_C:
		return PORT_C;
	case HPD_PORT_D:
		return PORT_D;
	default:
		return PORT_A; /* no hpd */
	}
}
static inline void intel_hpd_irq_handler(struct drm_device *dev,
					 u32 hotplug_trigger,
					 u32 dig_hotplug_reg,
					 const u32 *hpd)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;
	enum port port;
	bool storm_detected = false;
	bool queue_dig = false, queue_hp = false;
	u32 dig_shift;
	u32 dig_port_mask = 0;

	if (!hotplug_trigger)
		return;

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg);

	spin_lock(&dev_priv->irq_lock);
	for (i = 1; i < HPD_NUM_PINS; i++) {
		if (!(hpd[i] & hotplug_trigger))
			continue;

		port = get_port_from_pin(i);
		if (port && dev_priv->hpd_irq_port[port]) {
			bool long_hpd;

			if (IS_G4X(dev)) {
				dig_shift = g4x_port_to_hotplug_shift(port);
				long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
			} else {
				dig_shift = ilk_port_to_hotplug_shift(port);
				long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
			}

			DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
					 port_name(port),
					 long_hpd ? "long" : "short");
			/* for long HPD pulses we want to have the digital queue happen,
			   but we still want HPD storm detection to function. */
			if (long_hpd) {
				dev_priv->long_hpd_port_mask |= (1 << port);
				dig_port_mask |= hpd[i];
			} else {
				/* for short HPD just trigger the digital queue */
				dev_priv->short_hpd_port_mask |= (1 << port);
				hotplug_trigger &= ~hpd[i];
			}
			queue_dig = true;
		}
	}

	for (i = 1; i < HPD_NUM_PINS; i++) {
		if (hpd[i] & hotplug_trigger &&
		    dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
			/*
			 * On GMCH platforms the interrupt mask bits only
			 * prevent irq generation, not the setting of the
			 * hotplug bits itself. So only WARN about unexpected
			 * interrupts on saner platforms.
			 */
			WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
				  "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
				  hotplug_trigger, i, hpd[i]);

			continue;
		}

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		if (!(dig_port_mask & hpd[i])) {
			dev_priv->hpd_event_bits |= (1 << i);
			queue_hp = true;
		}

		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
				      dev_priv->hpd_stats[i].hpd_cnt);
		}
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock(&dev_priv->irq_lock);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	if (queue_dig)
		queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work);
	if (queue_hp)
		schedule_work(&dev_priv->hotplug_work);
}
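/*
 * Storm detection example: with HPD_STORM_DETECT_PERIOD = 1000 and
 * HPD_STORM_THRESHOLD = 5, a pin that sees more than 5 interrupts within
 * a rolling 1000 ms window (tracked via hpd_last_jiffies and hpd_cnt
 * above) is marked HPD_MARK_DISABLED, its pending event bit is dropped,
 * and hpd_irq_setup() reprograms the hardware so the pin falls back to
 * polling until the hotplug reenable timer fires.
 */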
static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif
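/*
 * The CRC entries use the kernel circ_buf convention: head is the
 * producer index (this irq handler), tail the consumer index (the debugfs
 * reader), and CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1
 * means the ring is full. The "(head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR -
 * 1)" advance relies on the entry count being a power of two, so the
 * wrap needs no division.
 */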
static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t res1, res2;

	if (INTEL_INFO(dev)->gen >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}
/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		spin_unlock(&dev_priv->irq_lock);

		queue_work(dev_priv->wq, &dev_priv->rps.work);
	}

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
			i915_handle_error(dev_priv->dev, false,
					  "VEBOX CS error interrupt 0x%08x",
					  pm_iir);
		}
	}
}

static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
{
	if (!drm_handle_vblank(dev, pipe))
		return false;

	return true;
}
2008 static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
2010 struct drm_i915_private *dev_priv = dev->dev_private;
2011 u32 pipe_stats[I915_MAX_PIPES] = { };
2014 spin_lock(&dev_priv->irq_lock);
2015 for_each_pipe(pipe) {
2017 u32 mask, iir_bit = 0;
2020 * PIPESTAT bits get signalled even when the interrupt is
2021 * disabled with the mask bits, and some of the status bits do
2022 * not generate interrupts at all (like the underrun bit). Hence
2023 * we need to be careful that we only handle what we want to
2027 if (__cpu_fifo_underrun_reporting_enabled(dev, pipe))
2028 mask |= PIPE_FIFO_UNDERRUN_STATUS;
2032 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
2035 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
2038 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
2042 mask |= dev_priv->pipestat_irq_mask[pipe];
2047 reg = PIPESTAT(pipe);
2048 mask |= PIPESTAT_INT_ENABLE_MASK;
2049 pipe_stats[pipe] = I915_READ(reg) & mask;
2052 * Clear the PIPE*STAT regs before the IIR
2054 if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
2055 PIPESTAT_INT_STATUS_MASK))
2056 I915_WRITE(reg, pipe_stats[pipe]);
2058 spin_unlock(&dev_priv->irq_lock);
2060 for_each_pipe(pipe) {
2061 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
2062 intel_pipe_handle_vblank(dev, pipe);
2064 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
2065 intel_prepare_page_flip(dev, pipe);
2066 intel_finish_page_flip(dev, pipe);
2069 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
2070 i9xx_pipe_crc_irq_handler(dev, pipe);
2072 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
2073 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
2074 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
2077 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
2078 gmbus_irq_handler(dev);
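/*
 * Hotplug on gmch platforms: latch and clear PORT_HOTPLUG_STAT before IIR,
 * then hand the triggered bits to the HPD and DP AUX handlers.
 */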
2081 static void i9xx_hpd_irq_handler(struct drm_device *dev)
2083 struct drm_i915_private *dev_priv = dev->dev_private;
2084 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2086 if (hotplug_status) {
2087 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2089 * Make sure hotplug status is cleared before we clear IIR, or else we
2090 * may miss hotplug events.
2092 POSTING_READ(PORT_HOTPLUG_STAT);
		if (IS_G4X(dev)) {
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

			intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
		} else {
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
		}
2104 if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
2105 hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
2106 dp_aux_irq_handler(dev);
2110 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
2112 struct drm_device *dev = arg;
2113 struct drm_i915_private *dev_priv = dev->dev_private;
2114 u32 iir, gt_iir, pm_iir;
2115 irqreturn_t ret = IRQ_NONE;
	while (true) {
		/* Find, clear, then process each source of interrupt */
2120 gt_iir = I915_READ(GTIIR);
2122 I915_WRITE(GTIIR, gt_iir);
2124 pm_iir = I915_READ(GEN6_PMIIR);
2126 I915_WRITE(GEN6_PMIIR, pm_iir);
2128 iir = I915_READ(VLV_IIR);
		if (iir) {
			/* Consume port before clearing IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}
		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;
		if (gt_iir)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);
2145 /* Call regardless, as some status bits might not be
2146 * signalled in iir */
		valleyview_pipestat_irq_handler(dev, iir);
	}

out:
	return ret;
}
2154 static irqreturn_t cherryview_irq_handler(int irq, void *arg)
2156 struct drm_device *dev = arg;
2157 struct drm_i915_private *dev_priv = dev->dev_private;
2158 u32 master_ctl, iir;
2159 irqreturn_t ret = IRQ_NONE;
	for (;;) {
		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
2163 iir = I915_READ(VLV_IIR);
		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;
2170 I915_WRITE(GEN8_MASTER_IRQ, 0);
2172 /* Find, clear, then process each source of interrupt */
		if (iir) {
			/* Consume port before clearing IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}
2181 gen8_gt_irq_handler(dev, dev_priv, master_ctl);
2183 /* Call regardless, as some status bits might not be
2184 * signalled in iir */
2185 valleyview_pipestat_irq_handler(dev, iir);
2187 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
		POSTING_READ(GEN8_MASTER_IRQ);
	}

	return ret;
}
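/* South display engine (PCH) interrupt handler for IBX/ILK-class PCHs. */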
2194 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
2196 struct drm_i915_private *dev_priv = dev->dev_private;
2198 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
2199 u32 dig_hotplug_reg;
2201 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2202 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2204 intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);
2206 if (pch_iir & SDE_AUDIO_POWER_MASK) {
2207 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
2208 SDE_AUDIO_POWER_SHIFT);
2209 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
2213 if (pch_iir & SDE_AUX_MASK)
2214 dp_aux_irq_handler(dev);
2216 if (pch_iir & SDE_GMBUS)
2217 gmbus_irq_handler(dev);
2219 if (pch_iir & SDE_AUDIO_HDCP_MASK)
2220 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
2222 if (pch_iir & SDE_AUDIO_TRANS_MASK)
2223 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
2225 if (pch_iir & SDE_POISON)
2226 DRM_ERROR("PCH poison interrupt\n");
2228 if (pch_iir & SDE_FDI_MASK)
2230 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
2232 I915_READ(FDI_RX_IIR(pipe)));
2234 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
2235 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
2237 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
2238 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
2240 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_ERROR("PCH transcoder A FIFO underrun\n");
2245 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_ERROR("PCH transcoder B FIFO underrun\n");
2251 static void ivb_err_int_handler(struct drm_device *dev)
2253 struct drm_i915_private *dev_priv = dev->dev_private;
2254 u32 err_int = I915_READ(GEN7_ERR_INT);
2257 if (err_int & ERR_INT_POISON)
2258 DRM_ERROR("Poison interrupt\n");
2260 for_each_pipe(pipe) {
2261 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
								  false))
				DRM_ERROR("Pipe %c FIFO underrun\n",
					  pipe_name(pipe));
		}
2268 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (IS_IVYBRIDGE(dev))
				ivb_pipe_crc_irq_handler(dev, pipe);
			else
				hsw_pipe_crc_irq_handler(dev, pipe);
		}
	}
2276 I915_WRITE(GEN7_ERR_INT, err_int);
2279 static void cpt_serr_int_handler(struct drm_device *dev)
2281 struct drm_i915_private *dev_priv = dev->dev_private;
2282 u32 serr_int = I915_READ(SERR_INT);
2284 if (serr_int & SERR_INT_POISON)
2285 DRM_ERROR("PCH poison interrupt\n");
2287 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_ERROR("PCH transcoder A FIFO underrun\n");
2292 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_ERROR("PCH transcoder B FIFO underrun\n");
2297 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
							  false))
			DRM_ERROR("PCH transcoder C FIFO underrun\n");
2302 I915_WRITE(SERR_INT, serr_int);
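/* South display engine (PCH) interrupt handler for CPT/PPT-class PCHs. */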
2305 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
2307 struct drm_i915_private *dev_priv = dev->dev_private;
2309 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
2310 u32 dig_hotplug_reg;
2312 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2313 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2315 intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);
2317 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
2318 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
2319 SDE_AUDIO_POWER_SHIFT_CPT);
2320 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2324 if (pch_iir & SDE_AUX_MASK_CPT)
2325 dp_aux_irq_handler(dev);
2327 if (pch_iir & SDE_GMBUS_CPT)
2328 gmbus_irq_handler(dev);
2330 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
2331 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
2333 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
2334 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
2336 if (pch_iir & SDE_FDI_MASK_CPT)
2338 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
2340 I915_READ(FDI_RX_IIR(pipe)));
2342 if (pch_iir & SDE_ERROR_CPT)
2343 cpt_serr_int_handler(dev);
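/* Main display engine (DE) interrupt handler for ILK/SNB. */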
2346 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
2348 struct drm_i915_private *dev_priv = dev->dev_private;
2351 if (de_iir & DE_AUX_CHANNEL_A)
2352 dp_aux_irq_handler(dev);
2354 if (de_iir & DE_GSE)
2355 intel_opregion_asle_intr(dev);
2357 if (de_iir & DE_POISON)
2358 DRM_ERROR("Poison interrupt\n");
2360 for_each_pipe(pipe) {
2361 if (de_iir & DE_PIPE_VBLANK(pipe))
2362 intel_pipe_handle_vblank(dev, pipe);
2364 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2365 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
2366 DRM_ERROR("Pipe %c FIFO underrun\n",
2369 if (de_iir & DE_PIPE_CRC_DONE(pipe))
2370 i9xx_pipe_crc_irq_handler(dev, pipe);
2372 /* plane/pipes map 1:1 on ilk+ */
2373 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
2374 intel_prepare_page_flip(dev, pipe);
2375 intel_finish_page_flip_plane(dev, pipe);
2379 /* check event from PCH */
2380 if (de_iir & DE_PCH_EVENT) {
2381 u32 pch_iir = I915_READ(SDEIIR);
		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);
		/* should clear PCH hotplug event before clearing CPU irq */
2389 I915_WRITE(SDEIIR, pch_iir);
2392 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
2393 ironlake_rps_change_irq_handler(dev);
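/* Main display engine (DE) interrupt handler for IVB/HSW. */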
2396 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
2398 struct drm_i915_private *dev_priv = dev->dev_private;
2401 if (de_iir & DE_ERR_INT_IVB)
2402 ivb_err_int_handler(dev);
2404 if (de_iir & DE_AUX_CHANNEL_A_IVB)
2405 dp_aux_irq_handler(dev);
2407 if (de_iir & DE_GSE_IVB)
2408 intel_opregion_asle_intr(dev);
2410 for_each_pipe(pipe) {
2411 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
2412 intel_pipe_handle_vblank(dev, pipe);
2414 /* plane/pipes map 1:1 on ilk+ */
2415 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
2416 intel_prepare_page_flip(dev, pipe);
2417 intel_finish_page_flip_plane(dev, pipe);
2421 /* check event from PCH */
2422 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
2423 u32 pch_iir = I915_READ(SDEIIR);
2425 cpt_irq_handler(dev, pch_iir);
		/* clear PCH hotplug event before clearing CPU irq */
2428 I915_WRITE(SDEIIR, pch_iir);
2433 * To handle irqs with the minimum potential races with fresh interrupts, we:
2434 * 1 - Disable Master Interrupt Control.
2435 * 2 - Find the source(s) of the interrupt.
2436 * 3 - Clear the Interrupt Identity bits (IIR).
2437 * 4 - Process the interrupt(s) that had bits set in the IIRs.
2438 * 5 - Re-enable Master Interrupt Control.
2440 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2442 struct drm_device *dev = arg;
2443 struct drm_i915_private *dev_priv = dev->dev_private;
2444 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2445 irqreturn_t ret = IRQ_NONE;
2447 /* We get interrupts on unclaimed registers, so check for this before we
2448 * do any I915_{READ,WRITE}. */
2449 intel_uncore_check_errors(dev);
2451 /* disable master interrupt before clearing iir */
2452 de_ier = I915_READ(DEIER);
2453 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2454 POSTING_READ(DEIER);
2456 /* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
2458 * able to process them after we restore SDEIER (as soon as we restore
2459 * it, we'll get an interrupt if SDEIIR still has something to process
2460 * due to its back queue). */
2461 if (!HAS_PCH_NOP(dev)) {
2462 sde_ier = I915_READ(SDEIER);
2463 I915_WRITE(SDEIER, 0);
2464 POSTING_READ(SDEIER);
2467 /* Find, clear, then process each source of interrupt */
	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
		if (INTEL_INFO(dev)->gen >= 6)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		else
			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
		if (INTEL_INFO(dev)->gen >= 7)
			ivb_display_irq_handler(dev, de_iir);
		else
			ilk_display_irq_handler(dev, de_iir);
	}

	if (INTEL_INFO(dev)->gen >= 6) {
		u32 pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir) {
			I915_WRITE(GEN6_PMIIR, pm_iir);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(dev_priv, pm_iir);
		}
	}
2498 I915_WRITE(DEIER, de_ier);
2499 POSTING_READ(DEIER);
2500 if (!HAS_PCH_NOP(dev)) {
2501 I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}

	return ret;
}
2508 static irqreturn_t gen8_irq_handler(int irq, void *arg)
2510 struct drm_device *dev = arg;
2511 struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl;
	irqreturn_t ret = IRQ_NONE;
	uint32_t tmp = 0;
	enum pipe pipe;
2517 master_ctl = I915_READ(GEN8_MASTER_IRQ);
2518 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2522 I915_WRITE(GEN8_MASTER_IRQ, 0);
2523 POSTING_READ(GEN8_MASTER_IRQ);
2525 /* Find, clear, then process each source of interrupt */
2527 ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
2529 if (master_ctl & GEN8_DE_MISC_IRQ) {
		tmp = I915_READ(GEN8_DE_MISC_IIR);
		if (tmp) {
			I915_WRITE(GEN8_DE_MISC_IIR, tmp);
			ret = IRQ_HANDLED;
			if (tmp & GEN8_DE_MISC_GSE)
				intel_opregion_asle_intr(dev);
			else
				DRM_ERROR("Unexpected DE Misc interrupt\n");
		} else
			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
	}
2543 if (master_ctl & GEN8_DE_PORT_IRQ) {
		tmp = I915_READ(GEN8_DE_PORT_IIR);
		if (tmp) {
			I915_WRITE(GEN8_DE_PORT_IIR, tmp);
			ret = IRQ_HANDLED;
			if (tmp & GEN8_AUX_CHANNEL_A)
				dp_aux_irq_handler(dev);
			else
				DRM_ERROR("Unexpected DE Port interrupt\n");
		} else
			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
	}
2557 for_each_pipe(pipe) {
2560 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2563 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2566 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
2567 if (pipe_iir & GEN8_PIPE_VBLANK)
2568 intel_pipe_handle_vblank(dev, pipe);
2570 if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) {
2571 intel_prepare_page_flip(dev, pipe);
2572 intel_finish_page_flip_plane(dev, pipe);
2575 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2576 hsw_pipe_crc_irq_handler(dev, pipe);
2578 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
								  false))
				DRM_ERROR("Pipe %c FIFO underrun\n",
					  pipe_name(pipe));
		}
2585 if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
2586 DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
2588 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
2591 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2594 if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
2596 * FIXME(BDW): Assume for now that the new interrupt handling
2597 * scheme also closed the SDE interrupt handling race we've seen
2598 * on older pch-split platforms. But this needs testing.
		u32 pch_iir = I915_READ(SDEIIR);
		if (pch_iir) {
			I915_WRITE(SDEIIR, pch_iir);
			ret = IRQ_HANDLED;
			cpt_irq_handler(dev, pch_iir);
		} else
			DRM_ERROR("The master control interrupt lied (SDE)!\n");
	}
2610 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return ret;
}
2616 static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2617 bool reset_completed)
2619 struct intel_engine_cs *ring;
2623 * Notify all waiters for GPU completion events that reset state has
2624 * been changed, and that they need to restart their wait after
2625 * checking for potential errors (and bail out to drop locks if there is
2626 * a gpu reset pending so that i915_error_work_func can acquire them).
2629 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2630 for_each_ring(ring, dev_priv, i)
2631 wake_up_all(&ring->irq_queue);
2633 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2634 wake_up_all(&dev_priv->pending_flip_queue);
2637 * Signal tasks blocked in i915_gem_wait_for_error that the pending
2638 * reset state is cleared.
2640 if (reset_completed)
2641 wake_up_all(&dev_priv->gpu_error.reset_queue);
2645 * i915_error_work_func - do process context error handling work
2646 * @work: work struct
 * Fire an error uevent so userspace can see that a hang or error
 * occurred.
 */
2651 static void i915_error_work_func(struct work_struct *work)
2653 struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
2655 struct drm_i915_private *dev_priv =
2656 container_of(error, struct drm_i915_private, gpu_error);
2657 struct drm_device *dev = dev_priv->dev;
2658 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2659 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2660 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2663 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
2666 * Note that there's only one work item which does gpu resets, so we
2667 * need not worry about concurrent gpu resets potentially incrementing
2668 * error->reset_counter twice. We only need to take care of another
2669 * racing irq/hangcheck declaring the gpu dead for a second time. A
2670 * quick check for that is good enough: schedule_work ensures the
2671 * correct ordering between hang detection and this work item, and since
2672 * the reset in-progress bit is only ever set by code outside of this
2673 * work we don't need to worry about any other races.
2675 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
2676 DRM_DEBUG_DRIVER("resetting chip\n");
2677 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
2681 * In most cases it's guaranteed that we get here with an RPM
2682 * reference held, for example because there is a pending GPU
2683 * request that won't finish until the reset is done. This
2684 * isn't the case at least when we get here by doing a
		 * simulated reset via debugfs, so get an RPM reference.
2687 intel_runtime_pm_get(dev_priv);
2689 * All state reset _must_ be completed before we update the
2690 * reset counter, for otherwise waiters might miss the reset
2691 * pending state and not properly drop locks, resulting in
2692 * deadlocks with the reset work.
2694 ret = i915_reset(dev);
2696 intel_display_handle_reset(dev);
		intel_runtime_pm_put(dev_priv);

		if (ret == 0) {
2702 * After all the gem state is reset, increment the reset
			 * counter and wake up everyone waiting for the reset to
			 * complete.
			 *
			 * Since unlock operations are a one-sided barrier only,
			 * we need to insert a barrier here to order any seqno
			 * updates before
			 * the counter increment.
			 */
2711 smp_mb__before_atomic();
2712 atomic_inc(&dev_priv->gpu_error.reset_counter);
2714 kobject_uevent_env(&dev->primary->kdev->kobj,
2715 KOBJ_CHANGE, reset_done_event);
		} else {
			atomic_set_mask(I915_WEDGED, &error->reset_counter);
		}
2721 * Note: The wake_up also serves as a memory barrier so that
		 * waiters see the updated value of the reset counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, true);
	}
}
2728 static void i915_report_and_clear_eir(struct drm_device *dev)
2730 struct drm_i915_private *dev_priv = dev->dev_private;
2731 uint32_t instdone[I915_NUM_INSTDONE_REG];
	u32 eir = I915_READ(EIR);
	int pipe, i;

	if (!eir)
		return;
2738 pr_err("render error detected, EIR: 0x%08x\n", eir);
2740 i915_get_extra_instdone(dev, instdone);
2743 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2744 u32 ipeir = I915_READ(IPEIR_I965);
2746 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2747 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2748 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2749 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2750 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2751 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2752 I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
2755 if (eir & GM45_ERROR_PAGE_TABLE) {
2756 u32 pgtbl_err = I915_READ(PGTBL_ER);
2757 pr_err("page table error\n");
2758 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2759 I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}
2764 if (!IS_GEN2(dev)) {
2765 if (eir & I915_ERROR_PAGE_TABLE) {
2766 u32 pgtbl_err = I915_READ(PGTBL_ER);
2767 pr_err("page table error\n");
2768 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2769 I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}
2774 if (eir & I915_ERROR_MEMORY_REFRESH) {
2775 pr_err("memory refresh error:\n");
2777 pr_err("pipe %c stat: 0x%08x\n",
2778 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2779 /* pipestat has already been acked */
2781 if (eir & I915_ERROR_INSTRUCTION) {
2782 pr_err("instruction error\n");
2783 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
2784 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2785 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2786 if (INTEL_INFO(dev)->gen < 4) {
2787 u32 ipeir = I915_READ(IPEIR);
2789 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
2790 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
2791 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
2792 I915_WRITE(IPEIR, ipeir);
2793 POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);
2797 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2798 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2799 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2800 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2801 I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}
	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
2808 eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
2814 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2815 I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
2825 * dump it to the syslog. Also call i915_capture_error_state() to make
2826 * sure we get a record and make it available in debugfs. Fire a uevent
2827 * so userspace knows something bad happened (should trigger collection
2828 * of a ring dump etc.).
2830 void i915_handle_error(struct drm_device *dev, bool wedged,
2831 const char *fmt, ...)
2833 struct drm_i915_private *dev_priv = dev->dev_private;
	va_list args;
	char error_msg[80];

	va_start(args, fmt);
	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
	va_end(args);
2841 i915_capture_error_state(dev, wedged, error_msg);
2842 i915_report_and_clear_eir(dev);
	if (wedged) {
		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
2846 &dev_priv->gpu_error.reset_counter);
2849 * Wakeup waiting processes so that the reset work function
2850 * i915_error_work_func doesn't deadlock trying to grab various
2851 * locks. By bumping the reset counter first, the woken
2852 * processes will see a reset in progress and back off,
2853 * releasing their locks and then wait for the reset completion.
2854 * We must do this for _all_ gpu waiters that might hold locks
2855 * that the reset work needs to acquire.
2857 * Note: The wake_up serves as the required memory barrier to
		 * ensure that the waiters see the updated value of the reset
		 * counter.
		 */
		i915_error_wake_up(dev_priv, false);
	}
2865 * Our reset work can grab modeset locks (since it needs to reset the
	 * state of outstanding pageflips). Hence it must not be run on our own
	 * dev_priv->wq work queue, for otherwise the flush_work in the pageflip
2868 * code will deadlock.
2870 schedule_work(&dev_priv->gpu_error.work);
2873 static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
2875 struct drm_i915_private *dev_priv = dev->dev_private;
2876 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
2877 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2878 struct drm_i915_gem_object *obj;
2879 struct intel_unpin_work *work;
2880 unsigned long flags;
2881 bool stall_detected;
2883 /* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;
2887 spin_lock_irqsave(&dev->event_lock, flags);
2888 work = intel_crtc->unpin_work;
2891 atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
2892 !work->enable_stall_check) {
2893 /* Either the pending flip IRQ arrived, or we're too early. Don't check */
2894 spin_unlock_irqrestore(&dev->event_lock, flags);
2898 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
2899 obj = work->pending_flip_obj;
2900 if (INTEL_INFO(dev)->gen >= 4) {
2901 int dspsurf = DSPSURF(intel_crtc->plane);
2902 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
2903 i915_gem_obj_ggtt_offset(obj);
	} else {
		int dspaddr = DSPADDR(intel_crtc->plane);
2906 stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
2907 crtc->y * crtc->primary->fb->pitches[0] +
							crtc->x * crtc->primary->fb->bits_per_pixel/8);
	}
2911 spin_unlock_irqrestore(&dev->event_lock, flags);
2913 if (stall_detected) {
2914 DRM_DEBUG_DRIVER("Pageflip stall detected\n");
2915 intel_prepare_page_flip(dev, intel_crtc->plane);
2919 /* Called from drm generic code, passed 'crtc' which
2920 * we use as a pipe index
2922 static int i915_enable_vblank(struct drm_device *dev, int pipe)
2924 struct drm_i915_private *dev_priv = dev->dev_private;
2925 unsigned long irqflags;
	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;
2930 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2931 if (INTEL_INFO(dev)->gen >= 4)
2932 i915_enable_pipestat(dev_priv, pipe,
2933 PIPE_START_VBLANK_INTERRUPT_STATUS);
2935 i915_enable_pipestat(dev_priv, pipe,
2936 PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
2942 static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
2944 struct drm_i915_private *dev_priv = dev->dev_private;
2945 unsigned long irqflags;
2946 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2947 DE_PIPE_VBLANK(pipe);
	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;
2952 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2953 ironlake_enable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
2959 static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2961 struct drm_i915_private *dev_priv = dev->dev_private;
2962 unsigned long irqflags;
	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;
2967 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2968 i915_enable_pipestat(dev_priv, pipe,
2969 PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
2975 static int gen8_enable_vblank(struct drm_device *dev, int pipe)
2977 struct drm_i915_private *dev_priv = dev->dev_private;
2978 unsigned long irqflags;
	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;
2983 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2984 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
2985 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2986 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
2991 /* Called from drm generic code, passed 'crtc' which
2992 * we use as a pipe index
2994 static void i915_disable_vblank(struct drm_device *dev, int pipe)
2996 struct drm_i915_private *dev_priv = dev->dev_private;
2997 unsigned long irqflags;
2999 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3000 i915_disable_pipestat(dev_priv, pipe,
3001 PIPE_VBLANK_INTERRUPT_STATUS |
3002 PIPE_START_VBLANK_INTERRUPT_STATUS);
3003 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3006 static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
3008 struct drm_i915_private *dev_priv = dev->dev_private;
3009 unsigned long irqflags;
3010 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
3011 DE_PIPE_VBLANK(pipe);
3013 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3014 ironlake_disable_display_irq(dev_priv, bit);
3015 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3018 static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
3020 struct drm_i915_private *dev_priv = dev->dev_private;
3021 unsigned long irqflags;
3023 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3024 i915_disable_pipestat(dev_priv, pipe,
3025 PIPE_START_VBLANK_INTERRUPT_STATUS);
3026 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3029 static void gen8_disable_vblank(struct drm_device *dev, int pipe)
3031 struct drm_i915_private *dev_priv = dev->dev_private;
3032 unsigned long irqflags;
	if (!i915_pipe_enabled(dev, pipe))
		return;
3037 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3038 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
3039 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
3040 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
3041 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
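/* Return the seqno of the request most recently added to @ring. */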
3045 ring_last_seqno(struct intel_engine_cs *ring)
3047 return list_entry(ring->request_list.prev,
3048 struct drm_i915_gem_request, list)->seqno;
3052 ring_idle(struct intel_engine_cs *ring, u32 seqno)
3054 return (list_empty(&ring->request_list) ||
3055 i915_seqno_passed(seqno, ring_last_seqno(ring)));
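/* Check whether an IPEHR value decodes to a semaphore-wait command. */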
3059 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
3061 if (INTEL_INFO(dev)->gen >= 8) {
3062 return (ipehr >> 23) == 0x1c;
3064 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
3065 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
				 MI_SEMAPHORE_REGISTER);
	}
}
3070 static struct intel_engine_cs *
3071 semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
3073 struct drm_i915_private *dev_priv = ring->dev->dev_private;
3074 struct intel_engine_cs *signaller;
3077 if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
3078 for_each_ring(signaller, dev_priv, i) {
			if (ring == signaller)
				continue;

			if (offset == signaller->semaphore.signal_ggtt[ring->id])
				return signaller;
		}
	} else {
		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
3088 for_each_ring(signaller, dev_priv, i) {
			if (ring == signaller)
				continue;

			if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
				return signaller;
		}
	}
3097 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
		  ring->id, ipehr, offset);

	return NULL;
}
3103 static struct intel_engine_cs *
3104 semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
3106 struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 cmd, ipehr, head;
	u64 offset = 0;
	int i, backwards;
3111 ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
	if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
		return NULL;
3116 * HEAD is likely pointing to the dword after the actual command,
3117 * so scan backwards until we find the MBOX. But limit it to just 3
3118 * or 4 dwords depending on the semaphore wait command size.
3119 * Note that we don't care about ACTHD here since that might
	 * point at a batch, and semaphores are always emitted into the
3121 * ringbuffer itself.
3123 head = I915_READ_HEAD(ring) & HEAD_ADDR;
3124 backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
3126 for (i = backwards; i; --i) {
3128 * Be paranoid and presume the hw has gone off into the wild -
3129 * our ring is smaller than what the hardware (and hence
3130 * HEAD_ADDR) allows. Also handles wrap-around.
3132 head &= ring->buffer->size - 1;
3134 /* This here seems to blow up */
		cmd = ioread32(ring->buffer->virtual_start + head);
		if (cmd == ipehr)
			break;

		head -= 4;
	}

	if (!i)
		return NULL;

	*seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
3146 if (INTEL_INFO(ring->dev)->gen >= 8) {
		offset = ioread32(ring->buffer->virtual_start + head + 12);
		offset <<= 32;
		offset |= ioread32(ring->buffer->virtual_start + head + 8);
	}
3151 return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
3154 static int semaphore_passed(struct intel_engine_cs *ring)
3156 struct drm_i915_private *dev_priv = ring->dev->dev_private;
3157 struct intel_engine_cs *signaller;
3160 ring->hangcheck.deadlock++;
3162 signaller = semaphore_waits_for(ring, &seqno);
	if (signaller == NULL)
		return -1;
3166 /* Prevent pathological recursion due to driver bugs */
	if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
		return -1;
	if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
		return 1;
3173 /* cursory check for an unkickable deadlock */
3174 if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
3175 semaphore_passed(signaller) < 0)
3181 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
3183 struct intel_engine_cs *ring;
3186 for_each_ring(ring, dev_priv, i)
3187 ring->hangcheck.deadlock = 0;
3190 static enum intel_ring_hangcheck_action
3191 ring_stuck(struct intel_engine_cs *ring, u64 acthd)
3193 struct drm_device *dev = ring->dev;
3194 struct drm_i915_private *dev_priv = dev->dev_private;
3197 if (acthd != ring->hangcheck.acthd) {
3198 if (acthd > ring->hangcheck.max_acthd) {
3199 ring->hangcheck.max_acthd = acthd;
3200 return HANGCHECK_ACTIVE;
		return HANGCHECK_ACTIVE_LOOP;
	}

	if (IS_GEN2(dev))
		return HANGCHECK_HUNG;
3209 /* Is the chip hanging on a WAIT_FOR_EVENT?
3210 * If so we can simply poke the RB_WAIT bit
3211 * and break the hang. This should work on
3212 * all but the second generation chipsets.
3214 tmp = I915_READ_CTL(ring);
3215 if (tmp & RING_WAIT) {
3216 i915_handle_error(dev, false,
3217 "Kicking stuck wait on %s",
3219 I915_WRITE_CTL(ring, tmp);
3220 return HANGCHECK_KICK;
3223 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
3224 switch (semaphore_passed(ring)) {
		default:
			return HANGCHECK_HUNG;
		case 1:
			i915_handle_error(dev, false,
					  "Kicking stuck semaphore on %s",
					  ring->name);
			I915_WRITE_CTL(ring, tmp);
			return HANGCHECK_KICK;
		case 0:
			return HANGCHECK_WAIT;
		}
	}
3238 return HANGCHECK_HUNG;
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. We keep track of per-ring seqno progress and
 * if there is no progress, the hangcheck score for that ring is increased.
 * Further, acthd is inspected to see if the ring is stuck. If it is, we
 * kick the ring. If we see no progress on three subsequent calls
 * we assume the chip is wedged and try to fix it by resetting the chip.
 */
3249 static void i915_hangcheck_elapsed(unsigned long data)
3251 struct drm_device *dev = (struct drm_device *)data;
3252 struct drm_i915_private *dev_priv = dev->dev_private;
3253 struct intel_engine_cs *ring;
	int i;
	int busy_count = 0, rings_hung = 0;
	bool stuck[I915_NUM_RINGS] = { 0 };
#define BUSY 1
#define KICK 5
#define HUNG 20
	if (!i915.enable_hangcheck)
		return;
3264 for_each_ring(ring, dev_priv, i) {
		u64 acthd;
		u32 seqno;
		bool busy = true;

		semaphore_clear_deadlocks(dev_priv);
3271 seqno = ring->get_seqno(ring, false);
3272 acthd = intel_ring_get_active_head(ring);
3274 if (ring->hangcheck.seqno == seqno) {
3275 if (ring_idle(ring, seqno)) {
3276 ring->hangcheck.action = HANGCHECK_IDLE;
3278 if (waitqueue_active(&ring->irq_queue)) {
3279 /* Issue a wake-up to catch stuck h/w. */
3280 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
3281 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
3282 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
3285 DRM_INFO("Fake missed irq on %s\n",
3287 wake_up_all(&ring->irq_queue);
3289 /* Safeguard against driver failure */
				ring->hangcheck.score += BUSY;
			} else
				busy = false;
		} else {
3294 /* We always increment the hangcheck score
3295 * if the ring is busy and still processing
3296 * the same request, so that no single request
3297 * can run indefinitely (such as a chain of
3298 * batches). The only time we do not increment
3299 * the hangcheck score on this ring, if this
3300 * ring is in a legitimate wait for another
3301 * ring. In that case the waiting ring is a
3302 * victim and we want to be sure we catch the
3303 * right culprit. Then every time we do kick
3304 * the ring, add a small increment to the
3305 * score so that we can catch a batch that is
3306 * being repeatedly kicked and so responsible
3307 * for stalling the machine.
			ring->hangcheck.action = ring_stuck(ring,
							    acthd);
3312 switch (ring->hangcheck.action) {
3313 case HANGCHECK_IDLE:
3314 case HANGCHECK_WAIT:
			case HANGCHECK_ACTIVE:
				break;
			case HANGCHECK_ACTIVE_LOOP:
				ring->hangcheck.score += BUSY;
				break;
			case HANGCHECK_KICK:
				ring->hangcheck.score += KICK;
				break;
			case HANGCHECK_HUNG:
				ring->hangcheck.score += HUNG;
				stuck[i] = true;
				break;
			}
3330 ring->hangcheck.action = HANGCHECK_ACTIVE;
3332 /* Gradually reduce the count so that we catch DoS
3333 * attempts across multiple batches.
3335 if (ring->hangcheck.score > 0)
3336 ring->hangcheck.score--;
3338 ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
3341 ring->hangcheck.seqno = seqno;
3342 ring->hangcheck.acthd = acthd;
3346 for_each_ring(ring, dev_priv, i) {
3347 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
3348 DRM_INFO("%s on %s\n",
3349 stuck[i] ? "stuck" : "no progress",
3356 return i915_handle_error(dev, true, "Ring hung");
	/* Reset the timer in case the chip hangs without another request
	 * being added */
3361 i915_queue_hangcheck(dev);
3364 void i915_queue_hangcheck(struct drm_device *dev)
3366 struct drm_i915_private *dev_priv = dev->dev_private;
	if (!i915.enable_hangcheck)
		return;
3370 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
3371 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
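/* Mask and clear all south (PCH) display interrupts. */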
3374 static void ibx_irq_reset(struct drm_device *dev)
3376 struct drm_i915_private *dev_priv = dev->dev_private;
	if (HAS_PCH_NOP(dev))
		return;
3381 GEN5_IRQ_RESET(SDE);
3383 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3384 I915_WRITE(SERR_INT, 0xffffffff);
3388 * SDEIER is also touched by the interrupt handler to work around missed PCH
3389 * interrupts. Hence we can't update it after the interrupt handler is enabled -
3390 * instead we unconditionally enable all PCH interrupt sources here, but then
3391 * only unmask them as needed with SDEIMR.
3393 * This function needs to be called before interrupts are enabled.
3395 static void ibx_irq_pre_postinstall(struct drm_device *dev)
3397 struct drm_i915_private *dev_priv = dev->dev_private;
	if (HAS_PCH_NOP(dev))
		return;
3402 WARN_ON(I915_READ(SDEIER) != 0);
3403 I915_WRITE(SDEIER, 0xffffffff);
3404 POSTING_READ(SDEIER);
3407 static void gen5_gt_irq_reset(struct drm_device *dev)
3409 struct drm_i915_private *dev_priv = dev->dev_private;
	GEN5_IRQ_RESET(GT);

	if (INTEL_INFO(dev)->gen >= 6)
		GEN5_IRQ_RESET(GEN6_PM);
}
3418 static void ironlake_irq_reset(struct drm_device *dev)
3420 struct drm_i915_private *dev_priv = dev->dev_private;
3422 I915_WRITE(HWSTAM, 0xffffffff);
	GEN5_IRQ_RESET(DE);
	if (IS_GEN7(dev))
		I915_WRITE(GEN7_ERR_INT, 0xffffffff);
	gen5_gt_irq_reset(dev);

	ibx_irq_reset(dev);
}
3433 static void valleyview_irq_preinstall(struct drm_device *dev)
3435 struct drm_i915_private *dev_priv = dev->dev_private;
3439 I915_WRITE(VLV_IMR, 0);
3440 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
3441 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
3442 I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
3445 I915_WRITE(GTIIR, I915_READ(GTIIR));
3446 I915_WRITE(GTIIR, I915_READ(GTIIR));
3448 gen5_gt_irq_reset(dev);
3450 I915_WRITE(DPINVGTT, 0xff);
3452 I915_WRITE(PORT_HOTPLUG_EN, 0);
3453 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
3456 I915_WRITE(VLV_IIR, 0xffffffff);
3457 I915_WRITE(VLV_IMR, 0xffffffff);
3458 I915_WRITE(VLV_IER, 0x0);
3459 POSTING_READ(VLV_IER);
3462 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3464 GEN8_IRQ_RESET_NDX(GT, 0);
3465 GEN8_IRQ_RESET_NDX(GT, 1);
3466 GEN8_IRQ_RESET_NDX(GT, 2);
3467 GEN8_IRQ_RESET_NDX(GT, 3);
3470 static void gen8_irq_reset(struct drm_device *dev)
3472 struct drm_i915_private *dev_priv = dev->dev_private;
3475 I915_WRITE(GEN8_MASTER_IRQ, 0);
3476 POSTING_READ(GEN8_MASTER_IRQ);
3478 gen8_gt_irq_reset(dev_priv);
	for_each_pipe(pipe)
		if (intel_display_power_enabled(dev_priv,
						POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3485 GEN5_IRQ_RESET(GEN8_DE_PORT_);
3486 GEN5_IRQ_RESET(GEN8_DE_MISC_);
	GEN5_IRQ_RESET(GEN8_PCU_);

	ibx_irq_reset(dev);
}
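/*
 * Restore the DE pipe B/C interrupt masks after the display power well
 * comes back up; the saved de_irq_mask[] values are re-applied here.
 */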
3492 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
3494 unsigned long irqflags;
3496 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3497 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
3498 ~dev_priv->de_irq_mask[PIPE_B]);
3499 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
3500 ~dev_priv->de_irq_mask[PIPE_C]);
3501 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3504 static void cherryview_irq_preinstall(struct drm_device *dev)
3506 struct drm_i915_private *dev_priv = dev->dev_private;
3509 I915_WRITE(GEN8_MASTER_IRQ, 0);
3510 POSTING_READ(GEN8_MASTER_IRQ);
3512 gen8_gt_irq_reset(dev_priv);
3514 GEN5_IRQ_RESET(GEN8_PCU_);
3516 POSTING_READ(GEN8_PCU_IIR);
3518 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3520 I915_WRITE(PORT_HOTPLUG_EN, 0);
3521 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
3526 I915_WRITE(VLV_IMR, 0xffffffff);
3527 I915_WRITE(VLV_IER, 0x0);
3528 I915_WRITE(VLV_IIR, 0xffffffff);
3529 POSTING_READ(VLV_IIR);
3532 static void ibx_hpd_irq_setup(struct drm_device *dev)
3534 struct drm_i915_private *dev_priv = dev->dev_private;
3535 struct intel_encoder *intel_encoder;
3536 u32 hotplug_irqs, hotplug, enabled_irqs = 0;
3538 if (HAS_PCH_IBX(dev)) {
3539 hotplug_irqs = SDE_HOTPLUG_MASK;
3540 for_each_intel_encoder(dev, intel_encoder)
3541 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3542 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
	} else {
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3545 for_each_intel_encoder(dev, intel_encoder)
3546 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3547 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
	}

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3553 * Enable digital hotplug on the PCH, and configure the DP short pulse
3554 * duration to 2ms (which is the minimum in the Display Port spec)
3556 * This register is the same on all known PCH chips.
3558 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3559 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3560 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3561 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3562 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3563 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3566 static void ibx_irq_postinstall(struct drm_device *dev)
3568 struct drm_i915_private *dev_priv = dev->dev_private;
	if (HAS_PCH_NOP(dev))
		return;
3574 if (HAS_PCH_IBX(dev))
3575 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
	else
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3579 GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
3580 I915_WRITE(SDEIMR, ~mask);
3583 static void gen5_gt_irq_postinstall(struct drm_device *dev)
3585 struct drm_i915_private *dev_priv = dev->dev_private;
3586 u32 pm_irqs, gt_irqs;
3588 pm_irqs = gt_irqs = 0;
3590 dev_priv->gt_irq_mask = ~0;
3591 if (HAS_L3_DPF(dev)) {
3592 /* L3 parity interrupt is always unmasked. */
3593 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3594 gt_irqs |= GT_PARITY_ERROR(dev);
3597 gt_irqs |= GT_RENDER_USER_INTERRUPT;
	if (IS_GEN5(dev)) {
		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
			   ILK_BSD_USER_INTERRUPT;
	} else {
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
	}
3605 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3607 if (INTEL_INFO(dev)->gen >= 6) {
3608 pm_irqs |= dev_priv->pm_rps_events;
		if (HAS_VEBOX(dev))
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3613 dev_priv->pm_irq_mask = 0xffffffff;
		GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
	}
}
3618 static int ironlake_irq_postinstall(struct drm_device *dev)
3620 unsigned long irqflags;
3621 struct drm_i915_private *dev_priv = dev->dev_private;
3622 u32 display_mask, extra_mask;
3624 if (INTEL_INFO(dev)->gen >= 7) {
3625 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3626 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3627 DE_PLANEB_FLIP_DONE_IVB |
3628 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
3629 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3630 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
				DE_AUX_CHANNEL_A |
				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
				DE_POISON);
		extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
			     DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
	}
3641 dev_priv->irq_mask = ~display_mask;
3643 I915_WRITE(HWSTAM, 0xeffe);
3645 ibx_irq_pre_postinstall(dev);
3647 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3649 gen5_gt_irq_postinstall(dev);
3651 ibx_irq_postinstall(dev);
3653 if (IS_IRONLAKE_M(dev)) {
3654 /* Enable PCU event interrupts
3656 * spinlocking not required here for correctness since interrupt
3657 * setup is guaranteed to run in single-threaded context. But we
3658 * need it to make the assert_spin_locked happy. */
3659 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3660 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
	}

	return 0;
}
3667 static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
3672 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3673 PIPE_FIFO_UNDERRUN_STATUS;
3675 I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
3676 I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
3677 POSTING_READ(PIPESTAT(PIPE_A));
3679 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3680 PIPE_CRC_DONE_INTERRUPT_STATUS;
3682 i915_enable_pipestat(dev_priv, PIPE_A, pipestat_mask |
3683 PIPE_GMBUS_INTERRUPT_STATUS);
3684 i915_enable_pipestat(dev_priv, PIPE_B, pipestat_mask);
3686 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3687 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3688 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3689 dev_priv->irq_mask &= ~iir_mask;
3691 I915_WRITE(VLV_IIR, iir_mask);
3692 I915_WRITE(VLV_IIR, iir_mask);
3693 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3694 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3695 POSTING_READ(VLV_IER);
3698 static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3703 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3704 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3705 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3707 dev_priv->irq_mask |= iir_mask;
3708 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3709 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3710 I915_WRITE(VLV_IIR, iir_mask);
3711 I915_WRITE(VLV_IIR, iir_mask);
3712 POSTING_READ(VLV_IIR);
3714 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3715 PIPE_CRC_DONE_INTERRUPT_STATUS;
3717 i915_disable_pipestat(dev_priv, PIPE_A, pipestat_mask |
3718 PIPE_GMBUS_INTERRUPT_STATUS);
3719 i915_disable_pipestat(dev_priv, PIPE_B, pipestat_mask);
3721 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3722 PIPE_FIFO_UNDERRUN_STATUS;
3723 I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
3724 I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
3725 POSTING_READ(PIPESTAT(PIPE_A));
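/*
 * VLV display IRQs are installed/uninstalled separately from the rest of
 * the interrupt setup, so that other code can toggle them at runtime
 * (tracked via dev_priv->display_irqs_enabled below).
 */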
3728 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3730 assert_spin_locked(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		return;
3735 dev_priv->display_irqs_enabled = true;
3737 if (dev_priv->dev->irq_enabled)
3738 valleyview_display_irqs_install(dev_priv);
3741 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3743 assert_spin_locked(&dev_priv->irq_lock);
	if (!dev_priv->display_irqs_enabled)
		return;
3748 dev_priv->display_irqs_enabled = false;
3750 if (dev_priv->dev->irq_enabled)
3751 valleyview_display_irqs_uninstall(dev_priv);
3754 static int valleyview_irq_postinstall(struct drm_device *dev)
3756 struct drm_i915_private *dev_priv = dev->dev_private;
3757 unsigned long irqflags;
3759 dev_priv->irq_mask = ~0;
3761 I915_WRITE(PORT_HOTPLUG_EN, 0);
3762 POSTING_READ(PORT_HOTPLUG_EN);
3764 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3765 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3766 I915_WRITE(VLV_IIR, 0xffffffff);
3767 POSTING_READ(VLV_IER);
3769 /* Interrupt setup is already guaranteed to be single-threaded, this is
3770 * just to make the assert_spin_locked check happy. */
3771 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3772 if (dev_priv->display_irqs_enabled)
3773 valleyview_display_irqs_install(dev_priv);
3774 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3776 I915_WRITE(VLV_IIR, 0xffffffff);
3777 I915_WRITE(VLV_IIR, 0xffffffff);
3779 gen5_gt_irq_postinstall(dev);
3781 /* ack & enable invalid PTE error interrupts */
3782 #if 0 /* FIXME: add support to irq handler for checking these bits */
3783 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

	return 0;
}
3792 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3796 /* These are interrupts we'll toggle with the ring mask register */
3797 uint32_t gt_interrupts[] = {
3798 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3799 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3800 GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
3801 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3802 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3803 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3804 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3805 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3806 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3808 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3809 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3812 for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++)
3813 GEN8_IRQ_INIT_NDX(GT, i, ~gt_interrupts[i], gt_interrupts[i]);
3815 dev_priv->pm_irq_mask = 0xffffffff;
3818 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3820 struct drm_device *dev = dev_priv->dev;
3821 uint32_t de_pipe_masked = GEN8_PIPE_PRIMARY_FLIP_DONE |
3822 GEN8_PIPE_CDCLK_CRC_DONE |
3823 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3824 uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3825 GEN8_PIPE_FIFO_UNDERRUN;
3827 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3828 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3829 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
	for_each_pipe(pipe)
		if (intel_display_power_enabled(dev_priv,
						POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);
3838 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A);
3841 static int gen8_irq_postinstall(struct drm_device *dev)
3843 struct drm_i915_private *dev_priv = dev->dev_private;
3845 ibx_irq_pre_postinstall(dev);
3847 gen8_gt_irq_postinstall(dev_priv);
3848 gen8_de_irq_postinstall(dev_priv);
3850 ibx_irq_postinstall(dev);
3852 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}
3858 static int cherryview_irq_postinstall(struct drm_device *dev)
3860 struct drm_i915_private *dev_priv = dev->dev_private;
3861 u32 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
3862 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3863 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3864 I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3865 u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV |
3866 PIPE_CRC_DONE_INTERRUPT_STATUS;
3867 unsigned long irqflags;
3871 * Leave vblank interrupts masked initially. enable/disable will
3872 * toggle them based on usage.
3874 dev_priv->irq_mask = ~enable_mask;
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
3879 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3880 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(pipe)
		i915_enable_pipestat(dev_priv, pipe, pipestat_enable);
3883 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3885 I915_WRITE(VLV_IIR, 0xffffffff);
3886 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3887 I915_WRITE(VLV_IER, enable_mask);
3889 gen8_gt_irq_postinstall(dev_priv);
3891 I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}
3897 static void gen8_irq_uninstall(struct drm_device *dev)
3899 struct drm_i915_private *dev_priv = dev->dev_private;
3904 intel_hpd_irq_uninstall(dev_priv);
3906 gen8_irq_reset(dev);
3909 static void valleyview_irq_uninstall(struct drm_device *dev)
3911 struct drm_i915_private *dev_priv = dev->dev_private;
3912 unsigned long irqflags;
3918 I915_WRITE(VLV_MASTER_IER, 0);
3920 intel_hpd_irq_uninstall(dev_priv);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
3925 I915_WRITE(HWSTAM, 0xffffffff);
3926 I915_WRITE(PORT_HOTPLUG_EN, 0);
3927 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3929 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3930 if (dev_priv->display_irqs_enabled)
3931 valleyview_display_irqs_uninstall(dev_priv);
3932 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3934 dev_priv->irq_mask = 0;
3936 I915_WRITE(VLV_IIR, 0xffffffff);
3937 I915_WRITE(VLV_IMR, 0xffffffff);
3938 I915_WRITE(VLV_IER, 0x0);
3939 POSTING_READ(VLV_IER);
3942 static void cherryview_irq_uninstall(struct drm_device *dev)
3944 struct drm_i915_private *dev_priv = dev->dev_private;
3950 I915_WRITE(GEN8_MASTER_IRQ, 0);
3951 POSTING_READ(GEN8_MASTER_IRQ);
#define GEN8_IRQ_FINI_NDX(type, which) \
do { \
3955 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
3956 I915_WRITE(GEN8_##type##_IER(which), 0); \
3957 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
3958 POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
} while (0)
#define GEN8_IRQ_FINI(type) \
do { \
3964 I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
3965 I915_WRITE(GEN8_##type##_IER, 0); \
3966 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
3967 POSTING_READ(GEN8_##type##_IIR); \
	I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
} while (0)
3971 GEN8_IRQ_FINI_NDX(GT, 0);
3972 GEN8_IRQ_FINI_NDX(GT, 1);
3973 GEN8_IRQ_FINI_NDX(GT, 2);
	GEN8_IRQ_FINI_NDX(GT, 3);

	GEN8_IRQ_FINI(PCU);
3978 #undef GEN8_IRQ_FINI
3979 #undef GEN8_IRQ_FINI_NDX
3981 I915_WRITE(PORT_HOTPLUG_EN, 0);
3982 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
3987 I915_WRITE(VLV_IMR, 0xffffffff);
3988 I915_WRITE(VLV_IER, 0x0);
3989 I915_WRITE(VLV_IIR, 0xffffffff);
3990 POSTING_READ(VLV_IIR);
3993 static void ironlake_irq_uninstall(struct drm_device *dev)
3995 struct drm_i915_private *dev_priv = dev->dev_private;
4000 intel_hpd_irq_uninstall(dev_priv);
4002 ironlake_irq_reset(dev);
4005 static void i8xx_irq_preinstall(struct drm_device * dev)
4007 struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
4012 I915_WRITE16(IMR, 0xffff);
4013 I915_WRITE16(IER, 0x0);
4014 POSTING_READ16(IER);
4017 static int i8xx_irq_postinstall(struct drm_device *dev)
4019 struct drm_i915_private *dev_priv = dev->dev_private;
4020 unsigned long irqflags;
	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
4025 /* Unmask the interrupts that we always want on. */
4026 dev_priv->irq_mask =
4027 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4028 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4029 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4030 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4031 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4032 I915_WRITE16(IMR, dev_priv->irq_mask);
	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4036 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4037 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
4038 I915_USER_INTERRUPT);
4039 POSTING_READ16(IER);
4041 /* Interrupt setup is already guaranteed to be single-threaded, this is
4042 * just to make the assert_spin_locked check happy. */
4043 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
4044 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4045 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
4052 * Returns true when a page flip has completed.
4054 static bool i8xx_handle_vblank(struct drm_device *dev,
4055 int plane, int pipe, u32 iir)
4057 struct drm_i915_private *dev_priv = dev->dev_private;
4058 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
	if (!intel_pipe_handle_vblank(dev, pipe))
		return false;
4063 if ((iir & flip_pending) == 0)
4066 intel_prepare_page_flip(dev, plane);
4068 /* We detect FlipDone by looking for the change in PendingFlip from '1'
4069 * to '0' on the following vblank, i.e. IIR has the Pendingflip
4070 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
4071 * the flip is completed (no longer pending). Since this doesn't raise
4072 * an interrupt per se, we watch for the change at vblank.
4074 if (I915_READ16(ISR) & flip_pending)
4077 intel_finish_page_flip(dev, pipe);
4082 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
4084 struct drm_device *dev = arg;
4085 struct drm_i915_private *dev_priv = dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	unsigned long irqflags;
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4092 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	iir = I915_READ16(IIR);
	if (iir == 0)
		return IRQ_NONE;
4098 while (iir & ~flip_mask) {
4099 /* Can't rely on pipestat interrupt bit in iir as it might
4100 * have been cleared after the pipestat interrupt was received.
4101 * It doesn't set the bit in iir again, but it still produces
4102 * interrupts (for non-MSI).
4104 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
4105 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4106 i915_handle_error(dev, false,
4107 "Command parser error, iir 0x%08x",
4110 for_each_pipe(pipe) {
4111 int reg = PIPESTAT(pipe);
4112 pipe_stats[pipe] = I915_READ(reg);
4115 * Clear the PIPE*STAT regs before the IIR
4117 if (pipe_stats[pipe] & 0x8000ffff)
4118 I915_WRITE(reg, pipe_stats[pipe]);
4120 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
4122 I915_WRITE16(IIR, iir & ~flip_mask);
4123 new_iir = I915_READ16(IIR); /* Flush posted writes */
4125 i915_update_dri1_breadcrumb(dev);
4127 if (iir & I915_USER_INTERRUPT)
4128 notify_ring(dev, &dev_priv->ring[RCS]);
		for_each_pipe(pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4136 i8xx_handle_vblank(dev, plane, pipe, iir))
4137 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4139 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4140 i9xx_pipe_crc_irq_handler(dev, pipe);
4142 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
4143 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
4144 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
4153 static void i8xx_irq_uninstall(struct drm_device * dev)
4155 struct drm_i915_private *dev_priv = dev->dev_private;
4158 for_each_pipe(pipe) {
4159 /* Clear enable bits; then clear status bits */
4160 I915_WRITE(PIPESTAT(pipe), 0);
4161 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4163 I915_WRITE16(IMR, 0xffff);
4164 I915_WRITE16(IER, 0x0);
4165 I915_WRITE16(IIR, I915_READ16(IIR));
4168 static void i915_irq_preinstall(struct drm_device * dev)
4170 struct drm_i915_private *dev_priv = dev->dev_private;
4173 if (I915_HAS_HOTPLUG(dev)) {
4174 I915_WRITE(PORT_HOTPLUG_EN, 0);
4175 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4178 I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
4181 I915_WRITE(IMR, 0xffffffff);
4182 I915_WRITE(IER, 0x0);
4186 static int i915_irq_postinstall(struct drm_device *dev)
4188 struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;
	unsigned long irqflags;
4192 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
4194 /* Unmask the interrupts that we always want on. */
4195 dev_priv->irq_mask =
4196 ~(I915_ASLE_INTERRUPT |
4197 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4198 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4199 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4200 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4201 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4204 I915_ASLE_INTERRUPT |
4205 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4206 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4207 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
4208 I915_USER_INTERRUPT;
4210 if (I915_HAS_HOTPLUG(dev)) {
4211 I915_WRITE(PORT_HOTPLUG_EN, 0);
4212 POSTING_READ(PORT_HOTPLUG_EN);
4214 /* Enable in IER... */
4215 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
4216 /* and unmask in IMR */
4217 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
4220 I915_WRITE(IMR, dev_priv->irq_mask);
4221 I915_WRITE(IER, enable_mask);
4224 i915_enable_asle_pipestat(dev);
4226 /* Interrupt setup is already guaranteed to be single-threaded, this is
4227 * just to make the assert_spin_locked check happy. */
4228 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
4229 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4230 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4231 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
/*
 * Returns true when a page flip has completed.
 */
static bool i915_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		return false;

	intel_prepare_page_flip(dev, plane);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the PendingFlip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ(ISR) & flip_pending)
		return false;

	intel_finish_page_flip(dev, pipe);

	return true;
}

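/*
 * Main interrupt handler for gen3. IIR is drained in a loop: writing IIR
 * acks the bits we have consumed, and the posting read tells us whether
 * new bits arrived while we were working (see the MSI comment at the
 * bottom of the loop body).
 */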
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe, ret = IRQ_NONE;

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false,
					  "Command parser error, iir 0x%08x",
					  iir);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		/* Consume port. Then clear IIR or we'll miss events */
		if (I915_HAS_HOTPLUG(dev) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
			    intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
				DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

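/*
 * Gen3 teardown: intel_hpd_irq_uninstall() stops the hotplug re-enable
 * timer first so nothing re-arms hotplug behind us while we mask and ack
 * the remaining sources.
 */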
static void i915_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	intel_hpd_irq_uninstall(dev_priv);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

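/* Gen4 (i965-class) preinstall: same quiescing sequence, full-width registers. */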
static void i965_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

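/*
 * Gen4 postinstall: in addition to the gen3 setup this enables the
 * display port and GMBUS sources, and the BSD ring user interrupt on G4X.
 */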
static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;
	u32 error_mask;
	unsigned long irqflags;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev);

	return 0;
}

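/*
 * Program PORT_HOTPLUG_EN from the per-pin software state. Must run under
 * irq_lock, as the assert below documents; pins parked in HPD_DISABLED by
 * the storm handling are simply left masked here.
 */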
static void i915_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	u32 hotplug_en;

	assert_spin_locked(&dev_priv->irq_lock);

	if (I915_HAS_HOTPLUG(dev)) {
		hotplug_en = I915_READ(PORT_HOTPLUG_EN);
		hotplug_en &= ~HOTPLUG_INT_EN_MASK;
		/* Note HDMI and DP share hotplug bits */
		/* enable bits are the same for all generations */
		for_each_intel_encoder(dev, intel_encoder)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
		/* Programming the CRT detection parameters tends
		   to generate a spurious hotplug event about three
		   seconds later. So just do it once.
		*/
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
		hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

		/* Ignore TV since it's buggy */
		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}
}

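/* Main interrupt handler for gen4; structured like i915_irq_handler() above. */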
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	iir = I915_READ(IIR);

	for (;;) {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false,
					  "Command parser error, iir 0x%08x",
					  iir);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
			    intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
				DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

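/* Gen4 teardown: mirror of i965_irq_preinstall() plus a final status ack. */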
static void i965_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	intel_hpd_irq_uninstall(dev_priv);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

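/*
 * Timer callback that undoes hotplug storm mitigation: pins parked in
 * HPD_DISABLED are flipped back to HPD_ENABLED, their connectors are
 * moved from polling back to hotplug, and the hardware is reprogrammed.
 */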
static void intel_hpd_irq_reenable(unsigned long data)
{
	struct drm_i915_private *dev_priv = (struct drm_i915_private *)data;
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	unsigned long irqflags;
	int i;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
		struct drm_connector *connector;

		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
			continue;

		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;

		list_for_each_entry(connector, &mode_config->connector_list, head) {
			struct intel_connector *intel_connector = to_intel_connector(connector);

			if (intel_connector->encoder->hpd_pin == i) {
				if (connector->polled != intel_connector->polled)
					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
							 connector->name);
				connector->polled = intel_connector->polled;
				if (!connector->polled)
					connector->polled = DRM_CONNECTOR_POLL_HPD;
			}
		}
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

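/*
 * One-time IRQ state setup: initializes the work items and timers and
 * selects the per-platform handler/preinstall/postinstall/uninstall
 * hooks; the hardware itself is only touched later through those hooks.
 */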
void intel_irq_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
		    i915_hangcheck_elapsed,
		    (unsigned long) dev);
	setup_timer(&dev_priv->hotplug_reenable_timer, intel_hpd_irq_reenable,
		    (unsigned long) dev_priv);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	/* Haven't installed the IRQ handler yet */
	dev_priv->pm._irqs_disabled = true;

	if (IS_GEN2(dev)) {
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
		dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
	}

	if (IS_CHERRYVIEW(dev)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_GEN8(dev)) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}

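/*
 * (Re)initialize hotplug support: reset the per-pin storm statistics,
 * choose HPD vs. polling for each connector, and reprogram the hardware
 * via the platform hpd_irq_setup hook under irq_lock.
 */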
void intel_hpd_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	unsigned long irqflags;
	int i;

	for (i = 1; i < HPD_NUM_PINS; i++) {
		dev_priv->hpd_stats[i].hpd_cnt = 0;
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
	}
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct intel_connector *intel_connector = to_intel_connector(connector);
		connector->polled = intel_connector->polled;
		if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
		if (intel_connector->mst_port)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
	}

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/* Disable interrupts so we can allow runtime PM. */
void intel_runtime_pm_disable_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev->driver->irq_uninstall(dev);
	dev_priv->pm._irqs_disabled = true;
}

/* Restore interrupts so we can recover from runtime PM. */
void intel_runtime_pm_restore_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->pm._irqs_disabled = false;
	dev->driver->irq_preinstall(dev);
	dev->driver->irq_postinstall(dev);
}