Commit | Line | Data |
---|---|---|
0d6aa60b | 1 | /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*- |
1da177e4 | 2 | */ |
0d6aa60b | 3 | /* |
1da177e4 LT |
4 | * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. |
5 | * All Rights Reserved. | |
bc54fd1a DA |
6 | * |
7 | * Permission is hereby granted, free of charge, to any person obtaining a | |
8 | * copy of this software and associated documentation files (the | |
9 | * "Software"), to deal in the Software without restriction, including | |
10 | * without limitation the rights to use, copy, modify, merge, publish, | |
11 | * distribute, sub license, and/or sell copies of the Software, and to | |
12 | * permit persons to whom the Software is furnished to do so, subject to | |
13 | * the following conditions: | |
14 | * | |
15 | * The above copyright notice and this permission notice (including the | |
16 | * next paragraph) shall be included in all copies or substantial portions | |
17 | * of the Software. | |
18 | * | |
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS | |
20 | * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
21 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. | |
22 | * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR | |
23 | * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, | |
24 | * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE | |
25 | * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | |
26 | * | |
0d6aa60b | 27 | */ |
1da177e4 | 28 | |
a70491cc JP |
29 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
30 | ||
55367a27 JN |
31 | #include <linux/slab.h> |
32 | #include <linux/sysrq.h> | |
33 | ||
fcd70cd3 | 34 | #include <drm/drm_drv.h> |
55367a27 | 35 | |
2b874a02 | 36 | #include "display/intel_display_irq.h" |
df0566a6 | 37 | #include "display/intel_hotplug.h" |
da38ba98 | 38 | #include "display/intel_hotplug_irq.h" |
df0566a6 | 39 | #include "display/intel_lpe_audio.h" |
7f6947fd | 40 | #include "display/intel_psr_regs.h" |
df0566a6 | 41 | |
b3786b29 | 42 | #include "gt/intel_breadcrumbs.h" |
2239e6df | 43 | #include "gt/intel_gt.h" |
cf1c97dc | 44 | #include "gt/intel_gt_irq.h" |
d762043f | 45 | #include "gt/intel_gt_pm_irq.h" |
0d6419e9 | 46 | #include "gt/intel_gt_regs.h" |
3e7abf81 | 47 | #include "gt/intel_rps.h" |
2239e6df | 48 | |
24524e3f | 49 | #include "i915_driver.h" |
1da177e4 | 50 | #include "i915_drv.h" |
440e2b3d | 51 | #include "i915_irq.h" |
476f62b8 | 52 | #include "i915_reg.h" |
1da177e4 | 53 | |
fca52a55 DV |
54 | /** |
55 | * DOC: interrupt handling | |
56 | * | |
57 | * These functions provide the basic support for enabling and disabling the | |
58 | * interrupt handling support. There's a lot more functionality in i915_irq.c | |
59 | * and related files, but that will be described in separate chapters. | |
60 | */ | |
61 | ||
9c6508b9 TG |
62 | /* |
63 | * Interrupt statistic for PMU. Increments the counter only if the | |
78f48aa6 | 64 | * interrupt originated from the GPU so interrupts from a device which |
9c6508b9 TG |
65 | * shares the interrupt line are not accounted. |
66 | */ | |
67 | static inline void pmu_irq_stats(struct drm_i915_private *i915, | |
68 | irqreturn_t res) | |
69 | { | |
70 | if (unlikely(res != IRQ_HANDLED)) | |
71 | return; | |
72 | ||
73 | /* | |
74 | * A clever compiler translates that into INC. A not so clever one | |
75 | * should at least prevent store tearing. | |
76 | */ | |
77 | WRITE_ONCE(i915->pmu.irq_count, i915->pmu.irq_count + 1); | |
78 | } | |
79 | ||
/*
 * gen2_irq_reset - quiesce an IMR/IER/IIR interrupt register triplet
 * @uncore: uncore helper used for the MMIO accesses
 * @regs: the IMR/IER/IIR register offsets to reset
 *
 * Masks everything (IMR all ones), disables all interrupt enables (IER 0),
 * then clears any pending interrupt identity bits. Posting reads flush each
 * write to hardware before the next step.
 */
void gen2_irq_reset(struct intel_uncore *uncore, struct i915_irq_regs regs)
{
	intel_uncore_write(uncore, regs.imr, 0xffffffff);
	intel_uncore_posting_read(uncore, regs.imr);

	intel_uncore_write(uncore, regs.ier, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write(uncore, regs.iir, 0xffffffff);
	intel_uncore_posting_read(uncore, regs.iir);
	intel_uncore_write(uncore, regs.iir, 0xffffffff);
	intel_uncore_posting_read(uncore, regs.iir);
}
93 | ||
/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
void gen2_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
	u32 val = intel_uncore_read(uncore, reg);

	if (val == 0)
		return;

	/*
	 * Leftover IIR bits at postinstall indicate the reset path did not
	 * clean up properly; warn loudly and then clear them here.
	 */
	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(reg), val);
	/* Clear twice — IIR can queue up two events per bit (see reset). */
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
}
337ba017 | 112 | |
/*
 * gen2_irq_init - program an IMR/IER/IIR interrupt register triplet
 * @uncore: uncore helper used for the MMIO accesses
 * @regs: the IMR/IER/IIR register offsets to program
 * @imr_val: value written to the interrupt mask register (IMR)
 * @ier_val: value written to the interrupt enable register (IER)
 *
 * IIR is expected to have been cleared by a prior reset; this is checked
 * before the enables are programmed. The final posting read flushes the
 * IMR write to hardware.
 */
void gen2_irq_init(struct intel_uncore *uncore, struct i915_irq_regs regs,
		   u32 imr_val, u32 ier_val)
{
	gen2_assert_iir_is_zero(uncore, regs.iir);

	intel_uncore_write(uncore, regs.ier, ier_val);
	intel_uncore_write(uncore, regs.imr, imr_val);
	intel_uncore_posting_read(uncore, regs.imr);
}
122 | ||
/*
 * gen2_error_reset - mask and clear an EMR/EIR error register pair
 * @uncore: uncore helper used for the MMIO accesses
 * @regs: the EMR/EIR register offsets to reset
 *
 * Masks all error sources (EMR all ones) and clears pending error status.
 * EIR is cleared twice, mirroring the double-clear paranoia used for IIR.
 */
void gen2_error_reset(struct intel_uncore *uncore, struct i915_error_regs regs)
{
	intel_uncore_write(uncore, regs.emr, 0xffffffff);
	intel_uncore_posting_read(uncore, regs.emr);

	intel_uncore_write(uncore, regs.eir, 0xffffffff);
	intel_uncore_posting_read(uncore, regs.eir);
	intel_uncore_write(uncore, regs.eir, 0xffffffff);
	intel_uncore_posting_read(uncore, regs.eir);
}
133 | ||
/*
 * gen2_error_init - clear stale error status and program the error mask
 * @uncore: uncore helper used for the MMIO accesses
 * @regs: the EMR/EIR register offsets to program
 * @emr_val: error mask to program into EMR once EIR is clean
 *
 * Stale EIR bits are cleared first (twice, matching gen2_error_reset) so
 * the newly unmasked sources don't immediately report old errors.
 */
void gen2_error_init(struct intel_uncore *uncore, struct i915_error_regs regs,
		     u32 emr_val)
{
	intel_uncore_write(uncore, regs.eir, 0xffffffff);
	intel_uncore_posting_read(uncore, regs.eir);
	intel_uncore_write(uncore, regs.eir, 0xffffffff);
	intel_uncore_posting_read(uncore, regs.eir);

	intel_uncore_write(uncore, regs.emr, emr_val);
	intel_uncore_posting_read(uncore, regs.emr);
}
145 | ||
/**
 * ivb_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely the same row will go bad again.
 */
static void ivb_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
	struct intel_gt *gt = to_gt(dev_priv);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	u32 misccpctl;
	u8 slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = intel_uncore_rmw(&dev_priv->uncore, GEN7_MISCCPCTL,
				     GEN7_DOP_CLOCK_GATE_ENABLE, 0);
	intel_uncore_posting_read(&dev_priv->uncore, GEN7_MISCCPCTL);

	/* Handle every slice with a pending parity error. */
	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (drm_WARN_ON_ONCE(&dev_priv->drm,
				     slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = intel_uncore_read(&dev_priv->uncore, reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		/* Ack the error and re-arm detection for this slice. */
		intel_uncore_write(&dev_priv->uncore, reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		intel_uncore_posting_read(&dev_priv->uncore, reg);

		/* Notify userspace via a uevent carrying the bad location. */
		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		drm_dbg(&dev_priv->drm,
			"Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	/* Restore DOP clock gating to its previous state. */
	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);

out:
	drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
	spin_lock_irq(gt->irq_lock);
	gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(gt->irq_lock);

	mutex_unlock(&dev_priv->drm.struct_mutex);
}
229 | ||
/*
 * valleyview_irq_handler - top-level IRQ handler for VLV.
 * Reads GT, PM and display IIRs, acks them in the hardware-mandated
 * order (VLV_IIR last, since it is single buffered) and dispatches to
 * the GT/RPS/hotplug/pipestat sub-handlers.
 */
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	struct intel_display *display = &dev_priv->display;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 iir, gt_iir, pm_iir;
		u32 eir = 0, dpinvgtt = 0;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		gt_iir = intel_uncore_read(&dev_priv->uncore, GTIIR);
		pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR);
		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
		 * bits this time around.
		 */
		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
		ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0);

		if (gt_iir)
			intel_uncore_write(&dev_priv->uncore, GTIIR, gt_iir);
		if (pm_iir)
			intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(display);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			vlv_display_error_irq_ack(display, &eir, &dpinvgtt);

		/* Call regardless, as some status bits might not be
		 * signalled in IIR */
		i9xx_pipestat_irq_ack(display, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT))
			intel_lpe_audio_irq_handler(display);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);

		/* Re-enable interrupt delivery before running the handlers. */
		intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

		if (gt_iir)
			gen6_gt_irq_handler(to_gt(dev_priv), gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(&to_gt(dev_priv)->rps, pm_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(display, hotplug_status);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			vlv_display_error_irq_handler(display, eir, dpinvgtt);

		valleyview_pipestat_irq_handler(display, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
323 | ||
/*
 * cherryview_irq_handler - top-level IRQ handler for CHV.
 * Same shape as the VLV handler, but the GT sources arrive through the
 * gen8 master interrupt register instead of GTIIR/GEN6_PMIIR, and pipe C
 * LPE audio is also possible.
 */
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	struct intel_display *display = &dev_priv->display;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 master_ctl, iir;
		u32 eir = 0, dpinvgtt = 0;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		master_ctl = intel_uncore_read(&dev_priv->uncore, GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
		 * bits this time around.
		 */
		intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
		ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0);

		gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(display);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			vlv_display_error_irq_ack(display, &eir, &dpinvgtt);

		/* Call regardless, as some status bits might not be
		 * signalled in IIR */
		i9xx_pipestat_irq_ack(display, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT |
			   I915_LPE_PIPE_C_INTERRUPT))
			intel_lpe_audio_irq_handler(display);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);

		/* Re-enable interrupt delivery before running the handlers. */
		intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
		intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);

		if (hotplug_status)
			i9xx_hpd_irq_handler(display, hotplug_status);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			vlv_display_error_irq_handler(display, eir, dpinvgtt);

		valleyview_pipestat_irq_handler(display, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
409 | ||
/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
static irqreturn_t ilk_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_display *display = &i915->display;
	void __iomem * const regs = intel_uncore_regs(&i915->uncore);
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	if (unlikely(!intel_irqs_enabled(i915)))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&i915->runtime_pm);

	/* disable master interrupt before clearing iir  */
	de_ier = raw_reg_read(regs, DEIER);
	raw_reg_write(regs, DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(i915)) {
		sde_ier = raw_reg_read(regs, SDEIER);
		raw_reg_write(regs, SDEIER, 0);
	}

	/* Find, clear, then process each source of interrupt */

	gt_iir = raw_reg_read(regs, GTIIR);
	if (gt_iir) {
		raw_reg_write(regs, GTIIR, gt_iir);
		if (GRAPHICS_VER(i915) >= 6)
			gen6_gt_irq_handler(to_gt(i915), gt_iir);
		else
			gen5_gt_irq_handler(to_gt(i915), gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = raw_reg_read(regs, DEIIR);
	if (de_iir) {
		raw_reg_write(regs, DEIIR, de_iir);
		if (DISPLAY_VER(i915) >= 7)
			ivb_display_irq_handler(display, de_iir);
		else
			ilk_display_irq_handler(display, de_iir);
		ret = IRQ_HANDLED;
	}

	/* PM interrupts (GEN6_PMIIR) only exist on gen6+. */
	if (GRAPHICS_VER(i915) >= 6) {
		u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR);
		if (pm_iir) {
			raw_reg_write(regs, GEN6_PMIIR, pm_iir);
			gen6_rps_irq_handler(&to_gt(i915)->rps, pm_iir);
			ret = IRQ_HANDLED;
		}
	}

	/* Restore master and south interrupt enables. */
	raw_reg_write(regs, DEIER, de_ier);
	if (sde_ier)
		raw_reg_write(regs, SDEIER, sde_ier);

	pmu_irq_stats(i915, ret);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	enable_rpm_wakeref_asserts(&i915->runtime_pm);

	return ret;
}
488 | ||
/* Disable the gen8 master interrupt and return the pending sources. */
static inline u32 gen8_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN8_MASTER_IRQ);
}
501 | ||
/* Re-enable the gen8 master interrupt after handling. */
static inline void gen8_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
}
506 | ||
/*
 * gen8_irq_handler - top-level IRQ handler for gen8 (Broadwell-era).
 * Disables the master interrupt, dispatches GT and display sources,
 * then re-enables the master interrupt.
 */
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	struct intel_display *display = &dev_priv->display;
	void __iomem * const regs = intel_uncore_regs(&dev_priv->uncore);
	u32 master_ctl;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	master_ctl = gen8_master_intr_disable(regs);
	if (!master_ctl) {
		gen8_master_intr_enable(regs);
		return IRQ_NONE;
	}

	/* Find, queue (onto bottom-halves), then clear each source */
	gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	if (master_ctl & ~GEN8_GT_IRQS) {
		disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
		gen8_de_irq_handler(display, master_ctl);
		enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
	}

	gen8_master_intr_enable(regs);

	pmu_irq_stats(dev_priv, IRQ_HANDLED);

	return IRQ_HANDLED;
}
539 | ||
/* Disable the gen11 master interrupt and return the pending sources. */
static inline u32 gen11_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
}
552 | ||
/* Re-enable the gen11 master interrupt after handling. */
static inline void gen11_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
}
557 | ||
/*
 * gen11_irq_handler - top-level IRQ handler for gen11+ (single tile).
 * Disables the master interrupt, dispatches GT, display and GU_MISC
 * sources, then re-enables the master interrupt.
 */
static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_display *display = &i915->display;
	void __iomem * const regs = intel_uncore_regs(&i915->uncore);
	struct intel_gt *gt = to_gt(i915);
	u32 master_ctl;
	u32 gu_misc_iir;

	if (!intel_irqs_enabled(i915))
		return IRQ_NONE;

	master_ctl = gen11_master_intr_disable(regs);
	if (!master_ctl) {
		gen11_master_intr_enable(regs);
		return IRQ_NONE;
	}

	/* Find, queue (onto bottom-halves), then clear each source */
	gen11_gt_irq_handler(gt, master_ctl);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	if (master_ctl & GEN11_DISPLAY_IRQ)
		gen11_display_irq_handler(display);

	gu_misc_iir = gen11_gu_misc_irq_ack(display, master_ctl);

	gen11_master_intr_enable(regs);

	/* GU_MISC is handled after master re-enable, using the acked bits. */
	gen11_gu_misc_irq_handler(display, gu_misc_iir);

	pmu_irq_stats(i915, IRQ_HANDLED);

	return IRQ_HANDLED;
}
593 | ||
/*
 * Disable the DG1 tile-level master interrupt; returns the per-tile
 * indication bits (already acked), or 0 if nothing was pending.
 */
static inline u32 dg1_master_intr_disable(void __iomem * const regs)
{
	u32 val;

	/* First disable interrupts */
	raw_reg_write(regs, DG1_MSTR_TILE_INTR, 0);

	/* Get the indication levels and ack the master unit */
	val = raw_reg_read(regs, DG1_MSTR_TILE_INTR);
	if (unlikely(!val))
		return 0;

	raw_reg_write(regs, DG1_MSTR_TILE_INTR, val);

	return val;
}
610 | ||
/* Re-enable the DG1 tile-level master interrupt after handling. */
static inline void dg1_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ);
}
615 | ||
/*
 * dg1_irq_handler - top-level IRQ handler for DG1 (tiled) platforms.
 * Resolves the tile-level master register first, then falls through to
 * the gen11-style per-tile handling. Only tile 0 is supported.
 */
static irqreturn_t dg1_irq_handler(int irq, void *arg)
{
	struct drm_i915_private * const i915 = arg;
	struct intel_display *display = &i915->display;
	struct intel_gt *gt = to_gt(i915);
	void __iomem * const regs = intel_uncore_regs(gt->uncore);
	u32 master_tile_ctl, master_ctl;
	u32 gu_misc_iir;

	if (!intel_irqs_enabled(i915))
		return IRQ_NONE;

	master_tile_ctl = dg1_master_intr_disable(regs);
	if (!master_tile_ctl) {
		dg1_master_intr_enable(regs);
		return IRQ_NONE;
	}

	/* FIXME: we only support tile 0 for now. */
	if (master_tile_ctl & DG1_MSTR_TILE(0)) {
		master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
		raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, master_ctl);
	} else {
		drm_err(&i915->drm, "Tile not supported: 0x%08x\n",
			master_tile_ctl);
		dg1_master_intr_enable(regs);
		return IRQ_NONE;
	}

	gen11_gt_irq_handler(gt, master_ctl);

	if (master_ctl & GEN11_DISPLAY_IRQ)
		gen11_display_irq_handler(display);

	gu_misc_iir = gen11_gu_misc_irq_ack(display, master_ctl);

	dg1_master_intr_enable(regs);

	/* GU_MISC is handled after master re-enable, using the acked bits. */
	gen11_gu_misc_irq_handler(display, gu_misc_iir);

	pmu_irq_stats(i915, IRQ_HANDLED);

	return IRQ_HANDLED;
}
660 | ||
/* Reset all ILK/SNB/IVB/HSW interrupt registers to a quiesced state. */
static void ilk_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = &dev_priv->display;
	struct intel_uncore *uncore = &dev_priv->uncore;

	gen2_irq_reset(uncore, DE_IRQ_REGS);
	dev_priv->irq_mask = ~0u;

	/* GEN7_ERR_INT only exists on gen7 (IVB/HSW). */
	if (GRAPHICS_VER(dev_priv) == 7)
		intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);

	if (IS_HASWELL(dev_priv)) {
		/* Quiesce the PSR interrupt registers as well. */
		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
	}

	gen5_gt_irq_reset(to_gt(dev_priv));

	ibx_display_irq_reset(display);
}
681 | ||
/* Reset VLV interrupts: master off first, then GT and display blocks. */
static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = &dev_priv->display;

	intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
	intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);

	gen5_gt_irq_reset(to_gt(dev_priv));

	vlv_display_irq_reset(display);
}
693 | ||
/* Reset gen8 interrupts: master off first, then GT, display and PCU. */
static void gen8_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = &dev_priv->display;
	struct intel_uncore *uncore = &dev_priv->uncore;

	gen8_master_intr_disable(intel_uncore_regs(uncore));

	gen8_gt_irq_reset(to_gt(dev_priv));
	gen8_display_irq_reset(display);
	gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);
}
09f2344d | 705 | |
/* Reset gen11 interrupts: master off, then GT, display, GU_MISC and PCU. */
static void gen11_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = &dev_priv->display;
	struct intel_gt *gt = to_gt(dev_priv);
	struct intel_uncore *uncore = gt->uncore;

	gen11_master_intr_disable(intel_uncore_regs(&dev_priv->uncore));

	gen11_gt_irq_reset(gt);
	gen11_display_irq_reset(display);

	gen2_irq_reset(uncore, GEN11_GU_MISC_IRQ_REGS);
	gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);
}
720 | ||
/* Reset DG1 interrupts: tile master off, then every GT, display and PCU. */
static void dg1_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = &dev_priv->display;
	struct intel_uncore *uncore = &dev_priv->uncore;
	struct intel_gt *gt;
	unsigned int i;

	dg1_master_intr_disable(intel_uncore_regs(&dev_priv->uncore));

	for_each_gt(gt, dev_priv, i)
		gen11_gt_irq_reset(gt);

	gen11_display_irq_reset(display);

	gen2_irq_reset(uncore, GEN11_GU_MISC_IRQ_REGS);
	gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);

	/* Clear any leftover gfx master indications. */
	intel_uncore_write(uncore, GEN11_GFX_MSTR_IRQ, ~0);
}
740 | ||
b318b824 | 741 | static void cherryview_irq_reset(struct drm_i915_private *dev_priv) |
43f328d7 | 742 | { |
007232f6 | 743 | struct intel_display *display = &dev_priv->display; |
b16b2a2f | 744 | struct intel_uncore *uncore = &dev_priv->uncore; |
43f328d7 | 745 | |
e58c2cac | 746 | intel_uncore_write(uncore, GEN8_MASTER_IRQ, 0); |
2939eb06 | 747 | intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ); |
43f328d7 | 748 | |
2cbc876d | 749 | gen8_gt_irq_reset(to_gt(dev_priv)); |
43f328d7 | 750 | |
750a9540 | 751 | gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS); |
43f328d7 | 752 | |
007232f6 | 753 | vlv_display_irq_reset(display); |
43f328d7 VS |
754 | } |
755 | ||
da38ba98 | 756 | static void ilk_irq_postinstall(struct drm_i915_private *dev_priv) |
2ea63927 | 757 | { |
007232f6 JN |
758 | struct intel_display *display = &dev_priv->display; |
759 | ||
2cbc876d | 760 | gen5_gt_irq_postinstall(to_gt(dev_priv)); |
a9922912 | 761 | |
007232f6 | 762 | ilk_de_irq_postinstall(display); |
036a4a7d ZW |
763 | } |
764 | ||
b318b824 | 765 | static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv) |
0e6c9a9e | 766 | { |
007232f6 JN |
767 | struct intel_display *display = &dev_priv->display; |
768 | ||
2cbc876d | 769 | gen5_gt_irq_postinstall(to_gt(dev_priv)); |
7e231dbe | 770 | |
007232f6 | 771 | vlv_display_irq_postinstall(display); |
ad22d106 | 772 | |
2939eb06 JN |
773 | intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); |
774 | intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER); | |
20afbda2 DV |
775 | } |
776 | ||
b318b824 | 777 | static void gen8_irq_postinstall(struct drm_i915_private *dev_priv) |
abd58f01 | 778 | { |
007232f6 JN |
779 | struct intel_display *display = &dev_priv->display; |
780 | ||
2cbc876d | 781 | gen8_gt_irq_postinstall(to_gt(dev_priv)); |
007232f6 | 782 | gen8_de_irq_postinstall(display); |
abd58f01 | 783 | |
72e9abc3 | 784 | gen8_master_intr_enable(intel_uncore_regs(&dev_priv->uncore)); |
abd58f01 BW |
785 | } |
786 | ||
b318b824 | 787 | static void gen11_irq_postinstall(struct drm_i915_private *dev_priv) |
51951ae7 | 788 | { |
007232f6 | 789 | struct intel_display *display = &dev_priv->display; |
2cbc876d | 790 | struct intel_gt *gt = to_gt(dev_priv); |
fd4d7904 | 791 | struct intel_uncore *uncore = gt->uncore; |
df0d28c1 | 792 | u32 gu_misc_masked = GEN11_GU_MISC_GSE; |
51951ae7 | 793 | |
fd4d7904 | 794 | gen11_gt_irq_postinstall(gt); |
007232f6 | 795 | gen11_de_irq_postinstall(display); |
51951ae7 | 796 | |
750a9540 | 797 | gen2_irq_init(uncore, GEN11_GU_MISC_IRQ_REGS, ~gu_misc_masked, gu_misc_masked); |
df0d28c1 | 798 | |
72e9abc3 | 799 | gen11_master_intr_enable(intel_uncore_regs(uncore)); |
22e26af7 PZ |
800 | intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ); |
801 | } | |
802 | ||
/*
 * Enable interrupts on DG1-class (multi-tile capable) hardware:
 * per-tile GT sources, GU_MISC (GSE only) and display first, master
 * enable last, with a posting read of the tile master register.
 */
static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = &dev_priv->display;
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 gu_misc_masked = GEN11_GU_MISC_GSE;
	struct intel_gt *gt;
	unsigned int i;

	for_each_gt(gt, dev_priv, i)
		gen11_gt_irq_postinstall(gt);

	gen2_irq_init(uncore, GEN11_GU_MISC_IRQ_REGS, ~gu_misc_masked, gu_misc_masked);

	dg1_de_irq_postinstall(display);

	dg1_master_intr_enable(intel_uncore_regs(uncore));
	intel_uncore_posting_read(uncore, DG1_MSTR_TILE_INTR);
}
821 | ||
b318b824 | 822 | static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv) |
43f328d7 | 823 | { |
007232f6 JN |
824 | struct intel_display *display = &dev_priv->display; |
825 | ||
2cbc876d | 826 | gen8_gt_irq_postinstall(to_gt(dev_priv)); |
43f328d7 | 827 | |
007232f6 | 828 | vlv_display_irq_postinstall(display); |
ad22d106 | 829 | |
2939eb06 JN |
830 | intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); |
831 | intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ); | |
43f328d7 VS |
832 | } |
833 | ||
3687ce75 VS |
834 | static u32 i9xx_error_mask(struct drm_i915_private *i915) |
835 | { | |
e7e12f6e VS |
836 | /* |
837 | * On gen2/3 FBC generates (seemingly spurious) | |
838 | * display INVALID_GTT/INVALID_GTT_PTE table errors. | |
839 | * | |
840 | * Also gen3 bspec has this to say: | |
841 | * "DISPA_INVALID_GTT_PTE | |
842 | " [DevNapa] : Reserved. This bit does not reflect the page | |
843 | " table error for the display plane A." | |
844 | * | |
845 | * Unfortunately we can't mask off individual PGTBL_ER bits, | |
846 | * so we just have to mask off all page table errors via EMR. | |
847 | */ | |
848 | if (HAS_FBC(i915)) | |
b6cfae8d | 849 | return I915_ERROR_MEMORY_REFRESH; |
e7e12f6e | 850 | else |
b6cfae8d VS |
851 | return I915_ERROR_PAGE_TABLE | |
852 | I915_ERROR_MEMORY_REFRESH; | |
3687ce75 VS |
853 | } |
854 | ||
78c357dd VS |
/*
 * Acknowledge the master error interrupt: latch the current EIR value
 * into @eir, write it back to clear the clearable bits, and report any
 * bits that remain set afterwards ("stuck" errors) via @eir_stuck.
 */
static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
			       u32 *eir, u32 *eir_stuck)
{
	u32 emr;

	*eir = intel_uncore_read(&dev_priv->uncore, EIR);
	/* Writing EIR back clears whichever error bits can be cleared. */
	intel_uncore_write(&dev_priv->uncore, EIR, *eir);

	/* Whatever is still set after the write-back is stuck. */
	*eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = intel_uncore_read(&dev_priv->uncore, EMR);
	intel_uncore_write(&dev_priv->uncore, EMR, 0xffffffff);
	intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck);
}
881 | ||
882 | static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv, | |
883 | u32 eir, u32 eir_stuck) | |
884 | { | |
a10234fd | 885 | drm_dbg(&dev_priv->drm, "Master Error, EIR 0x%08x\n", eir); |
78c357dd VS |
886 | |
887 | if (eir_stuck) | |
00376ccf WK |
888 | drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n", |
889 | eir_stuck); | |
d1e89592 VS |
890 | |
891 | drm_dbg(&dev_priv->drm, "PGTBL_ER: 0x%08x\n", | |
892 | intel_uncore_read(&dev_priv->uncore, PGTBL_ER)); | |
78c357dd VS |
893 | } |
894 | ||
b318b824 | 895 | static void i915_irq_reset(struct drm_i915_private *dev_priv) |
a266c7d5 | 896 | { |
007232f6 | 897 | struct intel_display *display = &dev_priv->display; |
b16b2a2f | 898 | struct intel_uncore *uncore = &dev_priv->uncore; |
a266c7d5 | 899 | |
007232f6 | 900 | i9xx_display_irq_reset(display); |
44d9241e | 901 | |
474e1cd6 | 902 | gen2_error_reset(uncore, GEN2_ERROR_REGS); |
750a9540 | 903 | gen2_irq_reset(uncore, GEN2_IRQ_REGS); |
e44adb5d | 904 | dev_priv->irq_mask = ~0u; |
a266c7d5 CW |
905 | } |
906 | ||
b318b824 | 907 | static void i915_irq_postinstall(struct drm_i915_private *dev_priv) |
a266c7d5 | 908 | { |
007232f6 | 909 | struct intel_display *display = &dev_priv->display; |
b16b2a2f | 910 | struct intel_uncore *uncore = &dev_priv->uncore; |
38bde180 | 911 | u32 enable_mask; |
a266c7d5 | 912 | |
b6cfae8d | 913 | gen2_error_init(uncore, GEN2_ERROR_REGS, ~i9xx_error_mask(dev_priv)); |
38bde180 | 914 | |
38bde180 | 915 | dev_priv->irq_mask = |
8c827853 | 916 | ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | |
16659bc5 VS |
917 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | |
918 | I915_MASTER_ERROR_INTERRUPT); | |
38bde180 CW |
919 | |
920 | enable_mask = | |
38bde180 CW |
921 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | |
922 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | | |
16659bc5 | 923 | I915_MASTER_ERROR_INTERRUPT | |
38bde180 CW |
924 | I915_USER_INTERRUPT; |
925 | ||
8c827853 VS |
926 | if (DISPLAY_VER(dev_priv) >= 3) { |
927 | dev_priv->irq_mask &= ~I915_ASLE_INTERRUPT; | |
928 | enable_mask |= I915_ASLE_INTERRUPT; | |
929 | } | |
930 | ||
09b9563e | 931 | if (HAS_HOTPLUG(dev_priv)) { |
a266c7d5 | 932 | dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; |
3bda3b66 | 933 | enable_mask |= I915_DISPLAY_PORT_INTERRUPT; |
a266c7d5 CW |
934 | } |
935 | ||
750a9540 | 936 | gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->irq_mask, enable_mask); |
a266c7d5 | 937 | |
4c05cef0 | 938 | i915_display_irq_postinstall(display); |
20afbda2 DV |
939 | } |
940 | ||
ff1f525e | 941 | static irqreturn_t i915_irq_handler(int irq, void *arg) |
a266c7d5 | 942 | { |
b318b824 | 943 | struct drm_i915_private *dev_priv = arg; |
1e40b20e | 944 | struct intel_display *display = &dev_priv->display; |
af722d28 | 945 | irqreturn_t ret = IRQ_NONE; |
a266c7d5 | 946 | |
2dd2a883 ID |
947 | if (!intel_irqs_enabled(dev_priv)) |
948 | return IRQ_NONE; | |
949 | ||
1f814dac | 950 | /* IRQs are synced during runtime_suspend, we don't require a wakeref */ |
9102650f | 951 | disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); |
1f814dac | 952 | |
38bde180 | 953 | do { |
eb64343c | 954 | u32 pipe_stats[I915_MAX_PIPES] = {}; |
78c357dd | 955 | u32 eir = 0, eir_stuck = 0; |
af722d28 VS |
956 | u32 hotplug_status = 0; |
957 | u32 iir; | |
a266c7d5 | 958 | |
2939eb06 | 959 | iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR); |
af722d28 VS |
960 | if (iir == 0) |
961 | break; | |
962 | ||
963 | ret = IRQ_HANDLED; | |
964 | ||
09b9563e | 965 | if (HAS_HOTPLUG(dev_priv) && |
af722d28 | 966 | iir & I915_DISPLAY_PORT_INTERRUPT) |
1e40b20e | 967 | hotplug_status = i9xx_hpd_irq_ack(display); |
a266c7d5 | 968 | |
eb64343c | 969 | /* Call regardless, as some status bits might not be |
17b018c2 | 970 | * signalled in IIR */ |
007232f6 | 971 | i9xx_pipestat_irq_ack(display, iir, pipe_stats); |
a266c7d5 | 972 | |
78c357dd VS |
973 | if (iir & I915_MASTER_ERROR_INTERRUPT) |
974 | i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck); | |
975 | ||
2939eb06 | 976 | intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir); |
a266c7d5 | 977 | |
a266c7d5 | 978 | if (iir & I915_USER_INTERRUPT) |
2cbc876d | 979 | intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir); |
a266c7d5 | 980 | |
78c357dd VS |
981 | if (iir & I915_MASTER_ERROR_INTERRUPT) |
982 | i9xx_error_irq_handler(dev_priv, eir, eir_stuck); | |
a266c7d5 | 983 | |
af722d28 | 984 | if (hotplug_status) |
1e40b20e | 985 | i9xx_hpd_irq_handler(display, hotplug_status); |
af722d28 | 986 | |
007232f6 | 987 | i915_pipestat_irq_handler(display, iir, pipe_stats); |
af722d28 | 988 | } while (0); |
a266c7d5 | 989 | |
9c6508b9 TG |
990 | pmu_irq_stats(dev_priv, ret); |
991 | ||
9102650f | 992 | enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); |
1f814dac | 993 | |
a266c7d5 CW |
994 | return ret; |
995 | } | |
996 | ||
b318b824 | 997 | static void i965_irq_reset(struct drm_i915_private *dev_priv) |
a266c7d5 | 998 | { |
007232f6 | 999 | struct intel_display *display = &dev_priv->display; |
b16b2a2f | 1000 | struct intel_uncore *uncore = &dev_priv->uncore; |
a266c7d5 | 1001 | |
007232f6 | 1002 | i9xx_display_irq_reset(display); |
44d9241e | 1003 | |
474e1cd6 | 1004 | gen2_error_reset(uncore, GEN2_ERROR_REGS); |
750a9540 | 1005 | gen2_irq_reset(uncore, GEN2_IRQ_REGS); |
e44adb5d | 1006 | dev_priv->irq_mask = ~0u; |
a266c7d5 CW |
1007 | } |
1008 | ||
3687ce75 | 1009 | static u32 i965_error_mask(struct drm_i915_private *i915) |
a266c7d5 | 1010 | { |
045cebd2 VS |
1011 | /* |
1012 | * Enable some error detection, note the instruction error mask | |
1013 | * bit is reserved, so we leave it masked. | |
e7e12f6e VS |
1014 | * |
1015 | * i965 FBC no longer generates spurious GTT errors, | |
1016 | * so we can always enable the page table errors. | |
045cebd2 | 1017 | */ |
3687ce75 | 1018 | if (IS_G4X(i915)) |
b6cfae8d VS |
1019 | return GM45_ERROR_PAGE_TABLE | |
1020 | GM45_ERROR_MEM_PRIV | | |
1021 | GM45_ERROR_CP_PRIV | | |
1022 | I915_ERROR_MEMORY_REFRESH; | |
3687ce75 | 1023 | else |
b6cfae8d VS |
1024 | return I915_ERROR_PAGE_TABLE | |
1025 | I915_ERROR_MEMORY_REFRESH; | |
3687ce75 VS |
1026 | } |
1027 | ||
/*
 * Enable interrupts on i965-class (gen4) hardware: program the error
 * mask, build the interrupt mask/enable pair (adding the BSD ring user
 * interrupt on G4X), write them out, then run the display postinstall.
 */
static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = &dev_priv->display;
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;

	gen2_error_init(uncore, GEN2_ERROR_REGS, ~i965_error_mask(dev_priv));

	/* Sources unmasked unconditionally on gen4. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PORT_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	/* Only G4X has the BSD (video) ring on this path. */
	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->irq_mask, enable_mask);

	i965_display_irq_postinstall(display);
}
1058 | ||
ff1f525e | 1059 | static irqreturn_t i965_irq_handler(int irq, void *arg) |
a266c7d5 | 1060 | { |
b318b824 | 1061 | struct drm_i915_private *dev_priv = arg; |
1e40b20e | 1062 | struct intel_display *display = &dev_priv->display; |
af722d28 | 1063 | irqreturn_t ret = IRQ_NONE; |
a266c7d5 | 1064 | |
2dd2a883 ID |
1065 | if (!intel_irqs_enabled(dev_priv)) |
1066 | return IRQ_NONE; | |
1067 | ||
1f814dac | 1068 | /* IRQs are synced during runtime_suspend, we don't require a wakeref */ |
9102650f | 1069 | disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); |
1f814dac | 1070 | |
af722d28 | 1071 | do { |
eb64343c | 1072 | u32 pipe_stats[I915_MAX_PIPES] = {}; |
78c357dd | 1073 | u32 eir = 0, eir_stuck = 0; |
af722d28 VS |
1074 | u32 hotplug_status = 0; |
1075 | u32 iir; | |
a266c7d5 | 1076 | |
2939eb06 | 1077 | iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR); |
af722d28 | 1078 | if (iir == 0) |
a266c7d5 CW |
1079 | break; |
1080 | ||
1081 | ret = IRQ_HANDLED; | |
1082 | ||
af722d28 | 1083 | if (iir & I915_DISPLAY_PORT_INTERRUPT) |
1e40b20e | 1084 | hotplug_status = i9xx_hpd_irq_ack(display); |
af722d28 VS |
1085 | |
1086 | /* Call regardless, as some status bits might not be | |
17b018c2 | 1087 | * signalled in IIR */ |
007232f6 | 1088 | i9xx_pipestat_irq_ack(display, iir, pipe_stats); |
a266c7d5 | 1089 | |
78c357dd VS |
1090 | if (iir & I915_MASTER_ERROR_INTERRUPT) |
1091 | i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck); | |
1092 | ||
2939eb06 | 1093 | intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir); |
a266c7d5 | 1094 | |
a266c7d5 | 1095 | if (iir & I915_USER_INTERRUPT) |
2cbc876d | 1096 | intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], |
0669a6e1 | 1097 | iir); |
af722d28 | 1098 | |
a266c7d5 | 1099 | if (iir & I915_BSD_USER_INTERRUPT) |
2cbc876d | 1100 | intel_engine_cs_irq(to_gt(dev_priv)->engine[VCS0], |
0669a6e1 | 1101 | iir >> 25); |
a266c7d5 | 1102 | |
78c357dd VS |
1103 | if (iir & I915_MASTER_ERROR_INTERRUPT) |
1104 | i9xx_error_irq_handler(dev_priv, eir, eir_stuck); | |
515ac2bb | 1105 | |
af722d28 | 1106 | if (hotplug_status) |
1e40b20e | 1107 | i9xx_hpd_irq_handler(display, hotplug_status); |
af722d28 | 1108 | |
007232f6 | 1109 | i965_pipestat_irq_handler(display, iir, pipe_stats); |
af722d28 | 1110 | } while (0); |
a266c7d5 | 1111 | |
9c6508b9 TG |
1112 | pmu_irq_stats(dev_priv, IRQ_HANDLED); |
1113 | ||
9102650f | 1114 | enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); |
1f814dac | 1115 | |
a266c7d5 CW |
1116 | return ret; |
1117 | } | |
1118 | ||
fca52a55 DV |
1119 | /** |
1120 | * intel_irq_init - initializes irq support | |
1121 | * @dev_priv: i915 device instance | |
1122 | * | |
1123 | * This function initializes all the irq support including work items, timers | |
1124 | * and all the vtables. It does not setup the interrupt itself though. | |
1125 | */ | |
b963291c | 1126 | void intel_irq_init(struct drm_i915_private *dev_priv) |
f71d4af4 | 1127 | { |
cefcff8f | 1128 | int i; |
8b2e326d | 1129 | |
74bb98ba | 1130 | INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work); |
cefcff8f JL |
1131 | for (i = 0; i < MAX_L3_SLICES; ++i) |
1132 | dev_priv->l3_parity.remap_info[i] = NULL; | |
8b2e326d | 1133 | |
633023a4 | 1134 | /* pre-gen11 the guc irqs bits are in the upper 16 bits of the pm reg */ |
651e7d48 | 1135 | if (HAS_GT_UC(dev_priv) && GRAPHICS_VER(dev_priv) < 11) |
2cbc876d | 1136 | to_gt(dev_priv)->pm_guc_events = GUC_INTR_GUC2HOST << 16; |
f71d4af4 | 1137 | } |
20afbda2 | 1138 | |
cefcff8f JL |
1139 | /** |
1140 | * intel_irq_fini - deinitializes IRQ support | |
1141 | * @i915: i915 device instance | |
1142 | * | |
1143 | * This function deinitializes all the IRQ support. | |
1144 | */ | |
1145 | void intel_irq_fini(struct drm_i915_private *i915) | |
1146 | { | |
1147 | int i; | |
1148 | ||
1149 | for (i = 0; i < MAX_L3_SLICES; ++i) | |
1150 | kfree(i915->l3_parity.remap_info[i]); | |
1151 | } | |
1152 | ||
b318b824 VS |
1153 | static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv) |
1154 | { | |
1155 | if (HAS_GMCH(dev_priv)) { | |
1156 | if (IS_CHERRYVIEW(dev_priv)) | |
1157 | return cherryview_irq_handler; | |
1158 | else if (IS_VALLEYVIEW(dev_priv)) | |
1159 | return valleyview_irq_handler; | |
651e7d48 | 1160 | else if (GRAPHICS_VER(dev_priv) == 4) |
b318b824 | 1161 | return i965_irq_handler; |
b318b824 | 1162 | else |
8c827853 | 1163 | return i915_irq_handler; |
b318b824 | 1164 | } else { |
22e26af7 | 1165 | if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10)) |
97b492f5 | 1166 | return dg1_irq_handler; |
22e26af7 | 1167 | else if (GRAPHICS_VER(dev_priv) >= 11) |
b318b824 | 1168 | return gen11_irq_handler; |
651e7d48 | 1169 | else if (GRAPHICS_VER(dev_priv) >= 8) |
b318b824 VS |
1170 | return gen8_irq_handler; |
1171 | else | |
9eae5e27 | 1172 | return ilk_irq_handler; |
b318b824 VS |
1173 | } |
1174 | } | |
1175 | ||
/*
 * Dispatch to the platform-appropriate full IRQ reset routine, using
 * the same platform selection as intel_irq_handler().
 */
static void intel_irq_reset(struct drm_i915_private *dev_priv)
{
	if (!HAS_GMCH(dev_priv)) {
		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
			dg1_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 11)
			gen11_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 8)
			gen8_irq_reset(dev_priv);
		else
			ilk_irq_reset(dev_priv);
		return;
	}

	if (IS_CHERRYVIEW(dev_priv))
		cherryview_irq_reset(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		valleyview_irq_reset(dev_priv);
	else if (GRAPHICS_VER(dev_priv) == 4)
		i965_irq_reset(dev_priv);
	else
		i915_irq_reset(dev_priv);
}
1198 | ||
/*
 * Dispatch to the platform-appropriate IRQ postinstall (enable)
 * routine, using the same platform selection as intel_irq_handler().
 */
static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (!HAS_GMCH(dev_priv)) {
		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
			dg1_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 11)
			gen11_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 8)
			gen8_irq_postinstall(dev_priv);
		else
			ilk_irq_postinstall(dev_priv);
		return;
	}

	if (IS_CHERRYVIEW(dev_priv))
		cherryview_irq_postinstall(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		valleyview_irq_postinstall(dev_priv);
	else if (GRAPHICS_VER(dev_priv) == 4)
		i965_irq_postinstall(dev_priv);
	else
		i915_irq_postinstall(dev_priv);
}
1221 | ||
fca52a55 DV |
1222 | /** |
1223 | * intel_irq_install - enables the hardware interrupt | |
1224 | * @dev_priv: i915 device instance | |
1225 | * | |
1226 | * This function enables the hardware interrupt handling, but leaves the hotplug | |
1227 | * handling still disabled. It is called after intel_irq_init(). | |
1228 | * | |
1229 | * In the driver load and resume code we need working interrupts in a few places | |
1230 | * but don't want to deal with the hassle of concurrent probe and hotplug | |
1231 | * workers. Hence the split into this two-stage approach. | |
1232 | */ | |
2aeb7d3a DV |
1233 | int intel_irq_install(struct drm_i915_private *dev_priv) |
1234 | { | |
8ff5446a | 1235 | int irq = to_pci_dev(dev_priv->drm.dev)->irq; |
b318b824 VS |
1236 | int ret; |
1237 | ||
2aeb7d3a DV |
1238 | /* |
1239 | * We enable some interrupt sources in our postinstall hooks, so mark | |
1240 | * interrupts as enabled _before_ actually enabling them to avoid | |
1241 | * special cases in our ordering checks. | |
1242 | */ | |
acc7a9b2 | 1243 | dev_priv->irqs_enabled = true; |
2aeb7d3a | 1244 | |
b318b824 VS |
1245 | intel_irq_reset(dev_priv); |
1246 | ||
1247 | ret = request_irq(irq, intel_irq_handler(dev_priv), | |
1248 | IRQF_SHARED, DRIVER_NAME, dev_priv); | |
1249 | if (ret < 0) { | |
acc7a9b2 | 1250 | dev_priv->irqs_enabled = false; |
b318b824 VS |
1251 | return ret; |
1252 | } | |
1253 | ||
1254 | intel_irq_postinstall(dev_priv); | |
1255 | ||
1256 | return ret; | |
2aeb7d3a DV |
1257 | } |
1258 | ||
fca52a55 | 1259 | /** |
381ab12d | 1260 | * intel_irq_uninstall - finalizes all irq handling |
fca52a55 DV |
1261 | * @dev_priv: i915 device instance |
1262 | * | |
1263 | * This stops interrupt and hotplug handling and unregisters and frees all | |
1264 | * resources acquired in the init functions. | |
1265 | */ | |
2aeb7d3a DV |
1266 | void intel_irq_uninstall(struct drm_i915_private *dev_priv) |
1267 | { | |
4cd502aa | 1268 | struct intel_display *display = &dev_priv->display; |
8ff5446a | 1269 | int irq = to_pci_dev(dev_priv->drm.dev)->irq; |
b318b824 | 1270 | |
0b7e9ddb | 1271 | if (drm_WARN_ON(&dev_priv->drm, !dev_priv->irqs_enabled)) |
b318b824 VS |
1272 | return; |
1273 | ||
b318b824 VS |
1274 | intel_irq_reset(dev_priv); |
1275 | ||
1276 | free_irq(irq, dev_priv); | |
1277 | ||
4cd502aa | 1278 | intel_hpd_cancel_work(display); |
acc7a9b2 | 1279 | dev_priv->irqs_enabled = false; |
2aeb7d3a DV |
1280 | } |
1281 | ||
fca52a55 | 1282 | /** |
3de5774c RV |
1283 | * intel_irq_suspend - Suspend interrupts |
1284 | * @i915: i915 device instance | |
fca52a55 | 1285 | * |
3de5774c | 1286 | * This function is used to disable interrupts at runtime. |
fca52a55 | 1287 | */ |
3de5774c | 1288 | void intel_irq_suspend(struct drm_i915_private *i915) |
c67a470b | 1289 | { |
3de5774c RV |
1290 | intel_irq_reset(i915); |
1291 | i915->irqs_enabled = false; | |
1292 | intel_synchronize_irq(i915); | |
c67a470b PZ |
1293 | } |
1294 | ||
fca52a55 | 1295 | /** |
3de5774c RV |
1296 | * intel_irq_resume - Resume interrupts |
1297 | * @i915: i915 device instance | |
fca52a55 | 1298 | * |
3de5774c | 1299 | * This function is used to enable interrupts at runtime. |
fca52a55 | 1300 | */ |
3de5774c | 1301 | void intel_irq_resume(struct drm_i915_private *i915) |
c67a470b | 1302 | { |
3de5774c RV |
1303 | i915->irqs_enabled = true; |
1304 | intel_irq_reset(i915); | |
1305 | intel_irq_postinstall(i915); | |
c67a470b | 1306 | } |
d64575ee JN |
1307 | |
1308 | bool intel_irqs_enabled(struct drm_i915_private *dev_priv) | |
1309 | { | |
acc7a9b2 | 1310 | return dev_priv->irqs_enabled; |
d64575ee JN |
1311 | } |
1312 | ||
1313 | void intel_synchronize_irq(struct drm_i915_private *i915) | |
1314 | { | |
8ff5446a | 1315 | synchronize_irq(to_pci_dev(i915->drm.dev)->irq); |
d64575ee | 1316 | } |
320ad343 TZ |
1317 | |
1318 | void intel_synchronize_hardirq(struct drm_i915_private *i915) | |
1319 | { | |
1320 | synchronize_hardirq(to_pci_dev(i915->drm.dev)->irq); | |
1321 | } |