/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_gen4[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

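/*
 * Note (added for clarity): the hpd_* tables above are indexed by the
 * driver's hotplug pin ids (HPD_CRT, HPD_PORT_B, ...) and map each pin to
 * the matching bit in that platform's hotplug enable/status register, so
 * the interrupt code can translate between pins and register bits with a
 * simple table lookup.
 */
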
/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

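/*
 * Note (added for clarity): in DEIMR a set bit *masks* the corresponding
 * display interrupt, which is why "enable" clears bits and "disable" sets
 * them; the POSTING_READ flushes the register write to the hardware.
 */
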
static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum pipe pipe;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						 enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}

static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	if (enable) {
		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

		if (!ivb_can_enable_err_int(dev))
			return;

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);

		/* Change the state _after_ we've read out the current one. */
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

		if (!was_enabled &&
		    (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
			DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
				      pipe_name(pipe));
		}
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 uint32_t interrupt_mask,
					 uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
#define ibx_enable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), 0)

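/*
 * Example (illustrative only): with dev_priv->irq_lock held,
 *	ibx_enable_display_interrupt(dev_priv, SDE_GMBUS);
 * expands to ibx_display_interrupt_update(dev_priv, SDE_GMBUS, SDE_GMBUS),
 * which clears the SDE_GMBUS bit in SDEIMR and thereby unmasks the GMBUS
 * interrupt; the disable variant passes 0 and sets the bit again.
 */
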
static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

	if (enable)
		ibx_enable_display_interrupt(dev_priv, bit);
	else
		ibx_disable_display_interrupt(dev_priv, bit);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(SERR_INT,
			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

		if (!cpt_can_enable_serr_int(dev))
			return;

		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
	} else {
		uint32_t tmp = I915_READ(SERR_INT);
		bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);

		/* Change the state _after_ we've read out the current one. */
		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

		if (!was_enabled &&
		    (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
			DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
				      transcoder_name(pch_transcoder));
		}
	}
}

/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, due to the fact that there's just one interrupt
 * mask/enable bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->cpu_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}

/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's
 * just one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	/*
	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
	 * has only one pch transcoder A that all pipes can use. To avoid racy
	 * pch transcoder -> pipe lookups from interrupt code simply store the
	 * underrun statistics in crtc A. Since we never expose this anywhere
	 * nor use it outside of the fifo underrun code here using the "wrong"
	 * crtc on LPT won't cause issues.
	 */

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->pch_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}


void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == mask)
		return;

	/* Enable the interrupt, clear any pending status */
	pipestat |= mask | (mask >> 16);
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == 0)
		return;

	pipestat &= ~mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

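/*
 * Note (added for clarity): PIPESTAT keeps the enable bits in the high half
 * of the register and the matching status bits 16 positions lower. The
 * 0x7fff0000 mask keeps only the enable bits, and "mask | (mask >> 16)"
 * enables an interrupt while clearing any stale status bit (status bits are
 * write-1-to-clear) in the same write.
 */
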
/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}

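/*
 * Note (added for clarity): the 16-bit high field and the 8-bit low field
 * are combined into a 24-bit frame counter; re-reading the high field until
 * it is stable guards against the low field wrapping between the two reads.
 */
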
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    int *vpos, int *hpos)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	/* Get vtotal. */
	vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = I915_READ(PIPEDSL(pipe));

		/* Decode into vertical scanout position. Don't have
		 * horizontal scanout position.
		 */
		*vpos = position & 0x1fff;
		*hpos = 0;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* Query vblank area. */
	vbl = I915_READ(VBLANK(cpu_transcoder));

	/* Test position against vblank region. */
	vbl_start = vbl & 0x1fff;
	vbl_end = (vbl >> 16) & 0x1fff;

	if ((*vpos < vbl_start) || (*vpos > vbl_end))
		in_vbl = false;

	/* Inside "upper part" of vblank area? Apply corrective offset: */
	if (in_vbl && (*vpos >= vbl_start))
		*vpos = *vpos - vtotal;

	/* Readouts valid? */
	if (vbl > 0)
		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc);
}

static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
		      connector->base.id,
		      drm_get_connector_name(connector),
		      old_status, connector->status);
	return (old_status != connector->status);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	unsigned long irqflags;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)
		return;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 drm_get_connector_name(connector));
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      drm_get_connector_name(connector), intel_encoder->hpd_pin);
		}
	}
	/* If there were no outputs to poll, polling was disabled, so make
	 * sure it's enabled when we disable HPD on some connectors. */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_timer(&dev_priv->hotplug_reenable_timer,
			  jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}

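/*
 * Note (added for clarity): connectors whose pin was flagged
 * HPD_MARK_DISABLED by the storm detector are switched from interrupt-driven
 * detection to periodic polling above; hotplug_reenable_timer switches them
 * back after I915_REENABLE_HOTPLUG_DELAY (two minutes).
 */
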
static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));

	wake_up_all(&ring->irq_queue);
	i915_queue_hangcheck(dev);
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir, pm_imr;
	u8 new_delay;

	spin_lock_irq(&dev_priv->irq_lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	pm_imr = I915_READ(GEN6_PMIMR);
	/* Make sure not to corrupt PMIMR state used by ringbuffer code */
	I915_WRITE(GEN6_PMIMR, pm_imr & ~GEN6_PM_RPS_EVENTS);
	spin_unlock_irq(&dev_priv->irq_lock);

	if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		new_delay = dev_priv->rps.cur_delay + 1;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (IS_VALLEYVIEW(dev_priv->dev) &&
		    dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay)
			new_delay = dev_priv->rps.rpe_delay;
	} else
		new_delay = dev_priv->rps.cur_delay - 1;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	if (new_delay >= dev_priv->rps.min_delay &&
	    new_delay <= dev_priv->rps.max_delay) {
		if (IS_VALLEYVIEW(dev_priv->dev))
			valleyview_set_rps(dev_priv->dev, new_delay);
		else
			gen6_set_rps(dev_priv->dev, new_delay);
	}

	if (IS_VALLEYVIEW(dev_priv->dev)) {
		/*
		 * On VLV, when we enter RC6 we may not be at the minimum
		 * voltage level, so arm a timer to check. It should only
		 * fire when there's activity or once after we've entered
		 * RC6, and then won't be re-armed until the next RPS interrupt.
		 */
		mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
				 msecs_to_jiffies(100));
	}

	mutex_unlock(&dev_priv->rps.hw_lock);
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely that the same row will go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[5];
	uint32_t misccpctl;
	unsigned long flags;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	error_status = I915_READ(GEN7_L3CDERRST1);
	row = GEN7_PARITY_ERROR_ROW(error_status);
	bank = GEN7_PARITY_ERROR_BANK(error_status);
	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
				    GEN7_L3CDERRST1_ENABLE);
	POSTING_READ(GEN7_L3CDERRST1);

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask &= ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);

	parity_event[0] = "L3_PARITY_ERROR=1";
	parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
	parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
	parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
	parity_event[4] = NULL;

	kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
			   KOBJ_CHANGE, parity_event);

	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
		  row, bank, subbank);

	kfree(parity_event[3]);
	kfree(parity_event[2]);
	kfree(parity_event[1]);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!HAS_L3_GPU_CACHE(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	dev_priv->gt_irq_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock(&dev_priv->irq_lock);

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

	if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		ivybridge_parity_error_irq_handler(dev);
}

/* Legacy way of handling PM interrupts */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv,
				 u32 pm_iir)
{
	/*
	 * IIR bits should never already be set because IMR should
	 * prevent an interrupt from being shown in IIR. The warning
	 * displays a case where we've unsafely cleared
	 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
	 * type is not a problem, it displays a problem in the logic.
	 *
	 * The mask bit in IMR is cleared by dev_priv->rps.work.
	 */

	spin_lock(&dev_priv->irq_lock);
	dev_priv->rps.pm_iir |= pm_iir;
	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
	POSTING_READ(GEN6_PMIMR);
	spin_unlock(&dev_priv->irq_lock);

	queue_work(dev_priv->wq, &dev_priv->rps.work);
}

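/*
 * Note (added for clarity): gen6_rps_irq_handler() masks the freshly seen
 * bits in GEN6_PMIMR and stashes them in dev_priv->rps.pm_iir;
 * gen6_pm_rps_work() above unmasks the RPS bits again once the new
 * frequency is programmed, keeping the slow RPS work out of the irq path.
 */
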
#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

static inline void intel_hpd_irq_handler(struct drm_device *dev,
					 u32 hotplug_trigger,
					 const u32 *hpd)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	bool storm_detected = false;

	if (!hotplug_trigger)
		return;

	spin_lock(&dev_priv->irq_lock);
	for (i = 1; i < HPD_NUM_PINS; i++) {

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		dev_priv->hpd_event_bits |= (1 << i);
		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
		}
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock(&dev_priv->irq_lock);

	queue_work(dev_priv->wq,
		   &dev_priv->hotplug_work);
}

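/*
 * Note (added for clarity): a pin is declared stormy once more than
 * HPD_STORM_THRESHOLD interrupts arrive within HPD_STORM_DETECT_PERIOD
 * milliseconds of each other; the count restarts whenever the previous
 * interrupt on that pin is older than the detection period.
 */
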
static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

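/*
 * Note (added for clarity): GMBUS and DP AUX completions share
 * dev_priv->gmbus_wait_queue, so both handlers just wake any waiters; the
 * sleeping code is expected to re-check the relevant status register itself.
 */
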
/* Unlike gen6_rps_irq_handler() from which this function is originally derived,
 * we must be able to deal with other PM interrupts. This is complicated because
 * of the way in which we use the masks to defer the RPS work (which for
 * posterity is necessary because of forcewake).
 */
static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv,
			       u32 pm_iir)
{
	if (pm_iir & GEN6_PM_RPS_EVENTS) {
		spin_lock(&dev_priv->irq_lock);
		dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
		I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
		/* never want to mask useful interrupts. (also posting read) */
		WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
		spin_unlock(&dev_priv->irq_lock);

		queue_work(dev_priv->wq, &dev_priv->rps.work);
	}

	if (pm_iir & PM_VEBOX_USER_INTERRUPT)
		notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

	if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
		DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
		i915_handle_error(dev_priv->dev, false);
	}
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	unsigned long irqflags;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];

	atomic_inc(&dev_priv->irq_received);

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
				drm_handle_vblank(dev, pipe);

			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}
		}

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);

			intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		if (pm_iir & GEN6_PM_RPS_EVENTS)
			gen6_rps_irq_handler(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}

static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		/* port_name() yields a character, so print with %c */
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
}

static void ivb_err_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 err_int = I915_READ(GEN7_ERR_INT);

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	if (err_int & ERR_INT_FIFO_UNDERRUN_A)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
			DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");

	if (err_int & ERR_INT_FIFO_UNDERRUN_B)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
			DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");

	if (err_int & ERR_INT_FIFO_UNDERRUN_C)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false))
			DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n");

	I915_WRITE(GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n");

	I915_WRITE(SERR_INT, serr_int);
}

static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev);
}

static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev);

	if (de_iir & DE_PIPEA_VBLANK)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK)
		drm_handle_vblank(dev, 1);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

	if (de_iir & DE_PIPEA_FIFO_UNDERRUN)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
			DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");

	if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
			DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");

	if (de_iir & DE_PLANEA_FLIP_DONE) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip_plane(dev, 0);
	}

	if (de_iir & DE_PLANEB_FLIP_DONE) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip_plane(dev, 1);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_rps_change_irq_handler(dev);
}

static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev);

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev);

	for (i = 0; i < 3; i++) {
		if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
			drm_handle_vblank(dev, i);
		if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
			intel_prepare_page_flip(dev, i);
			intel_finish_page_flip_plane(dev, i);
		}
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}

static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	/* We get interrupts on unclaimed registers, so check for this before we
	 * do any I915_{READ,WRITE}. */
	if (IS_HASWELL(dev) &&
	    (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unclaimed register before interrupt\n");
		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be able
	 * to process them after we restore SDEIER (as soon as we restore it,
	 * we'll get an interrupt if SDEIIR still has something to process due
	 * to its back queue). */
	if (!HAS_PCH_NOP(dev)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}

	/* On Haswell, also mask ERR_INT because we don't want to risk
	 * generating "unclaimed register" interrupts from inside the interrupt
	 * handler. */
	if (IS_HASWELL(dev)) {
		spin_lock(&dev_priv->irq_lock);
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
		spin_unlock(&dev_priv->irq_lock);
	}

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		snb_gt_irq_handler(dev, dev_priv, gt_iir);
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		ivb_display_irq_handler(dev, de_iir);

		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
	}

	pm_iir = I915_READ(GEN6_PMIIR);
	if (pm_iir) {
		if (IS_HASWELL(dev))
			hsw_pm_irq_handler(dev_priv, pm_iir);
		else if (pm_iir & GEN6_PM_RPS_EVENTS)
			gen6_rps_irq_handler(dev_priv, pm_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		ret = IRQ_HANDLED;
	}

	if (IS_HASWELL(dev)) {
		spin_lock(&dev_priv->irq_lock);
		if (ivb_can_enable_err_int(dev))
			ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
		spin_unlock(&dev_priv->irq_lock);
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	if (!HAS_PCH_NOP(dev)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}

	return ret;
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

ff1f525e | 1382 | static irqreturn_t ironlake_irq_handler(int irq, void *arg) |
036a4a7d | 1383 | { |
4697995b | 1384 | struct drm_device *dev = (struct drm_device *) arg; |
036a4a7d ZW |
1385 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
1386 | int ret = IRQ_NONE; | |
27b9188e | 1387 | u32 de_iir, gt_iir, de_ier, sde_ier; |
881f47b6 | 1388 | |
4697995b JB |
1389 | atomic_inc(&dev_priv->irq_received); |
1390 | ||
2d109a84 ZN |
1391 | /* disable master interrupt before clearing iir */ |
1392 | de_ier = I915_READ(DEIER); | |
1393 | I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); | |
3143a2bf | 1394 | POSTING_READ(DEIER); |
2d109a84 | 1395 | |
44498aea PZ |
1396 | /* Disable south interrupts. We'll only write to SDEIIR once, so further |
1397 | * interrupts will be stored on its back queue, and then we'll be
1398 | * able to process them after we restore SDEIER (as soon as we restore | |
1399 | * it, we'll get an interrupt if SDEIIR still has something to process | |
1400 | * due to its back queue). */ | |
1401 | sde_ier = I915_READ(SDEIER); | |
1402 | I915_WRITE(SDEIER, 0); | |
1403 | POSTING_READ(SDEIER); | |
1404 | ||
036a4a7d | 1405 | gt_iir = I915_READ(GTIIR); |
27b9188e PZ |
1406 | if (gt_iir) { |
1407 | if (IS_GEN5(dev)) | |
1408 | ilk_gt_irq_handler(dev, dev_priv, gt_iir); | |
1409 | else | |
1410 | snb_gt_irq_handler(dev, dev_priv, gt_iir); | |
1411 | I915_WRITE(GTIIR, gt_iir); | |
1412 | ret = IRQ_HANDLED; | |
1413 | } | |
01c66889 | 1414 | |
27b9188e PZ |
1415 | de_iir = I915_READ(DEIIR); |
1416 | if (de_iir) { | |
c008bc6e | 1417 | ilk_display_irq_handler(dev, de_iir); |
27b9188e PZ |
1418 | I915_WRITE(DEIIR, de_iir); |
1419 | ret = IRQ_HANDLED; | |
1420 | } | |
f97108d1 | 1421 | |
27b9188e PZ |
1422 | if (IS_GEN6(dev)) { |
1423 | u32 pm_iir = I915_READ(GEN6_PMIIR); | |
1424 | if (pm_iir) { | |
1425 | if (pm_iir & GEN6_PM_RPS_EVENTS) | |
1426 | gen6_rps_irq_handler(dev_priv, pm_iir); | |
1427 | I915_WRITE(GEN6_PMIIR, pm_iir); | |
1428 | ret = IRQ_HANDLED; | |
1429 | } | |
1430 | } | |
c7c85101 | 1431 | |
2d109a84 | 1432 | I915_WRITE(DEIER, de_ier); |
3143a2bf | 1433 | POSTING_READ(DEIER); |
44498aea PZ |
1434 | I915_WRITE(SDEIER, sde_ier); |
1435 | POSTING_READ(SDEIER); | |
2d109a84 | 1436 | |
036a4a7d ZW |
1437 | return ret; |
1438 | } | |
1439 | ||
8a905236 JB |
1440 | /** |
1441 | * i915_error_work_func - do process context error handling work | |
1442 | * @work: work struct | |
1443 | * | |
1444 | * Fire an error uevent so userspace can see that a hang or error | |
1445 | * was detected. | |
1446 | */ | |
1447 | static void i915_error_work_func(struct work_struct *work) | |
1448 | { | |
1f83fee0 DV |
1449 | struct i915_gpu_error *error = container_of(work, struct i915_gpu_error, |
1450 | work); | |
1451 | drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t, | |
1452 | gpu_error); | |
8a905236 | 1453 | struct drm_device *dev = dev_priv->dev; |
f69061be | 1454 | struct intel_ring_buffer *ring; |
f316a42c BG |
1455 | char *error_event[] = { "ERROR=1", NULL }; |
1456 | char *reset_event[] = { "RESET=1", NULL }; | |
1457 | char *reset_done_event[] = { "ERROR=0", NULL }; | |
f69061be | 1458 | int i, ret; |
8a905236 | 1459 | |
f316a42c BG |
1460 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); |
1461 | ||
7db0ba24 DV |
1462 | /* |
1463 | * Note that there's only one work item which does gpu resets, so we | |
1464 | * need not worry about concurrent gpu resets potentially incrementing | |
1465 | * error->reset_counter twice. We only need to take care of another | |
1466 | * racing irq/hangcheck declaring the gpu dead for a second time. A | |
1467 | * quick check for that is good enough: schedule_work ensures the | |
1468 | * correct ordering between hang detection and this work item, and since | |
1469 | * the reset in-progress bit is only ever set by code outside of this | |
1470 | * work we don't need to worry about any other races. | |
1471 | */ | |
1472 | if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) { | |
f803aa55 | 1473 | DRM_DEBUG_DRIVER("resetting chip\n"); |
7db0ba24 DV |
1474 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, |
1475 | reset_event); | |
1f83fee0 | 1476 | |
f69061be DV |
1477 | ret = i915_reset(dev); |
1478 | ||
1479 | if (ret == 0) { | |
1480 | /* | |
1481 | * After all the gem state is reset, increment the reset | |
1482 | * counter and wake up everyone waiting for the reset to | |
1483 | * complete. | |
1484 | * | |
1485 | * Since unlock operations are a one-sided barrier only, | |
1486 | * we need to insert a barrier here to order any seqno | |
1487 | * updates before the counter increment.
1489 | */ | |
1490 | smp_mb__before_atomic_inc(); | |
1491 | atomic_inc(&dev_priv->gpu_error.reset_counter); | |
1492 | ||
1493 | kobject_uevent_env(&dev->primary->kdev.kobj, | |
1494 | KOBJ_CHANGE, reset_done_event); | |
1f83fee0 DV |
1495 | } else { |
1496 | atomic_set(&error->reset_counter, I915_WEDGED); | |
f316a42c | 1497 | } |
1f83fee0 | 1498 | |
f69061be DV |
1499 | for_each_ring(ring, dev_priv, i) |
1500 | wake_up_all(&ring->irq_queue); | |
1501 | ||
96a02917 VS |
1502 | intel_display_handle_reset(dev); |
1503 | ||
1f83fee0 | 1504 | wake_up_all(&dev_priv->gpu_error.reset_queue); |
f316a42c | 1505 | } |
8a905236 JB |
1506 | } |
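/*
 * Note on the reset_counter protocol used above (constants as defined in
 * i915_drv.h of this era): I915_RESET_IN_PROGRESS_FLAG is bit 0, so the
 * counter is odd while a reset is pending, and the atomic_inc() on
 * success makes it even again; waiters that sampled the counter earlier
 * can tell a completed reset from a pending one. I915_WEDGED (bit 31)
 * marks the GPU as terminally dead, which is what the atomic_set() above
 * does when i915_reset() fails.
 */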
1507 | ||
35aed2e6 | 1508 | static void i915_report_and_clear_eir(struct drm_device *dev) |
8a905236 JB |
1509 | { |
1510 | struct drm_i915_private *dev_priv = dev->dev_private; | |
bd9854f9 | 1511 | uint32_t instdone[I915_NUM_INSTDONE_REG]; |
8a905236 | 1512 | u32 eir = I915_READ(EIR); |
050ee91f | 1513 | int pipe, i; |
8a905236 | 1514 | |
35aed2e6 CW |
1515 | if (!eir) |
1516 | return; | |
8a905236 | 1517 | |
a70491cc | 1518 | pr_err("render error detected, EIR: 0x%08x\n", eir); |
8a905236 | 1519 | |
bd9854f9 BW |
1520 | i915_get_extra_instdone(dev, instdone); |
1521 | ||
8a905236 JB |
1522 | if (IS_G4X(dev)) { |
1523 | if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { | |
1524 | u32 ipeir = I915_READ(IPEIR_I965); | |
1525 | ||
a70491cc JP |
1526 | pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); |
1527 | pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); | |
050ee91f BW |
1528 | for (i = 0; i < ARRAY_SIZE(instdone); i++) |
1529 | pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); | |
a70491cc | 1530 | pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); |
a70491cc | 1531 | pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); |
8a905236 | 1532 | I915_WRITE(IPEIR_I965, ipeir); |
3143a2bf | 1533 | POSTING_READ(IPEIR_I965); |
8a905236 JB |
1534 | } |
1535 | if (eir & GM45_ERROR_PAGE_TABLE) { | |
1536 | u32 pgtbl_err = I915_READ(PGTBL_ER); | |
a70491cc JP |
1537 | pr_err("page table error\n"); |
1538 | pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); | |
8a905236 | 1539 | I915_WRITE(PGTBL_ER, pgtbl_err); |
3143a2bf | 1540 | POSTING_READ(PGTBL_ER); |
8a905236 JB |
1541 | } |
1542 | } | |
1543 | ||
a6c45cf0 | 1544 | if (!IS_GEN2(dev)) { |
8a905236 JB |
1545 | if (eir & I915_ERROR_PAGE_TABLE) { |
1546 | u32 pgtbl_err = I915_READ(PGTBL_ER); | |
a70491cc JP |
1547 | pr_err("page table error\n"); |
1548 | pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); | |
8a905236 | 1549 | I915_WRITE(PGTBL_ER, pgtbl_err); |
3143a2bf | 1550 | POSTING_READ(PGTBL_ER); |
8a905236 JB |
1551 | } |
1552 | } | |
1553 | ||
1554 | if (eir & I915_ERROR_MEMORY_REFRESH) { | |
a70491cc | 1555 | pr_err("memory refresh error:\n"); |
9db4a9c7 | 1556 | for_each_pipe(pipe) |
a70491cc | 1557 | pr_err("pipe %c stat: 0x%08x\n", |
9db4a9c7 | 1558 | pipe_name(pipe), I915_READ(PIPESTAT(pipe))); |
8a905236 JB |
1559 | /* pipestat has already been acked */ |
1560 | } | |
1561 | if (eir & I915_ERROR_INSTRUCTION) { | |
a70491cc JP |
1562 | pr_err("instruction error\n"); |
1563 | pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); | |
050ee91f BW |
1564 | for (i = 0; i < ARRAY_SIZE(instdone); i++) |
1565 | pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); | |
a6c45cf0 | 1566 | if (INTEL_INFO(dev)->gen < 4) { |
8a905236 JB |
1567 | u32 ipeir = I915_READ(IPEIR); |
1568 | ||
a70491cc JP |
1569 | pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); |
1570 | pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); | |
a70491cc | 1571 | pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); |
8a905236 | 1572 | I915_WRITE(IPEIR, ipeir); |
3143a2bf | 1573 | POSTING_READ(IPEIR); |
8a905236 JB |
1574 | } else { |
1575 | u32 ipeir = I915_READ(IPEIR_I965); | |
1576 | ||
a70491cc JP |
1577 | pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); |
1578 | pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); | |
a70491cc | 1579 | pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); |
a70491cc | 1580 | pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); |
8a905236 | 1581 | I915_WRITE(IPEIR_I965, ipeir); |
3143a2bf | 1582 | POSTING_READ(IPEIR_I965); |
8a905236 JB |
1583 | } |
1584 | } | |
1585 | ||
1586 | I915_WRITE(EIR, eir); | |
3143a2bf | 1587 | POSTING_READ(EIR); |
8a905236 JB |
1588 | eir = I915_READ(EIR); |
1589 | if (eir) { | |
1590 | /* | |
1591 | * some errors might have become stuck, | |
1592 | * mask them. | |
1593 | */ | |
1594 | DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); | |
1595 | I915_WRITE(EMR, I915_READ(EMR) | eir); | |
1596 | I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); | |
1597 | } | |
35aed2e6 CW |
1598 | } |
1599 | ||
1600 | /** | |
1601 | * i915_handle_error - handle an error interrupt | |
1602 | * @dev: drm device | |
1603 | * | |
1604 | * Do some basic checking of register state at error interrupt time and
1605 | * dump it to the syslog. Also call i915_capture_error_state() to make | |
1606 | * sure we get a record and make it available in debugfs. Fire a uevent | |
1607 | * so userspace knows something bad happened (should trigger collection | |
1608 | * of a ring dump etc.). | |
1609 | */ | |
527f9e90 | 1610 | void i915_handle_error(struct drm_device *dev, bool wedged) |
35aed2e6 CW |
1611 | { |
1612 | struct drm_i915_private *dev_priv = dev->dev_private; | |
b4519513 CW |
1613 | struct intel_ring_buffer *ring; |
1614 | int i; | |
35aed2e6 CW |
1615 | |
1616 | i915_capture_error_state(dev); | |
1617 | i915_report_and_clear_eir(dev); | |
8a905236 | 1618 | |
ba1234d1 | 1619 | if (wedged) { |
f69061be DV |
1620 | atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG, |
1621 | &dev_priv->gpu_error.reset_counter); | |
ba1234d1 | 1622 | |
11ed50ec | 1623 | /* |
1f83fee0 DV |
1624 | * Wakeup waiting processes so that the reset work item |
1625 | * doesn't deadlock trying to grab various locks. | |
11ed50ec | 1626 | */ |
b4519513 CW |
1627 | for_each_ring(ring, dev_priv, i) |
1628 | wake_up_all(&ring->irq_queue); | |
11ed50ec BG |
1629 | } |
1630 | ||
99584db3 | 1631 | queue_work(dev_priv->wq, &dev_priv->gpu_error.work); |
8a905236 JB |
1632 | } |
1633 | ||
21ad8330 | 1634 | static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe) |
4e5359cd SF |
1635 | { |
1636 | drm_i915_private_t *dev_priv = dev->dev_private; | |
1637 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; | |
1638 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
05394f39 | 1639 | struct drm_i915_gem_object *obj; |
4e5359cd SF |
1640 | struct intel_unpin_work *work; |
1641 | unsigned long flags; | |
1642 | bool stall_detected; | |
1643 | ||
1644 | /* Ignore early vblank irqs */ | |
1645 | if (intel_crtc == NULL) | |
1646 | return; | |
1647 | ||
1648 | spin_lock_irqsave(&dev->event_lock, flags); | |
1649 | work = intel_crtc->unpin_work; | |
1650 | ||
e7d841ca CW |
1651 | if (work == NULL || |
1652 | atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE || | |
1653 | !work->enable_stall_check) { | |
4e5359cd SF |
1654 | /* Either the pending flip IRQ arrived, or we're too early. Don't check */ |
1655 | spin_unlock_irqrestore(&dev->event_lock, flags); | |
1656 | return; | |
1657 | } | |
1658 | ||
1659 | /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ | |
05394f39 | 1660 | obj = work->pending_flip_obj; |
a6c45cf0 | 1661 | if (INTEL_INFO(dev)->gen >= 4) { |
9db4a9c7 | 1662 | int dspsurf = DSPSURF(intel_crtc->plane); |
446f2545 | 1663 | stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == |
f343c5f6 | 1664 | i915_gem_obj_ggtt_offset(obj); |
4e5359cd | 1665 | } else { |
9db4a9c7 | 1666 | int dspaddr = DSPADDR(intel_crtc->plane); |
f343c5f6 | 1667 | stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) + |
01f2c773 | 1668 | crtc->y * crtc->fb->pitches[0] + |
4e5359cd SF |
1669 | crtc->x * crtc->fb->bits_per_pixel/8); |
1670 | } | |
1671 | ||
1672 | spin_unlock_irqrestore(&dev->event_lock, flags); | |
1673 | ||
1674 | if (stall_detected) { | |
1675 | DRM_DEBUG_DRIVER("Pageflip stall detected\n"); | |
1676 | intel_prepare_page_flip(dev, intel_crtc->plane); | |
1677 | } | |
1678 | } | |
1679 | ||
42f52ef8 KP |
1680 | /* Called from drm generic code, passed 'crtc' which |
1681 | * we use as a pipe index | |
1682 | */ | |
f71d4af4 | 1683 | static int i915_enable_vblank(struct drm_device *dev, int pipe) |
0a3e67a4 JB |
1684 | { |
1685 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
e9d21d7f | 1686 | unsigned long irqflags; |
71e0ffa5 | 1687 | |
5eddb70b | 1688 | if (!i915_pipe_enabled(dev, pipe)) |
71e0ffa5 | 1689 | return -EINVAL; |
0a3e67a4 | 1690 | |
1ec14ad3 | 1691 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
f796cf8f | 1692 | if (INTEL_INFO(dev)->gen >= 4) |
7c463586 KP |
1693 | i915_enable_pipestat(dev_priv, pipe, |
1694 | PIPE_START_VBLANK_INTERRUPT_ENABLE); | |
e9d21d7f | 1695 | else |
7c463586 KP |
1696 | i915_enable_pipestat(dev_priv, pipe, |
1697 | PIPE_VBLANK_INTERRUPT_ENABLE); | |
8692d00e CW |
1698 | |
1699 | /* maintain vblank delivery even in deep C-states */ | |
1700 | if (dev_priv->info->gen == 3) | |
6b26c86d | 1701 | I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS)); |
1ec14ad3 | 1702 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
8692d00e | 1703 | |
0a3e67a4 JB |
1704 | return 0; |
1705 | } | |
1706 | ||
f71d4af4 | 1707 | static int ironlake_enable_vblank(struct drm_device *dev, int pipe) |
f796cf8f JB |
1708 | { |
1709 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
1710 | unsigned long irqflags; | |
1711 | ||
1712 | if (!i915_pipe_enabled(dev, pipe)) | |
1713 | return -EINVAL; | |
1714 | ||
1715 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
1716 | ironlake_enable_display_irq(dev_priv, (pipe == 0) ? | |
0206e353 | 1717 | DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); |
f796cf8f JB |
1718 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
1719 | ||
1720 | return 0; | |
1721 | } | |
1722 | ||
f71d4af4 | 1723 | static int ivybridge_enable_vblank(struct drm_device *dev, int pipe) |
b1f14ad0 JB |
1724 | { |
1725 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
1726 | unsigned long irqflags; | |
1727 | ||
1728 | if (!i915_pipe_enabled(dev, pipe)) | |
1729 | return -EINVAL; | |
1730 | ||
1731 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
b615b57a CW |
1732 | ironlake_enable_display_irq(dev_priv, |
1733 | DE_PIPEA_VBLANK_IVB << (5 * pipe)); | |
b1f14ad0 JB |
1734 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
1735 | ||
1736 | return 0; | |
1737 | } | |
1738 | ||
7e231dbe JB |
1739 | static int valleyview_enable_vblank(struct drm_device *dev, int pipe) |
1740 | { | |
1741 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
1742 | unsigned long irqflags; | |
31acc7f5 | 1743 | u32 imr; |
7e231dbe JB |
1744 | |
1745 | if (!i915_pipe_enabled(dev, pipe)) | |
1746 | return -EINVAL; | |
1747 | ||
1748 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
7e231dbe | 1749 | imr = I915_READ(VLV_IMR); |
31acc7f5 | 1750 | if (pipe == 0) |
7e231dbe | 1751 | imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; |
31acc7f5 | 1752 | else |
7e231dbe | 1753 | imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; |
7e231dbe | 1754 | I915_WRITE(VLV_IMR, imr); |
31acc7f5 JB |
1755 | i915_enable_pipestat(dev_priv, pipe, |
1756 | PIPE_START_VBLANK_INTERRUPT_ENABLE); | |
7e231dbe JB |
1757 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
1758 | ||
1759 | return 0; | |
1760 | } | |
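/*
 * The enable hooks above and the matching disable hooks below are all
 * reached the same way: the drm core's drm_vblank_get()/drm_vblank_put()
 * invoke the driver's enable_vblank/disable_vblank with the pipe index,
 * and only the register that encodes the vblank bit differs per
 * generation (PIPESTAT, DEIMR, or VLV_IMR). A hypothetical caller, for
 * illustration only:
 */
#if 0
	if (drm_vblank_get(dev, pipe) == 0) {	/* may call ->enable_vblank() */
		/* ... wait for or count vblank events ... */
		drm_vblank_put(dev, pipe);	/* may let ->disable_vblank() run */
	}
#endif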
1761 | ||
42f52ef8 KP |
1762 | /* Called from drm generic code, passed 'crtc' which |
1763 | * we use as a pipe index | |
1764 | */ | |
f71d4af4 | 1765 | static void i915_disable_vblank(struct drm_device *dev, int pipe) |
0a3e67a4 JB |
1766 | { |
1767 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
e9d21d7f | 1768 | unsigned long irqflags; |
0a3e67a4 | 1769 | |
1ec14ad3 | 1770 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
8692d00e | 1771 | if (dev_priv->info->gen == 3) |
6b26c86d | 1772 | I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS)); |
8692d00e | 1773 | |
f796cf8f JB |
1774 | i915_disable_pipestat(dev_priv, pipe, |
1775 | PIPE_VBLANK_INTERRUPT_ENABLE | | |
1776 | PIPE_START_VBLANK_INTERRUPT_ENABLE); | |
1777 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | |
1778 | } | |
1779 | ||
f71d4af4 | 1780 | static void ironlake_disable_vblank(struct drm_device *dev, int pipe) |
f796cf8f JB |
1781 | { |
1782 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
1783 | unsigned long irqflags; | |
1784 | ||
1785 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
1786 | ironlake_disable_display_irq(dev_priv, (pipe == 0) ? | |
0206e353 | 1787 | DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); |
1ec14ad3 | 1788 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
0a3e67a4 JB |
1789 | } |
1790 | ||
f71d4af4 | 1791 | static void ivybridge_disable_vblank(struct drm_device *dev, int pipe) |
b1f14ad0 JB |
1792 | { |
1793 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
1794 | unsigned long irqflags; | |
1795 | ||
1796 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
b615b57a CW |
1797 | ironlake_disable_display_irq(dev_priv, |
1798 | DE_PIPEA_VBLANK_IVB << (pipe * 5)); | |
b1f14ad0 JB |
1799 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
1800 | } | |
1801 | ||
7e231dbe JB |
1802 | static void valleyview_disable_vblank(struct drm_device *dev, int pipe) |
1803 | { | |
1804 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
1805 | unsigned long irqflags; | |
31acc7f5 | 1806 | u32 imr; |
7e231dbe JB |
1807 | |
1808 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
31acc7f5 JB |
1809 | i915_disable_pipestat(dev_priv, pipe, |
1810 | PIPE_START_VBLANK_INTERRUPT_ENABLE); | |
7e231dbe | 1811 | imr = I915_READ(VLV_IMR); |
31acc7f5 | 1812 | if (pipe == 0) |
7e231dbe | 1813 | imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; |
31acc7f5 | 1814 | else |
7e231dbe | 1815 | imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; |
7e231dbe | 1816 | I915_WRITE(VLV_IMR, imr); |
7e231dbe JB |
1817 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
1818 | } | |
1819 | ||
893eead0 CW |
1820 | static u32 |
1821 | ring_last_seqno(struct intel_ring_buffer *ring) | |
852835f3 | 1822 | { |
893eead0 CW |
1823 | return list_entry(ring->request_list.prev, |
1824 | struct drm_i915_gem_request, list)->seqno; | |
1825 | } | |
1826 | ||
9107e9d2 CW |
1827 | static bool |
1828 | ring_idle(struct intel_ring_buffer *ring, u32 seqno) | |
1829 | { | |
1830 | return (list_empty(&ring->request_list) || | |
1831 | i915_seqno_passed(seqno, ring_last_seqno(ring))); | |
f65d9421 BG |
1832 | } |
1833 | ||
6274f212 CW |
1834 | static struct intel_ring_buffer * |
1835 | semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno) | |
a24a11e6 CW |
1836 | { |
1837 | struct drm_i915_private *dev_priv = ring->dev->dev_private; | |
6274f212 | 1838 | u32 cmd, ipehr, acthd, acthd_min; |
a24a11e6 CW |
1839 | |
1840 | ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); | |
1841 | if ((ipehr & ~(0x3 << 16)) != | |
1842 | (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER)) | |
6274f212 | 1843 | return NULL; |
a24a11e6 CW |
1844 | |
1845 | /* ACTHD is likely pointing to the dword after the actual command, | |
1846 | * so scan backwards until we find the MBOX. | |
1847 | */ | |
6274f212 | 1848 | acthd = intel_ring_get_active_head(ring) & HEAD_ADDR; |
a24a11e6 CW |
1849 | acthd_min = max((int)acthd - 3 * 4, 0); |
1850 | do { | |
1851 | cmd = ioread32(ring->virtual_start + acthd); | |
1852 | if (cmd == ipehr) | |
1853 | break; | |
1854 | ||
1855 | acthd -= 4; | |
1856 | if (acthd < acthd_min) | |
6274f212 | 1857 | return NULL; |
a24a11e6 CW |
1858 | } while (1); |
1859 | ||
6274f212 CW |
1860 | *seqno = ioread32(ring->virtual_start+acthd+4)+1; |
1861 | return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3]; | |
a24a11e6 CW |
1862 | } |
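/*
 * Worked example for the index math above, assuming the gen6 three-ring
 * layout RCS=0, VCS=1, BCS=2: the IPEHR comparison ignores bits 17:16,
 * and bit 17 then selects which of the other two rings is the expected
 * signaller. For the render ring with bit 17 clear this gives
 * (0 + 0 + 1) % 3 == 1, i.e. VCS; with bit 17 set it gives
 * (0 + 1 + 1) % 3 == 2, i.e. BCS.
 */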
1863 | ||
6274f212 CW |
1864 | static int semaphore_passed(struct intel_ring_buffer *ring) |
1865 | { | |
1866 | struct drm_i915_private *dev_priv = ring->dev->dev_private; | |
1867 | struct intel_ring_buffer *signaller; | |
1868 | u32 seqno, ctl; | |
1869 | ||
1870 | ring->hangcheck.deadlock = true; | |
1871 | ||
1872 | signaller = semaphore_waits_for(ring, &seqno); | |
1873 | if (signaller == NULL || signaller->hangcheck.deadlock) | |
1874 | return -1; | |
1875 | ||
1876 | /* cursory check for an unkickable deadlock */ | |
1877 | ctl = I915_READ_CTL(signaller); | |
1878 | if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0) | |
1879 | return -1; | |
1880 | ||
1881 | return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno); | |
1882 | } | |
1883 | ||
1884 | static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) | |
1885 | { | |
1886 | struct intel_ring_buffer *ring; | |
1887 | int i; | |
1888 | ||
1889 | for_each_ring(ring, dev_priv, i) | |
1890 | ring->hangcheck.deadlock = false; | |
1891 | } | |
1892 | ||
ad8beaea MK |
1893 | static enum intel_ring_hangcheck_action |
1894 | ring_stuck(struct intel_ring_buffer *ring, u32 acthd) | |
1ec14ad3 CW |
1895 | { |
1896 | struct drm_device *dev = ring->dev; | |
1897 | struct drm_i915_private *dev_priv = dev->dev_private; | |
9107e9d2 CW |
1898 | u32 tmp; |
1899 | ||
6274f212 CW |
1900 | if (ring->hangcheck.acthd != acthd) |
1901 | return active; | |
1902 | ||
9107e9d2 | 1903 | if (IS_GEN2(dev)) |
6274f212 | 1904 | return hung; |
9107e9d2 CW |
1905 | |
1906 | /* Is the chip hanging on a WAIT_FOR_EVENT? | |
1907 | * If so we can simply poke the RB_WAIT bit | |
1908 | * and break the hang. This should work on | |
1909 | * all but the second generation chipsets. | |
1910 | */ | |
1911 | tmp = I915_READ_CTL(ring); | |
1ec14ad3 CW |
1912 | if (tmp & RING_WAIT) { |
1913 | DRM_ERROR("Kicking stuck wait on %s\n", | |
1914 | ring->name); | |
1915 | I915_WRITE_CTL(ring, tmp); | |
6274f212 CW |
1916 | return kick; |
1917 | } | |
1918 | ||
1919 | if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { | |
1920 | switch (semaphore_passed(ring)) { | |
1921 | default: | |
1922 | return hung; | |
1923 | case 1: | |
1924 | DRM_ERROR("Kicking stuck semaphore on %s\n", | |
1925 | ring->name); | |
1926 | I915_WRITE_CTL(ring, tmp); | |
1927 | return kick; | |
1928 | case 0: | |
1929 | return wait; | |
1930 | } | |
9107e9d2 | 1931 | } |
ed5cbb03 | 1932 | |
6274f212 | 1933 | return hung; |
ed5cbb03 MK |
1934 | } |
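/*
 * Summary of the verdicts above: "active" means ACTHD moved since the
 * last check, "wait" means a semaphore wait that still looks legitimate,
 * "kick" means we just poked RB_WAIT or a stuck semaphore and should see
 * movement next time, and "hung" means no progress and nothing left to
 * kick.
 */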
1935 | ||
f65d9421 BG |
1936 | /** |
1937 | * This is called when the chip hasn't reported back with completed | |
05407ff8 MK |
1938 | * batchbuffers in a long time. We keep track of per-ring seqno progress and
1939 | * if there is no progress, the hangcheck score for that ring is increased.
1940 | * Further, acthd is inspected to see if the ring is stuck. If it is, we
1941 | * kick the ring. If we see no progress on three subsequent calls
1942 | * we assume the chip is wedged and try to fix it by resetting the chip.
f65d9421 BG |
1943 | */ |
1944 | void i915_hangcheck_elapsed(unsigned long data) | |
1945 | { | |
1946 | struct drm_device *dev = (struct drm_device *)data; | |
1947 | drm_i915_private_t *dev_priv = dev->dev_private; | |
b4519513 | 1948 | struct intel_ring_buffer *ring; |
b4519513 | 1949 | int i; |
05407ff8 | 1950 | int busy_count = 0, rings_hung = 0; |
9107e9d2 CW |
1951 | bool stuck[I915_NUM_RINGS] = { 0 }; |
1952 | #define BUSY 1 | |
1953 | #define KICK 5 | |
1954 | #define HUNG 20 | |
1955 | #define FIRE 30 | |
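/*
 * Rough arithmetic for these thresholds: a ring rated "hung" collects
 * HUNG (20) per check, so two consecutive checks exceed FIRE (30) and
 * trigger the reset path below; a ring that is merely busy collects
 * BUSY (1) per check and needs over 30 checks without seqno progress.
 * Assuming the usual hangcheck period (DRM_I915_HANGCHECK_JIFFIES), that
 * is seconds for a hard hang versus tens of seconds for a slow grind.
 */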
893eead0 | 1956 | |
3e0dc6b0 BW |
1957 | if (!i915_enable_hangcheck) |
1958 | return; | |
1959 | ||
b4519513 | 1960 | for_each_ring(ring, dev_priv, i) { |
05407ff8 | 1961 | u32 seqno, acthd; |
9107e9d2 | 1962 | bool busy = true; |
05407ff8 | 1963 | |
6274f212 CW |
1964 | semaphore_clear_deadlocks(dev_priv); |
1965 | ||
05407ff8 MK |
1966 | seqno = ring->get_seqno(ring, false); |
1967 | acthd = intel_ring_get_active_head(ring); | |
b4519513 | 1968 | |
9107e9d2 CW |
1969 | if (ring->hangcheck.seqno == seqno) { |
1970 | if (ring_idle(ring, seqno)) { | |
1971 | if (waitqueue_active(&ring->irq_queue)) { | |
1972 | /* Issue a wake-up to catch stuck h/w. */ | |
1973 | DRM_ERROR("Hangcheck timer elapsed... %s idle\n", | |
1974 | ring->name); | |
1975 | wake_up_all(&ring->irq_queue); | |
1976 | ring->hangcheck.score += HUNG; | |
1977 | } else | |
1978 | busy = false; | |
05407ff8 | 1979 | } else { |
9107e9d2 CW |
1980 | int score; |
1981 | ||
6274f212 CW |
1982 | /* We always increment the hangcheck score |
1983 | * if the ring is busy and still processing | |
1984 | * the same request, so that no single request | |
1985 | * can run indefinitely (such as a chain of | |
1986 | * batches). The only time we do not increment | |
1987 | * the hangcheck score on this ring is if this
1988 | * ring is in a legitimate wait for another | |
1989 | * ring. In that case the waiting ring is a | |
1990 | * victim and we want to be sure we catch the | |
1991 | * right culprit. Then every time we do kick | |
1992 | * the ring, add a small increment to the | |
1993 | * score so that we can catch a batch that is | |
1994 | * being repeatedly kicked and so responsible | |
1995 | * for stalling the machine. | |
1996 | */ | |
ad8beaea MK |
1997 | ring->hangcheck.action = ring_stuck(ring, |
1998 | acthd); | |
1999 | ||
2000 | switch (ring->hangcheck.action) { | |
6274f212 CW |
2001 | case wait: |
2002 | score = 0; | |
2003 | break; | |
2004 | case active: | |
9107e9d2 | 2005 | score = BUSY; |
6274f212 CW |
2006 | break; |
2007 | case kick: | |
2008 | score = KICK; | |
2009 | break; | |
2010 | case hung: | |
2011 | score = HUNG; | |
2012 | stuck[i] = true; | |
2013 | break; | |
2014 | } | |
9107e9d2 | 2015 | ring->hangcheck.score += score; |
05407ff8 | 2016 | } |
9107e9d2 CW |
2017 | } else { |
2018 | /* Gradually reduce the count so that we catch DoS | |
2019 | * attempts across multiple batches. | |
2020 | */ | |
2021 | if (ring->hangcheck.score > 0) | |
2022 | ring->hangcheck.score--; | |
d1e61e7f CW |
2023 | } |
2024 | ||
05407ff8 MK |
2025 | ring->hangcheck.seqno = seqno; |
2026 | ring->hangcheck.acthd = acthd; | |
9107e9d2 | 2027 | busy_count += busy; |
893eead0 | 2028 | } |
b9201c14 | 2029 | |
92cab734 | 2030 | for_each_ring(ring, dev_priv, i) { |
9107e9d2 | 2031 | if (ring->hangcheck.score > FIRE) { |
acd78c11 | 2032 | DRM_ERROR("%s on %s\n", |
05407ff8 | 2033 | stuck[i] ? "stuck" : "no progress", |
a43adf07 CW |
2034 | ring->name); |
2035 | rings_hung++; | |
92cab734 MK |
2036 | } |
2037 | } | |
2038 | ||
05407ff8 MK |
2039 | if (rings_hung) |
2040 | return i915_handle_error(dev, true); | |
f65d9421 | 2041 | |
05407ff8 MK |
2042 | if (busy_count) |
2043 | /* Reset timer in case chip hangs without another request
2044 | * being added */ | |
10cd45b6 MK |
2045 | i915_queue_hangcheck(dev); |
2046 | } | |
2047 | ||
2048 | void i915_queue_hangcheck(struct drm_device *dev) | |
2049 | { | |
2050 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2051 | if (!i915_enable_hangcheck) | |
2052 | return; | |
2053 | ||
2054 | mod_timer(&dev_priv->gpu_error.hangcheck_timer, | |
2055 | round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); | |
f65d9421 BG |
2056 | } |
2057 | ||
91738a95 PZ |
2058 | static void ibx_irq_preinstall(struct drm_device *dev) |
2059 | { | |
2060 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2061 | ||
2062 | if (HAS_PCH_NOP(dev)) | |
2063 | return; | |
2064 | ||
2065 | /* south display irq */ | |
2066 | I915_WRITE(SDEIMR, 0xffffffff); | |
2067 | /* | |
2068 | * SDEIER is also touched by the interrupt handler to work around missed | |
2069 | * PCH interrupts. Hence we can't update it after the interrupt handler | |
2070 | * is enabled - instead we unconditionally enable all PCH interrupt | |
2071 | * sources here, but then only unmask them as needed with SDEIMR. | |
2072 | */ | |
2073 | I915_WRITE(SDEIER, 0xffffffff); | |
2074 | POSTING_READ(SDEIER); | |
2075 | } | |
2076 | ||
d18ea1b5 DV |
2077 | static void gen5_gt_irq_preinstall(struct drm_device *dev) |
2078 | { | |
2079 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2080 | ||
2081 | /* and GT */ | |
2082 | I915_WRITE(GTIMR, 0xffffffff); | |
2083 | I915_WRITE(GTIER, 0x0); | |
2084 | POSTING_READ(GTIER); | |
2085 | ||
2086 | if (INTEL_INFO(dev)->gen >= 6) { | |
2087 | /* and PM */ | |
2088 | I915_WRITE(GEN6_PMIMR, 0xffffffff); | |
2089 | I915_WRITE(GEN6_PMIER, 0x0); | |
2090 | POSTING_READ(GEN6_PMIER); | |
2091 | } | |
2092 | } | |
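/*
 * drm core lifecycle reminder: the *_irq_preinstall hooks run before
 * request_irq() to quiesce every source, and the *_irq_postinstall hooks
 * run after it to unmask only what the driver wants; both are invoked
 * from drm_irq_install().
 */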
2093 | ||
1da177e4 LT |
2094 | /* drm_dma.h hooks |
2095 | */ | |
f71d4af4 | 2096 | static void ironlake_irq_preinstall(struct drm_device *dev) |
036a4a7d ZW |
2097 | { |
2098 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
2099 | ||
4697995b JB |
2100 | atomic_set(&dev_priv->irq_received, 0); |
2101 | ||
036a4a7d | 2102 | I915_WRITE(HWSTAM, 0xeffe); |
bdfcdb63 | 2103 | |
036a4a7d ZW |
2104 | I915_WRITE(DEIMR, 0xffffffff); |
2105 | I915_WRITE(DEIER, 0x0); | |
3143a2bf | 2106 | POSTING_READ(DEIER); |
036a4a7d | 2107 | |
d18ea1b5 | 2108 | gen5_gt_irq_preinstall(dev); |
c650156a | 2109 | |
91738a95 | 2110 | ibx_irq_preinstall(dev); |
7d99163d BW |
2111 | } |
2112 | ||
7e231dbe JB |
2113 | static void valleyview_irq_preinstall(struct drm_device *dev) |
2114 | { | |
2115 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
2116 | int pipe; | |
2117 | ||
2118 | atomic_set(&dev_priv->irq_received, 0); | |
2119 | ||
7e231dbe JB |
2120 | /* VLV magic */ |
2121 | I915_WRITE(VLV_IMR, 0); | |
2122 | I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); | |
2123 | I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); | |
2124 | I915_WRITE(RING_IMR(BLT_RING_BASE), 0); | |
2125 | ||
7e231dbe JB |
2126 | /* and GT */ |
2127 | I915_WRITE(GTIIR, I915_READ(GTIIR)); | |
2128 | I915_WRITE(GTIIR, I915_READ(GTIIR)); | |
d18ea1b5 DV |
2129 | |
2130 | gen5_gt_irq_preinstall(dev); | |
7e231dbe JB |
2131 | |
2132 | I915_WRITE(DPINVGTT, 0xff); | |
2133 | ||
2134 | I915_WRITE(PORT_HOTPLUG_EN, 0); | |
2135 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | |
2136 | for_each_pipe(pipe) | |
2137 | I915_WRITE(PIPESTAT(pipe), 0xffff); | |
2138 | I915_WRITE(VLV_IIR, 0xffffffff); | |
2139 | I915_WRITE(VLV_IMR, 0xffffffff); | |
2140 | I915_WRITE(VLV_IER, 0x0); | |
2141 | POSTING_READ(VLV_IER); | |
2142 | } | |
2143 | ||
82a28bcf | 2144 | static void ibx_hpd_irq_setup(struct drm_device *dev) |
7fe0b973 KP |
2145 | { |
2146 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
82a28bcf DV |
2147 | struct drm_mode_config *mode_config = &dev->mode_config; |
2148 | struct intel_encoder *intel_encoder; | |
fee884ed | 2149 | u32 hotplug_irqs, hotplug, enabled_irqs = 0; |
82a28bcf DV |
2150 | |
2151 | if (HAS_PCH_IBX(dev)) { | |
fee884ed | 2152 | hotplug_irqs = SDE_HOTPLUG_MASK; |
82a28bcf | 2153 | list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) |
cd569aed | 2154 | if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) |
fee884ed | 2155 | enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin]; |
82a28bcf | 2156 | } else { |
fee884ed | 2157 | hotplug_irqs = SDE_HOTPLUG_MASK_CPT; |
82a28bcf | 2158 | list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) |
cd569aed | 2159 | if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) |
fee884ed | 2160 | enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin]; |
82a28bcf | 2161 | } |
7fe0b973 | 2162 | |
fee884ed | 2163 | ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); |
82a28bcf DV |
2164 | |
2165 | /* | |
2166 | * Enable digital hotplug on the PCH, and configure the DP short pulse | |
2167 | * duration to 2ms (which is the minimum in the Display Port spec) | |
2168 | * | |
2169 | * This register is the same on all known PCH chips. | |
2170 | */ | |
7fe0b973 KP |
2171 | hotplug = I915_READ(PCH_PORT_HOTPLUG); |
2172 | hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); | |
2173 | hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; | |
2174 | hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; | |
2175 | hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; | |
2176 | I915_WRITE(PCH_PORT_HOTPLUG, hotplug); | |
2177 | } | |
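/*
 * A minimal usage sketch of the per-pin tables consumed above: each
 * encoder's hpd_pin indexes hpd_ibx[]/hpd_cpt[] to pick the SDE bit, so a
 * hypothetical configuration enabling only port D hotplug on an IBX PCH
 * would reduce to:
 *
 *	ibx_display_interrupt_update(dev_priv, SDE_HOTPLUG_MASK,
 *				     hpd_ibx[HPD_PORT_D]);
 */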
2178 | ||
d46da437 PZ |
2179 | static void ibx_irq_postinstall(struct drm_device *dev) |
2180 | { | |
2181 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
82a28bcf | 2182 | u32 mask; |
e5868a31 | 2183 | |
692a04cf DV |
2184 | if (HAS_PCH_NOP(dev)) |
2185 | return; | |
2186 | ||
8664281b PZ |
2187 | if (HAS_PCH_IBX(dev)) { |
2188 | mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER | | |
de032bf4 | 2189 | SDE_TRANSA_FIFO_UNDER | SDE_POISON; |
8664281b PZ |
2190 | } else { |
2191 | mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT; | |
2192 | ||
2193 | I915_WRITE(SERR_INT, I915_READ(SERR_INT)); | |
2194 | } | |
ab5c608b | 2195 | |
d46da437 PZ |
2196 | I915_WRITE(SDEIIR, I915_READ(SDEIIR)); |
2197 | I915_WRITE(SDEIMR, ~mask); | |
d46da437 PZ |
2198 | } |
2199 | ||
0a9a8c91 DV |
2200 | static void gen5_gt_irq_postinstall(struct drm_device *dev) |
2201 | { | |
2202 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2203 | u32 pm_irqs, gt_irqs; | |
2204 | ||
2205 | pm_irqs = gt_irqs = 0; | |
2206 | ||
2207 | dev_priv->gt_irq_mask = ~0; | |
2208 | if (HAS_L3_GPU_CACHE(dev)) { | |
2209 | /* L3 parity interrupt is always unmasked. */ | |
2210 | dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT; | |
2211 | gt_irqs |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; | |
2212 | } | |
2213 | ||
2214 | gt_irqs |= GT_RENDER_USER_INTERRUPT; | |
2215 | if (IS_GEN5(dev)) { | |
2216 | gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | | |
2217 | ILK_BSD_USER_INTERRUPT; | |
2218 | } else { | |
2219 | gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; | |
2220 | } | |
2221 | ||
2222 | I915_WRITE(GTIIR, I915_READ(GTIIR)); | |
2223 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); | |
2224 | I915_WRITE(GTIER, gt_irqs); | |
2225 | POSTING_READ(GTIER); | |
2226 | ||
2227 | if (INTEL_INFO(dev)->gen >= 6) { | |
2228 | pm_irqs |= GEN6_PM_RPS_EVENTS; | |
2229 | ||
2230 | if (HAS_VEBOX(dev)) | |
2231 | pm_irqs |= PM_VEBOX_USER_INTERRUPT; | |
2232 | ||
2233 | I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); | |
2234 | I915_WRITE(GEN6_PMIMR, 0xffffffff); | |
2235 | I915_WRITE(GEN6_PMIER, pm_irqs); | |
2236 | POSTING_READ(GEN6_PMIER); | |
2237 | } | |
2238 | } | |
2239 | ||
f71d4af4 | 2240 | static int ironlake_irq_postinstall(struct drm_device *dev) |
036a4a7d | 2241 | { |
4bc9d430 DV |
2242 | unsigned long irqflags; |
2243 | ||
036a4a7d ZW |
2244 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
2245 | /* enable the kinds of interrupts that are always enabled */
013d5aa2 | 2246 | u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | |
ce99c256 | 2247 | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | |
8664281b | 2248 | DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN | |
de032bf4 | 2249 | DE_PIPEA_FIFO_UNDERRUN | DE_POISON; |
036a4a7d | 2250 | |
1ec14ad3 | 2251 | dev_priv->irq_mask = ~display_mask; |
036a4a7d ZW |
2252 | |
2253 | /* should always be able to generate irqs */
2254 | I915_WRITE(DEIIR, I915_READ(DEIIR)); | |
1ec14ad3 | 2255 | I915_WRITE(DEIMR, dev_priv->irq_mask); |
6005ce42 DV |
2256 | I915_WRITE(DEIER, display_mask | |
2257 | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT); | |
3143a2bf | 2258 | POSTING_READ(DEIER); |
036a4a7d | 2259 | |
0a9a8c91 | 2260 | gen5_gt_irq_postinstall(dev); |
036a4a7d | 2261 | |
d46da437 | 2262 | ibx_irq_postinstall(dev); |
7fe0b973 | 2263 | |
f97108d1 | 2264 | if (IS_IRONLAKE_M(dev)) { |
6005ce42 DV |
2265 | /* Enable PCU event interrupts |
2266 | * | |
2267 | * spinlocking not required here for correctness since interrupt | |
4bc9d430 DV |
2268 | * setup is guaranteed to run in single-threaded context. But we |
2269 | * need it to make the assert_spin_locked happy. */ | |
2270 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
f97108d1 | 2271 | ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); |
4bc9d430 | 2272 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
f97108d1 JB |
2273 | } |
2274 | ||
036a4a7d ZW |
2275 | return 0; |
2276 | } | |
2277 | ||
f71d4af4 | 2278 | static int ivybridge_irq_postinstall(struct drm_device *dev) |
b1f14ad0 JB |
2279 | { |
2280 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
2281 | /* enable the kinds of interrupts that are always enabled */
b615b57a CW |
2282 | u32 display_mask = |
2283 | DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB | | |
2284 | DE_PLANEC_FLIP_DONE_IVB | | |
2285 | DE_PLANEB_FLIP_DONE_IVB | | |
ce99c256 | 2286 | DE_PLANEA_FLIP_DONE_IVB | |
8664281b PZ |
2287 | DE_AUX_CHANNEL_A_IVB | |
2288 | DE_ERR_INT_IVB; | |
b1f14ad0 | 2289 | |
b1f14ad0 JB |
2290 | dev_priv->irq_mask = ~display_mask; |
2291 | ||
2292 | /* should always be able to generate irqs */
8664281b | 2293 | I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); |
b1f14ad0 JB |
2294 | I915_WRITE(DEIIR, I915_READ(DEIIR)); |
2295 | I915_WRITE(DEIMR, dev_priv->irq_mask); | |
b615b57a CW |
2296 | I915_WRITE(DEIER, |
2297 | display_mask | | |
2298 | DE_PIPEC_VBLANK_IVB | | |
2299 | DE_PIPEB_VBLANK_IVB | | |
2300 | DE_PIPEA_VBLANK_IVB); | |
b1f14ad0 JB |
2301 | POSTING_READ(DEIER); |
2302 | ||
0a9a8c91 | 2303 | gen5_gt_irq_postinstall(dev); |
eda63ffb | 2304 | |
d46da437 | 2305 | ibx_irq_postinstall(dev); |
7fe0b973 | 2306 | |
b1f14ad0 JB |
2307 | return 0; |
2308 | } | |
2309 | ||
7e231dbe JB |
2310 | static int valleyview_irq_postinstall(struct drm_device *dev) |
2311 | { | |
2312 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
7e231dbe | 2313 | u32 enable_mask; |
31acc7f5 | 2314 | u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; |
b79480ba | 2315 | unsigned long irqflags; |
7e231dbe JB |
2316 | |
2317 | enable_mask = I915_DISPLAY_PORT_INTERRUPT; | |
31acc7f5 JB |
2318 | enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | |
2319 | I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | | |
2320 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | | |
7e231dbe JB |
2321 | I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; |
2322 | ||
31acc7f5 JB |
2323 | /* |
2324 | * Leave vblank interrupts masked initially. enable/disable will
2325 | * toggle them based on usage. | |
2326 | */ | |
2327 | dev_priv->irq_mask = (~enable_mask) | | |
2328 | I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | | |
2329 | I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; | |
7e231dbe | 2330 | |
20afbda2 DV |
2331 | I915_WRITE(PORT_HOTPLUG_EN, 0); |
2332 | POSTING_READ(PORT_HOTPLUG_EN); | |
2333 | ||
7e231dbe JB |
2334 | I915_WRITE(VLV_IMR, dev_priv->irq_mask); |
2335 | I915_WRITE(VLV_IER, enable_mask); | |
2336 | I915_WRITE(VLV_IIR, 0xffffffff); | |
2337 | I915_WRITE(PIPESTAT(0), 0xffff); | |
2338 | I915_WRITE(PIPESTAT(1), 0xffff); | |
2339 | POSTING_READ(VLV_IER); | |
2340 | ||
b79480ba DV |
2341 | /* Interrupt setup is already guaranteed to be single-threaded, this is |
2342 | * just to make the assert_spin_locked check happy. */ | |
2343 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
31acc7f5 | 2344 | i915_enable_pipestat(dev_priv, 0, pipestat_enable); |
515ac2bb | 2345 | i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); |
31acc7f5 | 2346 | i915_enable_pipestat(dev_priv, 1, pipestat_enable); |
b79480ba | 2347 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
31acc7f5 | 2348 | |
7e231dbe JB |
2349 | I915_WRITE(VLV_IIR, 0xffffffff); |
2350 | I915_WRITE(VLV_IIR, 0xffffffff); | |
2351 | ||
0a9a8c91 | 2352 | gen5_gt_irq_postinstall(dev); |
7e231dbe JB |
2353 | |
2354 | /* ack & enable invalid PTE error interrupts */ | |
2355 | #if 0 /* FIXME: add support to irq handler for checking these bits */ | |
2356 | I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); | |
2357 | I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); | |
2358 | #endif | |
2359 | ||
2360 | I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); | |
20afbda2 DV |
2361 | |
2362 | return 0; | |
2363 | } | |
2364 | ||
7e231dbe JB |
2365 | static void valleyview_irq_uninstall(struct drm_device *dev) |
2366 | { | |
2367 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
2368 | int pipe; | |
2369 | ||
2370 | if (!dev_priv) | |
2371 | return; | |
2372 | ||
ac4c16c5 EE |
2373 | del_timer_sync(&dev_priv->hotplug_reenable_timer); |
2374 | ||
7e231dbe JB |
2375 | for_each_pipe(pipe) |
2376 | I915_WRITE(PIPESTAT(pipe), 0xffff); | |
2377 | ||
2378 | I915_WRITE(HWSTAM, 0xffffffff); | |
2379 | I915_WRITE(PORT_HOTPLUG_EN, 0); | |
2380 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | |
2381 | for_each_pipe(pipe) | |
2382 | I915_WRITE(PIPESTAT(pipe), 0xffff); | |
2383 | I915_WRITE(VLV_IIR, 0xffffffff); | |
2384 | I915_WRITE(VLV_IMR, 0xffffffff); | |
2385 | I915_WRITE(VLV_IER, 0x0); | |
2386 | POSTING_READ(VLV_IER); | |
2387 | } | |
2388 | ||
f71d4af4 | 2389 | static void ironlake_irq_uninstall(struct drm_device *dev) |
036a4a7d ZW |
2390 | { |
2391 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
4697995b JB |
2392 | |
2393 | if (!dev_priv) | |
2394 | return; | |
2395 | ||
ac4c16c5 EE |
2396 | del_timer_sync(&dev_priv->hotplug_reenable_timer); |
2397 | ||
036a4a7d ZW |
2398 | I915_WRITE(HWSTAM, 0xffffffff); |
2399 | ||
2400 | I915_WRITE(DEIMR, 0xffffffff); | |
2401 | I915_WRITE(DEIER, 0x0); | |
2402 | I915_WRITE(DEIIR, I915_READ(DEIIR)); | |
8664281b PZ |
2403 | if (IS_GEN7(dev)) |
2404 | I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); | |
036a4a7d ZW |
2405 | |
2406 | I915_WRITE(GTIMR, 0xffffffff); | |
2407 | I915_WRITE(GTIER, 0x0); | |
2408 | I915_WRITE(GTIIR, I915_READ(GTIIR)); | |
192aac1f | 2409 | |
ab5c608b BW |
2410 | if (HAS_PCH_NOP(dev)) |
2411 | return; | |
2412 | ||
192aac1f KP |
2413 | I915_WRITE(SDEIMR, 0xffffffff); |
2414 | I915_WRITE(SDEIER, 0x0); | |
2415 | I915_WRITE(SDEIIR, I915_READ(SDEIIR)); | |
8664281b PZ |
2416 | if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev)) |
2417 | I915_WRITE(SERR_INT, I915_READ(SERR_INT)); | |
036a4a7d ZW |
2418 | } |
2419 | ||
a266c7d5 | 2420 | static void i8xx_irq_preinstall(struct drm_device * dev) |
1da177e4 LT |
2421 | { |
2422 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
9db4a9c7 | 2423 | int pipe; |
91e3738e | 2424 | |
a266c7d5 | 2425 | atomic_set(&dev_priv->irq_received, 0); |
5ca58282 | 2426 | |
9db4a9c7 JB |
2427 | for_each_pipe(pipe) |
2428 | I915_WRITE(PIPESTAT(pipe), 0); | |
a266c7d5 CW |
2429 | I915_WRITE16(IMR, 0xffff); |
2430 | I915_WRITE16(IER, 0x0); | |
2431 | POSTING_READ16(IER); | |
c2798b19 CW |
2432 | } |
2433 | ||
2434 | static int i8xx_irq_postinstall(struct drm_device *dev) | |
2435 | { | |
2436 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
2437 | ||
c2798b19 CW |
2438 | I915_WRITE16(EMR, |
2439 | ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); | |
2440 | ||
2441 | /* Unmask the interrupts that we always want on. */ | |
2442 | dev_priv->irq_mask = | |
2443 | ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | | |
2444 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | | |
2445 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | | |
2446 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | | |
2447 | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); | |
2448 | I915_WRITE16(IMR, dev_priv->irq_mask); | |
2449 | ||
2450 | I915_WRITE16(IER, | |
2451 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | | |
2452 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | | |
2453 | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | | |
2454 | I915_USER_INTERRUPT); | |
2455 | POSTING_READ16(IER); | |
2456 | ||
2457 | return 0; | |
2458 | } | |
2459 | ||
90a72f87 VS |
2460 | /* |
2461 | * Returns true when a page flip has completed. | |
2462 | */ | |
2463 | static bool i8xx_handle_vblank(struct drm_device *dev, | |
2464 | int pipe, u16 iir) | |
2465 | { | |
2466 | drm_i915_private_t *dev_priv = dev->dev_private; | |
2467 | u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe); | |
2468 | ||
2469 | if (!drm_handle_vblank(dev, pipe)) | |
2470 | return false; | |
2471 | ||
2472 | if ((iir & flip_pending) == 0) | |
2473 | return false; | |
2474 | ||
2475 | intel_prepare_page_flip(dev, pipe); | |
2476 | ||
2477 | /* We detect FlipDone by looking for the change in PendingFlip from '1' | |
2478 | * to '0' on the following vblank, i.e. IIR has the Pendingflip | |
2479 | * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence | |
2480 | * the flip is completed (no longer pending). Since this doesn't raise | |
2481 | * an interrupt per se, we watch for the change at vblank. | |
2482 | */ | |
2483 | if (I915_READ16(ISR) & flip_pending) | |
2484 | return false; | |
2485 | ||
2486 | intel_finish_page_flip(dev, pipe); | |
2487 | ||
2488 | return true; | |
2489 | } | |
2490 | ||
ff1f525e | 2491 | static irqreturn_t i8xx_irq_handler(int irq, void *arg) |
c2798b19 CW |
2492 | { |
2493 | struct drm_device *dev = (struct drm_device *) arg; | |
2494 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
c2798b19 CW |
2495 | u16 iir, new_iir; |
2496 | u32 pipe_stats[2]; | |
2497 | unsigned long irqflags; | |
2498 | int irq_received; | |
2499 | int pipe; | |
2500 | u16 flip_mask = | |
2501 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | | |
2502 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; | |
2503 | ||
2504 | atomic_inc(&dev_priv->irq_received); | |
2505 | ||
2506 | iir = I915_READ16(IIR); | |
2507 | if (iir == 0) | |
2508 | return IRQ_NONE; | |
2509 | ||
2510 | while (iir & ~flip_mask) { | |
2511 | /* Can't rely on pipestat interrupt bit in iir as it might | |
2512 | * have been cleared after the pipestat interrupt was received. | |
2513 | * It doesn't set the bit in iir again, but it still produces | |
2514 | * interrupts (for non-MSI). | |
2515 | */ | |
2516 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
2517 | if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) | |
2518 | i915_handle_error(dev, false); | |
2519 | ||
2520 | for_each_pipe(pipe) { | |
2521 | int reg = PIPESTAT(pipe); | |
2522 | pipe_stats[pipe] = I915_READ(reg); | |
2523 | ||
2524 | /* | |
2525 | * Clear the PIPE*STAT regs before the IIR | |
2526 | */ | |
2527 | if (pipe_stats[pipe] & 0x8000ffff) { | |
2528 | if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) | |
2529 | DRM_DEBUG_DRIVER("pipe %c underrun\n", | |
2530 | pipe_name(pipe)); | |
2531 | I915_WRITE(reg, pipe_stats[pipe]); | |
2532 | irq_received = 1; | |
2533 | } | |
2534 | } | |
2535 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | |
2536 | ||
2537 | I915_WRITE16(IIR, iir & ~flip_mask); | |
2538 | new_iir = I915_READ16(IIR); /* Flush posted writes */ | |
2539 | ||
d05c617e | 2540 | i915_update_dri1_breadcrumb(dev); |
c2798b19 CW |
2541 | |
2542 | if (iir & I915_USER_INTERRUPT) | |
2543 | notify_ring(dev, &dev_priv->ring[RCS]); | |
2544 | ||
2545 | if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS && | |
90a72f87 VS |
2546 | i8xx_handle_vblank(dev, 0, iir)) |
2547 | flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0); | |
c2798b19 CW |
2548 | |
2549 | if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS && | |
90a72f87 VS |
2550 | i8xx_handle_vblank(dev, 1, iir)) |
2551 | flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1); | |
c2798b19 CW |
2552 | |
2553 | iir = new_iir; | |
2554 | } | |
2555 | ||
2556 | return IRQ_HANDLED; | |
2557 | } | |
2558 | ||
2559 | static void i8xx_irq_uninstall(struct drm_device * dev) | |
2560 | { | |
2561 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
2562 | int pipe; | |
2563 | ||
c2798b19 CW |
2564 | for_each_pipe(pipe) { |
2565 | /* Clear enable bits; then clear status bits */ | |
2566 | I915_WRITE(PIPESTAT(pipe), 0); | |
2567 | I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); | |
2568 | } | |
2569 | I915_WRITE16(IMR, 0xffff); | |
2570 | I915_WRITE16(IER, 0x0); | |
2571 | I915_WRITE16(IIR, I915_READ16(IIR)); | |
2572 | } | |
2573 | ||
a266c7d5 CW |
2574 | static void i915_irq_preinstall(struct drm_device * dev) |
2575 | { | |
2576 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
2577 | int pipe; | |
2578 | ||
2579 | atomic_set(&dev_priv->irq_received, 0); | |
2580 | ||
2581 | if (I915_HAS_HOTPLUG(dev)) { | |
2582 | I915_WRITE(PORT_HOTPLUG_EN, 0); | |
2583 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | |
2584 | } | |
2585 | ||
00d98ebd | 2586 | I915_WRITE16(HWSTAM, 0xeffe); |
a266c7d5 CW |
2587 | for_each_pipe(pipe) |
2588 | I915_WRITE(PIPESTAT(pipe), 0); | |
2589 | I915_WRITE(IMR, 0xffffffff); | |
2590 | I915_WRITE(IER, 0x0); | |
2591 | POSTING_READ(IER); | |
2592 | } | |
2593 | ||
2594 | static int i915_irq_postinstall(struct drm_device *dev) | |
2595 | { | |
2596 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
38bde180 | 2597 | u32 enable_mask; |
a266c7d5 | 2598 | |
38bde180 CW |
2599 | I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); |
2600 | ||
2601 | /* Unmask the interrupts that we always want on. */ | |
2602 | dev_priv->irq_mask = | |
2603 | ~(I915_ASLE_INTERRUPT | | |
2604 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | | |
2605 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | | |
2606 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | | |
2607 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | | |
2608 | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); | |
2609 | ||
2610 | enable_mask = | |
2611 | I915_ASLE_INTERRUPT | | |
2612 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | | |
2613 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | | |
2614 | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | | |
2615 | I915_USER_INTERRUPT; | |
2616 | ||
a266c7d5 | 2617 | if (I915_HAS_HOTPLUG(dev)) { |
20afbda2 DV |
2618 | I915_WRITE(PORT_HOTPLUG_EN, 0); |
2619 | POSTING_READ(PORT_HOTPLUG_EN); | |
2620 | ||
a266c7d5 CW |
2621 | /* Enable in IER... */ |
2622 | enable_mask |= I915_DISPLAY_PORT_INTERRUPT; | |
2623 | /* and unmask in IMR */ | |
2624 | dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; | |
2625 | } | |
2626 | ||
a266c7d5 CW |
2627 | I915_WRITE(IMR, dev_priv->irq_mask); |
2628 | I915_WRITE(IER, enable_mask); | |
2629 | POSTING_READ(IER); | |
2630 | ||
f49e38dd | 2631 | i915_enable_asle_pipestat(dev); |
20afbda2 DV |
2632 | |
2633 | return 0; | |
2634 | } | |
2635 | ||
90a72f87 VS |
2636 | /* |
2637 | * Returns true when a page flip has completed. | |
2638 | */ | |
2639 | static bool i915_handle_vblank(struct drm_device *dev, | |
2640 | int plane, int pipe, u32 iir) | |
2641 | { | |
2642 | drm_i915_private_t *dev_priv = dev->dev_private; | |
2643 | u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); | |
2644 | ||
2645 | if (!drm_handle_vblank(dev, pipe)) | |
2646 | return false; | |
2647 | ||
2648 | if ((iir & flip_pending) == 0) | |
2649 | return false; | |
2650 | ||
2651 | intel_prepare_page_flip(dev, plane); | |
2652 | ||
2653 | /* We detect FlipDone by looking for the change in PendingFlip from '1' | |
2654 | * to '0' on the following vblank, i.e. IIR has the Pendingflip | |
2655 | * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence | |
2656 | * the flip is completed (no longer pending). Since this doesn't raise | |
2657 | * an interrupt per se, we watch for the change at vblank. | |
2658 | */ | |
2659 | if (I915_READ(ISR) & flip_pending) | |
2660 | return false; | |
2661 | ||
2662 | intel_finish_page_flip(dev, pipe); | |
2663 | ||
2664 | return true; | |
2665 | } | |
2666 | ||
ff1f525e | 2667 | static irqreturn_t i915_irq_handler(int irq, void *arg) |
a266c7d5 CW |
2668 | { |
2669 | struct drm_device *dev = (struct drm_device *) arg; | |
2670 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
8291ee90 | 2671 | u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; |
a266c7d5 | 2672 | unsigned long irqflags; |
38bde180 CW |
2673 | u32 flip_mask = |
2674 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | | |
2675 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; | |
38bde180 | 2676 | int pipe, ret = IRQ_NONE; |
a266c7d5 CW |
2677 | |
2678 | atomic_inc(&dev_priv->irq_received); | |
2679 | ||
2680 | iir = I915_READ(IIR); | |
38bde180 CW |
2681 | do { |
2682 | bool irq_received = (iir & ~flip_mask) != 0; | |
8291ee90 | 2683 | bool blc_event = false; |
a266c7d5 CW |
2684 | |
2685 | /* Can't rely on pipestat interrupt bit in iir as it might | |
2686 | * have been cleared after the pipestat interrupt was received. | |
2687 | * It doesn't set the bit in iir again, but it still produces | |
2688 | * interrupts (for non-MSI). | |
2689 | */ | |
2690 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
2691 | if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) | |
2692 | i915_handle_error(dev, false); | |
2693 | ||
2694 | for_each_pipe(pipe) { | |
2695 | int reg = PIPESTAT(pipe); | |
2696 | pipe_stats[pipe] = I915_READ(reg); | |
2697 | ||
38bde180 | 2698 | /* Clear the PIPE*STAT regs before the IIR */ |
a266c7d5 CW |
2699 | if (pipe_stats[pipe] & 0x8000ffff) { |
2700 | if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) | |
2701 | DRM_DEBUG_DRIVER("pipe %c underrun\n", | |
2702 | pipe_name(pipe)); | |
2703 | I915_WRITE(reg, pipe_stats[pipe]); | |
38bde180 | 2704 | irq_received = true; |
a266c7d5 CW |
2705 | } |
2706 | } | |
2707 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | |
2708 | ||
2709 | if (!irq_received) | |
2710 | break; | |
2711 | ||
a266c7d5 CW |
2712 | /* Consume port. Then clear IIR or we'll miss events */ |
2713 | if ((I915_HAS_HOTPLUG(dev)) && | |
2714 | (iir & I915_DISPLAY_PORT_INTERRUPT)) { | |
2715 | u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); | |
b543fb04 | 2716 | u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; |
a266c7d5 CW |
2717 | |
2718 | DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", | |
2719 | hotplug_status); | |
91d131d2 DV |
2720 | |
2721 | intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); | |
2722 | ||
a266c7d5 | 2723 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); |
38bde180 | 2724 | POSTING_READ(PORT_HOTPLUG_STAT); |
a266c7d5 CW |
2725 | } |
2726 | ||
38bde180 | 2727 | I915_WRITE(IIR, iir & ~flip_mask); |
a266c7d5 CW |
2728 | new_iir = I915_READ(IIR); /* Flush posted writes */ |
2729 | ||
a266c7d5 CW |
2730 | if (iir & I915_USER_INTERRUPT) |
2731 | notify_ring(dev, &dev_priv->ring[RCS]); | |
a266c7d5 | 2732 | |
a266c7d5 | 2733 | for_each_pipe(pipe) { |
38bde180 CW |
2734 | int plane = pipe; |
2735 | if (IS_MOBILE(dev)) | |
2736 | plane = !plane; | |
90a72f87 | 2737 | |
8291ee90 | 2738 | if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && |
90a72f87 VS |
2739 | i915_handle_vblank(dev, plane, pipe, iir)) |
2740 | flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); | |
a266c7d5 CW |
2741 | |
2742 | if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) | |
2743 | blc_event = true; | |
2744 | } | |
2745 | ||
a266c7d5 CW |
2746 | if (blc_event || (iir & I915_ASLE_INTERRUPT)) |
2747 | intel_opregion_asle_intr(dev); | |
2748 | ||
2749 | /* With MSI, interrupts are only generated when iir | |
2750 | * transitions from zero to nonzero. If another bit got | |
2751 | * set while we were handling the existing iir bits, then | |
2752 | * we would never get another interrupt. | |
2753 | * | |
2754 | * This is fine on non-MSI as well, as if we hit this path | |
2755 | * we avoid exiting the interrupt handler only to generate | |
2756 | * another one. | |
2757 | * | |
2758 | * Note that for MSI this could cause a stray interrupt report | |
2759 | * if an interrupt landed in the time between writing IIR and | |
2760 | * the posting read. This should be rare enough to never | |
2761 | * trigger the 99% of 100,000 interrupts test for disabling | |
2762 | * stray interrupts. | |
2763 | */ | |
38bde180 | 2764 | ret = IRQ_HANDLED; |
a266c7d5 | 2765 | iir = new_iir; |
38bde180 | 2766 | } while (iir & ~flip_mask); |
a266c7d5 | 2767 | |
d05c617e | 2768 | i915_update_dri1_breadcrumb(dev); |
8291ee90 | 2769 | |
a266c7d5 CW |
2770 | return ret; |
2771 | } | |
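/*
 * The iir/new_iir dance above is what makes MSI reliable here: rereading
 * IIR after each ack and looping until (iir & ~flip_mask) is clear
 * guarantees IIR passes through zero, producing the zero-to-nonzero edge
 * that MSI needs before it will signal again.
 */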
2772 | ||
2773 | static void i915_irq_uninstall(struct drm_device * dev) | |
2774 | { | |
2775 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
2776 | int pipe; | |
2777 | ||
ac4c16c5 EE |
2778 | del_timer_sync(&dev_priv->hotplug_reenable_timer); |
2779 | ||
a266c7d5 CW |
2780 | if (I915_HAS_HOTPLUG(dev)) { |
2781 | I915_WRITE(PORT_HOTPLUG_EN, 0); | |
2782 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | |
2783 | } | |
2784 | ||
00d98ebd | 2785 | I915_WRITE16(HWSTAM, 0xffff); |
55b39755 CW |
2786 | for_each_pipe(pipe) { |
2787 | /* Clear enable bits; then clear status bits */ | |
a266c7d5 | 2788 | I915_WRITE(PIPESTAT(pipe), 0); |
55b39755 CW |
2789 | I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); |
2790 | } | |
a266c7d5 CW |
2791 | I915_WRITE(IMR, 0xffffffff); |
2792 | I915_WRITE(IER, 0x0); | |
2793 | ||
a266c7d5 CW |
2794 | I915_WRITE(IIR, I915_READ(IIR)); |
2795 | } | |
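/*
 * PIPESTAT packs interrupt-enable bits in the high half and sticky,
 * write-1-to-clear status bits in the low half (plus the FIFO
 * underrun status at bit 31). That is why the teardown above writes
 * the register twice, per its "Clear enable bits; then clear status
 * bits" comment:
 *
 *	I915_WRITE(PIPESTAT(pipe), 0);				// drop enables
 *	I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));	// W1C status
 */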
2796 | ||
2797 | static void i965_irq_preinstall(struct drm_device *dev) |
2798 | { | |
2799 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
2800 | int pipe; | |
2801 | ||
2802 | atomic_set(&dev_priv->irq_received, 0); | |
2803 | ||
adca4730 CW |
2804 | I915_WRITE(PORT_HOTPLUG_EN, 0); |
2805 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | |
a266c7d5 CW |
2806 | |
2807 | I915_WRITE(HWSTAM, 0xeffe); | |
2808 | for_each_pipe(pipe) | |
2809 | I915_WRITE(PIPESTAT(pipe), 0); | |
2810 | I915_WRITE(IMR, 0xffffffff); | |
2811 | I915_WRITE(IER, 0x0); | |
2812 | POSTING_READ(IER); | |
2813 | } | |
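/*
 * Preinstall runs before the interrupt line is hooked up, so every
 * source gets masked (HWSTAM, IMR = ~0, IER = 0) and latched state is
 * cleared. The POSTING_READ(IER) is the usual idiom for flushing
 * posted MMIO writes, leaving the device demonstrably quiet before
 * request_irq() can deliver anything:
 *
 *	I915_WRITE(IER, 0);
 *	POSTING_READ(IER);	// read back to force the write to land
 */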
2814 | ||
2815 | static int i965_irq_postinstall(struct drm_device *dev) | |
2816 | { | |
2817 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
bbba0a97 | 2818 | u32 enable_mask; |
a266c7d5 | 2819 | u32 error_mask; |
b79480ba | 2820 | unsigned long irqflags; |
a266c7d5 | 2821 | |
a266c7d5 | 2822 | /* Unmask the interrupts that we always want on. */ |
bbba0a97 | 2823 | dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | |
adca4730 | 2824 | I915_DISPLAY_PORT_INTERRUPT | |
bbba0a97 CW |
2825 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | |
2826 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | | |
2827 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | | |
2828 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | | |
2829 | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); | |
2830 | ||
2831 | enable_mask = ~dev_priv->irq_mask; | |
21ad8330 VS |
2832 | enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | |
2833 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); | |
bbba0a97 CW |
2834 | enable_mask |= I915_USER_INTERRUPT; |
2835 | ||
2836 | if (IS_G4X(dev)) | |
2837 | enable_mask |= I915_BSD_USER_INTERRUPT; | |
a266c7d5 | 2838 | |
b79480ba DV |
2839 | /* Interrupt setup is already guaranteed to be single-threaded; this is |
2840 | * just to make the assert_spin_locked check happy. */ | |
2841 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
515ac2bb | 2842 | i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); |
b79480ba | 2843 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
a266c7d5 | 2844 | |
a266c7d5 CW |
2845 | /* |
2846 | * Enable some error detection, note the instruction error mask | |
2847 | * bit is reserved, so we leave it masked. | |
2848 | */ | |
2849 | if (IS_G4X(dev)) { | |
2850 | error_mask = ~(GM45_ERROR_PAGE_TABLE | | |
2851 | GM45_ERROR_MEM_PRIV | | |
2852 | GM45_ERROR_CP_PRIV | | |
2853 | I915_ERROR_MEMORY_REFRESH); | |
2854 | } else { | |
2855 | error_mask = ~(I915_ERROR_PAGE_TABLE | | |
2856 | I915_ERROR_MEMORY_REFRESH); | |
2857 | } | |
2858 | I915_WRITE(EMR, error_mask); | |
2859 | ||
2860 | I915_WRITE(IMR, dev_priv->irq_mask); | |
2861 | I915_WRITE(IER, enable_mask); | |
2862 | POSTING_READ(IER); | |
2863 | ||
20afbda2 DV |
2864 | I915_WRITE(PORT_HOTPLUG_EN, 0); |
2865 | POSTING_READ(PORT_HOTPLUG_EN); | |
2866 | ||
f49e38dd | 2867 | i915_enable_asle_pipestat(dev); |
20afbda2 DV |
2868 | |
2869 | return 0; | |
2870 | } | |
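/*
 * The mask algebra above in brief (flip-pending names abbreviated):
 * dev_priv->irq_mask is the set of sources kept masked in IMR, and
 * the IER enable set is its complement, except that the flip-pending
 * bits stay disabled until a page flip actually arms them:
 *
 *	enable_mask = ~dev_priv->irq_mask;
 *	enable_mask &= ~(PLANE_A_FLIP_PENDING | PLANE_B_FLIP_PENDING);
 *	enable_mask |= I915_USER_INTERRUPT;	// ring notification
 *	if (IS_G4X(dev))
 *		enable_mask |= I915_BSD_USER_INTERRUPT;
 */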
2871 | ||
bac56d5b | 2872 | static void i915_hpd_irq_setup(struct drm_device *dev) |
20afbda2 DV |
2873 | { |
2874 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
e5868a31 | 2875 | struct drm_mode_config *mode_config = &dev->mode_config; |
cd569aed | 2876 | struct intel_encoder *intel_encoder; |
20afbda2 DV |
2877 | u32 hotplug_en; |
2878 | ||
b5ea2d56 DV |
2879 | assert_spin_locked(&dev_priv->irq_lock); |
2880 | ||
bac56d5b EE |
2881 | if (I915_HAS_HOTPLUG(dev)) { |
2882 | hotplug_en = I915_READ(PORT_HOTPLUG_EN); | |
2883 | hotplug_en &= ~HOTPLUG_INT_EN_MASK; | |
2884 | /* Note HDMI and DP share hotplug bits */ | |
e5868a31 | 2885 | /* enable bits are the same for all generations */ |
cd569aed EE |
2886 | list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) |
2887 | if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) | |
2888 | hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin]; | |
bac56d5b EE |
2889 | /* Programming the CRT detection parameters tends |
2890 |  * to generate a spurious hotplug event about three |
2891 |  * seconds later. So just do it once. |
2892 |  */ |
2893 | if (IS_G4X(dev)) | |
2894 | hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; | |
85fc95ba | 2895 | hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK; |
bac56d5b | 2896 | hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; |
a266c7d5 | 2897 | |
bac56d5b EE |
2898 | /* Ignore TV since it's buggy */ |
2899 | I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); | |
2900 | } | |
a266c7d5 CW |
2901 | } |
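/*
 * PORT_HOTPLUG_EN is rebuilt from scratch on every call: the whole
 * enable mask is cleared, then one bit per HPD pin is ORed back in,
 * but only for pins still marked HPD_ENABLED; pins shut off after an
 * interrupt storm stay dark until the reenable timer further below
 * restores them. A condensed sketch, where for_each_hpd_encoder() is
 * a hypothetical stand-in for the encoder_list walk:
 *
 *	hotplug_en &= ~HOTPLUG_INT_EN_MASK;
 *	for_each_hpd_encoder(encoder)
 *		if (hpd_mark(encoder) == HPD_ENABLED)
 *			hotplug_en |= hpd_mask_i915[encoder->hpd_pin];
 */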
2902 | ||
ff1f525e | 2903 | static irqreturn_t i965_irq_handler(int irq, void *arg) |
a266c7d5 CW |
2904 | { |
2905 | struct drm_device *dev = (struct drm_device *) arg; | |
2906 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
a266c7d5 CW |
2907 | u32 iir, new_iir; |
2908 | u32 pipe_stats[I915_MAX_PIPES]; | |
a266c7d5 CW |
2909 | unsigned long irqflags; |
2910 | bool irq_received; |
2911 | int ret = IRQ_NONE, pipe; | |
21ad8330 VS |
2912 | u32 flip_mask = |
2913 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | | |
2914 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; | |
a266c7d5 CW |
2915 | |
2916 | atomic_inc(&dev_priv->irq_received); | |
2917 | ||
2918 | iir = I915_READ(IIR); | |
2919 | ||
a266c7d5 | 2920 | for (;;) { |
2c8ba29f CW |
2921 | bool blc_event = false; |
2922 | ||
21ad8330 | 2923 | irq_received = (iir & ~flip_mask) != 0; |
a266c7d5 CW |
2924 | |
2925 | /* Can't rely on pipestat interrupt bit in iir as it might | |
2926 | * have been cleared after the pipestat interrupt was received. | |
2927 | * It doesn't set the bit in iir again, but it still produces | |
2928 | * interrupts (for non-MSI). | |
2929 | */ | |
2930 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
2931 | if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) | |
2932 | i915_handle_error(dev, false); | |
2933 | ||
2934 | for_each_pipe(pipe) { | |
2935 | int reg = PIPESTAT(pipe); | |
2936 | pipe_stats[pipe] = I915_READ(reg); | |
2937 | ||
2938 | /* | |
2939 | * Clear the PIPE*STAT regs before the IIR | |
2940 | */ | |
2941 | if (pipe_stats[pipe] & 0x8000ffff) { | |
2942 | if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) | |
2943 | DRM_DEBUG_DRIVER("pipe %c underrun\n", | |
2944 | pipe_name(pipe)); | |
2945 | I915_WRITE(reg, pipe_stats[pipe]); | |
2946 | irq_received = true; |
2947 | } | |
2948 | } | |
2949 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | |
2950 | ||
2951 | if (!irq_received) | |
2952 | break; | |
2953 | ||
2954 | ret = IRQ_HANDLED; | |
2955 | ||
2956 | /* Consume port. Then clear IIR or we'll miss events */ | |
adca4730 | 2957 | if (iir & I915_DISPLAY_PORT_INTERRUPT) { |
a266c7d5 | 2958 | u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); |
b543fb04 EE |
2959 | u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ? |
2960 | HOTPLUG_INT_STATUS_G4X : | |
4f7fd709 | 2961 | HOTPLUG_INT_STATUS_I915); |
a266c7d5 CW |
2962 | |
2963 | DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", | |
2964 | hotplug_status); | |
91d131d2 DV |
2965 | |
2966 | intel_hpd_irq_handler(dev, hotplug_trigger, | |
2967 | IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915); | |
2968 | ||
a266c7d5 CW |
2969 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); |
2970 | I915_READ(PORT_HOTPLUG_STAT); | |
2971 | } | |
2972 | ||
21ad8330 | 2973 | I915_WRITE(IIR, iir & ~flip_mask); |
a266c7d5 CW |
2974 | new_iir = I915_READ(IIR); /* Flush posted writes */ |
2975 | ||
a266c7d5 CW |
2976 | if (iir & I915_USER_INTERRUPT) |
2977 | notify_ring(dev, &dev_priv->ring[RCS]); | |
2978 | if (iir & I915_BSD_USER_INTERRUPT) | |
2979 | notify_ring(dev, &dev_priv->ring[VCS]); | |
2980 | ||
a266c7d5 | 2981 | for_each_pipe(pipe) { |
2c8ba29f | 2982 | if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && |
90a72f87 VS |
2983 | i915_handle_vblank(dev, pipe, pipe, iir)) |
2984 | flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); | |
a266c7d5 CW |
2985 | |
2986 | if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) | |
2987 | blc_event = true; | |
2988 | } | |
2989 | ||
2990 | ||
2991 | if (blc_event || (iir & I915_ASLE_INTERRUPT)) | |
2992 | intel_opregion_asle_intr(dev); | |
2993 | ||
515ac2bb DV |
2994 | if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) |
2995 | gmbus_irq_handler(dev); | |
2996 | ||
a266c7d5 CW |
2997 | /* With MSI, interrupts are only generated when iir |
2998 | * transitions from zero to nonzero. If another bit got | |
2999 | * set while we were handling the existing iir bits, then | |
3000 | * we would never get another interrupt. | |
3001 | * | |
3002 | * This is fine on non-MSI as well: if we hit this path, |
3003 | * we avoid exiting the interrupt handler only to generate | |
3004 | * another one. | |
3005 | * | |
3006 | * Note that for MSI this could cause a stray interrupt report | |
3007 | * if an interrupt landed in the time between writing IIR and | |
3008 | * the posting read. This should be rare enough to never | |
3009 | * trigger the 99% of 100,000 interrupts test for disabling | |
3010 | * stray interrupts. | |
3011 | */ | |
3012 | iir = new_iir; | |
3013 | } | |
3014 | ||
d05c617e | 3015 | i915_update_dri1_breadcrumb(dev); |
2c8ba29f | 3016 | |
a266c7d5 CW |
3017 | return ret; |
3018 | } | |
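/*
 * Contrast with the gen3 handler earlier: gen4 maps plane == pipe
 * directly for vblank/flip handling, while mobile gen3 swaps the two,
 * and gen4 additionally services the BSD ring and GMBUS events. The
 * gen3 mapping, for reference:
 *
 *	int plane = pipe;
 *	if (IS_MOBILE(dev))
 *		plane = !plane;		// mobile gen3 crosses plane A/B
 */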
3019 | ||
3020 | static void i965_irq_uninstall(struct drm_device *dev) |
3021 | { | |
3022 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
3023 | int pipe; | |
3024 | ||
3025 | if (!dev_priv) | |
3026 | return; | |
3027 | ||
ac4c16c5 EE |
3028 | del_timer_sync(&dev_priv->hotplug_reenable_timer); |
3029 | ||
adca4730 CW |
3030 | I915_WRITE(PORT_HOTPLUG_EN, 0); |
3031 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | |
a266c7d5 CW |
3032 | |
3033 | I915_WRITE(HWSTAM, 0xffffffff); | |
3034 | for_each_pipe(pipe) | |
3035 | I915_WRITE(PIPESTAT(pipe), 0); | |
3036 | I915_WRITE(IMR, 0xffffffff); | |
3037 | I915_WRITE(IER, 0x0); | |
3038 | ||
3039 | for_each_pipe(pipe) | |
3040 | I915_WRITE(PIPESTAT(pipe), | |
3041 | I915_READ(PIPESTAT(pipe)) & 0x8000ffff); | |
3042 | I915_WRITE(IIR, I915_READ(IIR)); | |
3043 | } | |
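/*
 * The 0x8000ffff mask used in the final clear selects only PIPESTAT's
 * sticky status bits (bit 31 plus the low half), so this
 * write-1-to-clear pass cannot accidentally turn an enable bit back
 * on:
 *
 *	I915_WRITE(PIPESTAT(pipe),
 *		   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
 */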
3044 | ||
ac4c16c5 EE |
3045 | static void i915_reenable_hotplug_timer_func(unsigned long data) |
3046 | { | |
3047 | drm_i915_private_t *dev_priv = (drm_i915_private_t *)data; | |
3048 | struct drm_device *dev = dev_priv->dev; | |
3049 | struct drm_mode_config *mode_config = &dev->mode_config; | |
3050 | unsigned long irqflags; | |
3051 | int i; | |
3052 | ||
3053 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
3054 | for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) { | |
3055 | struct drm_connector *connector; | |
3056 | ||
3057 | if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED) | |
3058 | continue; | |
3059 | ||
3060 | dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; | |
3061 | ||
3062 | list_for_each_entry(connector, &mode_config->connector_list, head) { | |
3063 | struct intel_connector *intel_connector = to_intel_connector(connector); | |
3064 | ||
3065 | if (intel_connector->encoder->hpd_pin == i) { | |
3066 | if (connector->polled != intel_connector->polled) | |
3067 | DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n", | |
3068 | drm_get_connector_name(connector)); | |
3069 | connector->polled = intel_connector->polled; | |
3070 | if (!connector->polled) | |
3071 | connector->polled = DRM_CONNECTOR_POLL_HPD; | |
3072 | } | |
3073 | } | |
3074 | } | |
3075 | if (dev_priv->display.hpd_irq_setup) | |
3076 | dev_priv->display.hpd_irq_setup(dev); | |
3077 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | |
3078 | } | |
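/*
 * This timer is the recovery half of HPD storm handling: pins parked
 * at HPD_DISABLED are returned to HPD_ENABLED, their connectors move
 * from polling back to interrupt-driven detection, and
 * hpd_irq_setup() re-arms the hardware enable bits, all under
 * irq_lock. Roughly:
 *
 *	storm detected: HPD_ENABLED  -> HPD_DISABLED (connector polled)
 *	timer fires:    HPD_DISABLED -> HPD_ENABLED  (HPD re-enabled)
 */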
3079 | ||
f71d4af4 JB |
3080 | void intel_irq_init(struct drm_device *dev) |
3081 | { | |
8b2e326d CW |
3082 | struct drm_i915_private *dev_priv = dev->dev_private; |
3083 | ||
3084 | INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); | |
99584db3 | 3085 | INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func); |
c6a828d3 | 3086 | INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); |
a4da4fa4 | 3087 | INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); |
8b2e326d | 3088 | |
99584db3 DV |
3089 | setup_timer(&dev_priv->gpu_error.hangcheck_timer, |
3090 | i915_hangcheck_elapsed, | |
61bac78e | 3091 | (unsigned long) dev); |
ac4c16c5 EE |
3092 | setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func, |
3093 | (unsigned long) dev_priv); | |
61bac78e | 3094 | |
97a19a24 | 3095 | pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); |
9ee32fea | 3096 | |
f71d4af4 JB |
3097 | dev->driver->get_vblank_counter = i915_get_vblank_counter; |
3098 | dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ | |
7d4e146f | 3099 | if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { |
f71d4af4 JB |
3100 | dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ |
3101 | dev->driver->get_vblank_counter = gm45_get_vblank_counter; | |
3102 | } | |
3103 | ||
c3613de9 KP |
3104 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
3105 | dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; | |
3106 | else | |
3107 | dev->driver->get_vblank_timestamp = NULL; | |
f71d4af4 JB |
3108 | dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; |
3109 | ||
7e231dbe JB |
3110 | if (IS_VALLEYVIEW(dev)) { |
3111 | dev->driver->irq_handler = valleyview_irq_handler; | |
3112 | dev->driver->irq_preinstall = valleyview_irq_preinstall; | |
3113 | dev->driver->irq_postinstall = valleyview_irq_postinstall; | |
3114 | dev->driver->irq_uninstall = valleyview_irq_uninstall; | |
3115 | dev->driver->enable_vblank = valleyview_enable_vblank; | |
3116 | dev->driver->disable_vblank = valleyview_disable_vblank; | |
fa00abe0 | 3117 | dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; |
4a06e201 | 3118 | } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { |
7d99163d | 3119 | /* Share pre- and uninstall handlers with ILK/SNB */ |
f71d4af4 | 3120 | dev->driver->irq_handler = ivybridge_irq_handler; |
31694658 | 3121 | dev->driver->irq_preinstall = ironlake_irq_preinstall; |
f71d4af4 JB |
3122 | dev->driver->irq_postinstall = ivybridge_irq_postinstall; |
3123 | dev->driver->irq_uninstall = ironlake_irq_uninstall; | |
3124 | dev->driver->enable_vblank = ivybridge_enable_vblank; | |
3125 | dev->driver->disable_vblank = ivybridge_disable_vblank; | |
82a28bcf | 3126 | dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; |
f71d4af4 JB |
3127 | } else if (HAS_PCH_SPLIT(dev)) { |
3128 | dev->driver->irq_handler = ironlake_irq_handler; | |
3129 | dev->driver->irq_preinstall = ironlake_irq_preinstall; | |
3130 | dev->driver->irq_postinstall = ironlake_irq_postinstall; | |
3131 | dev->driver->irq_uninstall = ironlake_irq_uninstall; | |
3132 | dev->driver->enable_vblank = ironlake_enable_vblank; | |
3133 | dev->driver->disable_vblank = ironlake_disable_vblank; | |
82a28bcf | 3134 | dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; |
f71d4af4 | 3135 | } else { |
c2798b19 CW |
3136 | if (INTEL_INFO(dev)->gen == 2) { |
3137 | dev->driver->irq_preinstall = i8xx_irq_preinstall; | |
3138 | dev->driver->irq_postinstall = i8xx_irq_postinstall; | |
3139 | dev->driver->irq_handler = i8xx_irq_handler; | |
3140 | dev->driver->irq_uninstall = i8xx_irq_uninstall; | |
a266c7d5 CW |
3141 | } else if (INTEL_INFO(dev)->gen == 3) { |
3142 | dev->driver->irq_preinstall = i915_irq_preinstall; | |
3143 | dev->driver->irq_postinstall = i915_irq_postinstall; | |
3144 | dev->driver->irq_uninstall = i915_irq_uninstall; | |
3145 | dev->driver->irq_handler = i915_irq_handler; | |
20afbda2 | 3146 | dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; |
c2798b19 | 3147 | } else { |
a266c7d5 CW |
3148 | dev->driver->irq_preinstall = i965_irq_preinstall; |
3149 | dev->driver->irq_postinstall = i965_irq_postinstall; | |
3150 | dev->driver->irq_uninstall = i965_irq_uninstall; | |
3151 | dev->driver->irq_handler = i965_irq_handler; | |
bac56d5b | 3152 | dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; |
c2798b19 | 3153 | } |
f71d4af4 JB |
3154 | dev->driver->enable_vblank = i915_enable_vblank; |
3155 | dev->driver->disable_vblank = i915_disable_vblank; | |
3156 | } | |
3157 | } | |
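/*
 * intel_irq_init() is essentially a per-platform vtable switchboard;
 * condensed from the branches above:
 *
 *	VLV       -> valleyview_*                  + i915_hpd_irq_setup
 *	IVB/HSW   -> ivybridge_* handler/postinstall,
 *	             ironlake_* pre/uninstall      + ibx_hpd_irq_setup
 *	PCH split -> ironlake_*                    + ibx_hpd_irq_setup
 *	gen2      -> i8xx_*                        (no hpd setup)
 *	gen3      -> i915_*                        + i915_hpd_irq_setup
 *	gen4+     -> i965_*                        + i915_hpd_irq_setup
 */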
20afbda2 DV |
3158 | |
3159 | void intel_hpd_init(struct drm_device *dev) | |
3160 | { | |
3161 | struct drm_i915_private *dev_priv = dev->dev_private; | |
821450c6 EE |
3162 | struct drm_mode_config *mode_config = &dev->mode_config; |
3163 | struct drm_connector *connector; | |
b5ea2d56 | 3164 | unsigned long irqflags; |
821450c6 | 3165 | int i; |
20afbda2 | 3166 | |
821450c6 EE |
3167 | for (i = 1; i < HPD_NUM_PINS; i++) { |
3168 | dev_priv->hpd_stats[i].hpd_cnt = 0; | |
3169 | dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; | |
3170 | } | |
3171 | list_for_each_entry(connector, &mode_config->connector_list, head) { | |
3172 | struct intel_connector *intel_connector = to_intel_connector(connector); | |
3173 | connector->polled = intel_connector->polled; | |
3174 | if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE) | |
3175 | connector->polled = DRM_CONNECTOR_POLL_HPD; | |
3176 | } | |
b5ea2d56 DV |
3177 | |
3178 | /* Interrupt setup is already guaranteed to be single-threaded; this is |
3179 | * just to make the assert_spin_locked checks happy. */ | |
3180 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
20afbda2 DV |
3181 | if (dev_priv->display.hpd_irq_setup) |
3182 | dev_priv->display.hpd_irq_setup(dev); | |
b5ea2d56 | 3183 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
20afbda2 | 3184 | } |
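/*
 * Note the fallback logic above: a connector keeps whatever ->polled
 * mode it asked for, and only connectors that requested none, on
 * hardware with hotplug support and with a real HPD pin assigned, are
 * promoted to interrupt-driven detection:
 *
 *	if (!connector->polled && I915_HAS_HOTPLUG(dev) &&
 *	    intel_connector->encoder->hpd_pin > HPD_NONE)
 *		connector->polled = DRM_CONNECTOR_POLL_HPD;
 */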