// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
 *
 * This file contains spurious interrupt handling.
 */

#include <linux/jiffies.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/timer.h>

#include "internals.h"

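/*
 * irqfixup: 0 = disabled, 1 = poll the other lines when an interrupt
 * goes unhandled ("irqfixup" boot option), 2 = additionally poll on
 * handled interrupts marked IRQF_IRQPOLL ("irqpoll" boot option).
 * See try_misrouted_irq() below.
 */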
static int irqfixup __read_mostly;

#define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)
static void poll_spurious_irqs(struct timer_list *unused);
static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs);
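/*
 * Only one poller may run at a time: irq_poll_active gates entry to
 * misrouted_irq() and poll_spurious_irqs(), and irq_poll_cpu records
 * the polling CPU so irq_wait_for_poll() can detect a wait for a poll
 * running on the current CPU, which could never make progress.
 */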
static int irq_poll_cpu;
static atomic_t irq_poll_active;

/*
 * We wait here for a poller to finish.
 *
 * If the poll runs on this CPU, then we yell loudly and return
 * false. That will leave the interrupt line disabled in the worst
 * case, but it should never happen.
 *
 * We wait until the poller is done and then recheck disabled and
 * action (about to be disabled). Only if it's still active, we return
 * true and let the handler run.
 */
bool irq_wait_for_poll(struct irq_desc *desc)
	__must_hold(&desc->lock)
{
	if (WARN_ONCE(irq_poll_cpu == smp_processor_id(),
		      "irq poll in progress on cpu %d for irq %d\n",
		      smp_processor_id(), desc->irq_data.irq))
		return false;

#ifdef CONFIG_SMP
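	/*
	 * Busy-wait with desc->lock dropped: whoever runs the handlers
	 * needs to retake the lock to clear the in-progress state.
	 * Recheck after relocking, as the line may have become busy
	 * again in the meantime.
	 */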
	do {
		raw_spin_unlock(&desc->lock);
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();
		raw_spin_lock(&desc->lock);
	} while (irqd_irq_inprogress(&desc->irq_data));
	/* Might have been disabled in meantime */
	return !irqd_irq_disabled(&desc->irq_data) && desc->action;
#else
	return false;
#endif
}

/*
 * Recovery handler for misrouted interrupts.
 */
static int try_one_irq(struct irq_desc *desc, bool force)
{
	irqreturn_t ret = IRQ_NONE;
	struct irqaction *action;

	raw_spin_lock(&desc->lock);

	/*
	 * PER_CPU, nested thread interrupts and interrupts explicitly
	 * marked polled are excluded from polling.
	 */
	if (irq_settings_is_per_cpu(desc) ||
	    irq_settings_is_nested_thread(desc) ||
	    irq_settings_is_polled(desc))
		goto out;

	/*
	 * Do not poll disabled interrupts unless the spurious
	 * disabled poller asks explicitly.
	 */
	if (irqd_irq_disabled(&desc->irq_data) && !force)
		goto out;

	/*
	 * All handlers must agree on IRQF_SHARED, so we test just the
	 * first.
	 */
	action = desc->action;
	if (!action || !(action->flags & IRQF_SHARED) ||
	    (action->flags & __IRQF_TIMER))
		goto out;

	/* Already running on another processor */
	if (irqd_irq_inprogress(&desc->irq_data)) {
		/*
		 * Already running: If it is shared get the other
		 * CPU to go looking for our mystery interrupt too
		 */
		desc->istate |= IRQS_PENDING;
		goto out;
	}

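	/*
	 * While IRQS_POLL_INPROGRESS is set, a real interrupt arriving
	 * on this line waits in irq_wait_for_poll() instead of running
	 * the handlers concurrently with the poll.
	 */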
	/* Mark it poll in progress */
	desc->istate |= IRQS_POLL_INPROGRESS;
	do {
		if (handle_irq_event(desc) == IRQ_HANDLED)
			ret = IRQ_HANDLED;
		/* Make sure that there is still a valid action */
		action = desc->action;
	} while ((desc->istate & IRQS_PENDING) && action);
	desc->istate &= ~IRQS_POLL_INPROGRESS;
out:
	raw_spin_unlock(&desc->lock);
	return ret == IRQ_HANDLED;
}

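/*
 * Poll all interrupt lines except @irq once. Called via note_interrupt()
 * when an interrupt goes unhandled and irqfixup is active; returns 1 if
 * some other line's handlers claimed an interrupt, so the caller can
 * adjust the unhandled count for @irq.
 */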
static int misrouted_irq(int irq)
{
	struct irq_desc *desc;
	int i, ok = 0;

	if (atomic_inc_return(&irq_poll_active) != 1)
		goto out;

	irq_poll_cpu = smp_processor_id();

	for_each_irq_desc(i, desc) {
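		/* irq 0 is traditionally the PC timer; it is never polled */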
		if (!i)
			continue;

		if (i == irq)	/* Already tried */
			continue;

		if (try_one_irq(desc, false))
			ok = 1;
	}
out:
	atomic_dec(&irq_poll_active);
	/* So the caller can adjust the irq error counts */
	return ok;
}

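/*
 * Timer callback: as long as at least one line was disabled as spurious
 * (IRQS_SPURIOUS_DISABLED), force-poll all pollable lines so devices
 * sharing the disabled line keep working, then rearm the timer.
 */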
static void poll_spurious_irqs(struct timer_list *unused)
{
	struct irq_desc *desc;
	int i;

	if (atomic_inc_return(&irq_poll_active) != 1)
		goto out;
	irq_poll_cpu = smp_processor_id();

	for_each_irq_desc(i, desc) {
		unsigned int state;

		if (!i)
			continue;

		/* Racy but it doesn't matter */
		state = desc->istate;
		barrier();
		if (!(state & IRQS_SPURIOUS_DISABLED))
			continue;

		local_irq_disable();
		try_one_irq(desc, true);
		local_irq_enable();
	}
out:
	atomic_dec(&irq_poll_active);
	mod_timer(&poll_spurious_irq_timer,
		  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
}

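/*
 * Valid handler returns are IRQ_NONE (0) or any mask of IRQ_HANDLED (1)
 * and IRQ_WAKE_THREAD (2), so anything above their OR is bogus.
 */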
static inline int bad_action_ret(irqreturn_t action_ret)
{
	unsigned int r = action_ret;

	if (likely(r <= (IRQ_HANDLED | IRQ_WAKE_THREAD)))
		return 0;
	return 1;
}

/*
 * If 99,900 of the previous 100,000 interrupts have not been handled
 * then assume that the IRQ is stuck in some manner. Drop a diagnostic
 * and try to turn the IRQ off.
 *
 * (The other 100-of-100,000 interrupts may have come from a correctly
 *  functioning device sharing an IRQ with the failing one)
 */
static void __report_bad_irq(struct irq_desc *desc, irqreturn_t action_ret)
{
	unsigned int irq = irq_desc_get_irq(desc);
	struct irqaction *action;
	unsigned long flags;

	if (bad_action_ret(action_ret)) {
		printk(KERN_ERR "irq event %d: bogus return value %x\n",
				irq, action_ret);
	} else {
		printk(KERN_ERR "irq %d: nobody cared (try booting with "
				"the \"irqpoll\" option)\n", irq);
	}
	dump_stack();
	printk(KERN_ERR "handlers:\n");

	/*
	 * We need to take desc->lock here. note_interrupt() is called
	 * w/o desc->lock held, but with the interrupt marked as in
	 * progress. We might race with something else removing an
	 * action. It's ok to take desc->lock here. See
	 * synchronize_irq().
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_action_of_desc(desc, action) {
		printk(KERN_ERR "[<%p>] %ps", action->handler, action->handler);
		if (action->thread_fn)
			printk(KERN_CONT " threaded [<%p>] %ps",
					action->thread_fn, action->thread_fn);
		printk(KERN_CONT "\n");
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}

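/* Cap the reports at 100 over the lifetime of the system */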
static void report_bad_irq(struct irq_desc *desc, irqreturn_t action_ret)
{
	static int count = 100;

	if (count > 0) {
		count--;
		__report_bad_irq(desc, action_ret);
	}
}

static inline int
try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
		  irqreturn_t action_ret)
{
	struct irqaction *action;

	if (!irqfixup)
		return 0;

	/* We didn't actually handle the IRQ - see if it was misrouted? */
	if (action_ret == IRQ_NONE)
		return 1;

	/*
	 * But for 'irqfixup == 2' we also do it for handled interrupts if
	 * they are marked as IRQF_IRQPOLL (or for irq zero, which is the
	 * traditional PC timer interrupt - a legacy special case).
	 */
	if (irqfixup < 2)
		return 0;

	if (!irq)
		return 1;

	/*
	 * Since we don't get the descriptor lock, "action" can
	 * change under us. We don't really care, but we don't
	 * want to follow a NULL pointer. So tell the compiler to
	 * just load it once by using a barrier.
	 */
	action = desc->action;
	barrier();
	return action && (action->flags & IRQF_IRQPOLL);
}

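/*
 * Bit 31 of desc->threads_handled_last flags that spurious detection was
 * deferred to the next hard interrupt because only a thread was woken
 * (see the IRQ_WAKE_THREAD handling in note_interrupt() below).
 */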
#define SPURIOUS_DEFERRED	0x80000000

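/*
 * Called from the hard interrupt flow with the combined return value of
 * all handlers on the line. Tracks unhandled interrupts and, once 99,900
 * out of 100,000 were unhandled, reports and disables the line.
 */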
void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret)
{
	unsigned int irq;

	if (desc->istate & IRQS_POLL_INPROGRESS ||
	    irq_settings_is_polled(desc))
		return;

	if (bad_action_ret(action_ret)) {
		report_bad_irq(desc, action_ret);
		return;
	}

	/*
	 * We cannot call note_interrupt from the threaded handler
	 * because we need to look at the compound of all handlers
	 * (primary and threaded). Aside from that, in the threaded
	 * shared case we have no serialization against an incoming
	 * hardware interrupt while we are dealing with a threaded
	 * result.
	 *
	 * So in case a thread is woken, we just note the fact and
	 * defer the analysis to the next hardware interrupt.
	 *
	 * The threaded handlers store whether they successfully
	 * handled an interrupt and we check whether that number
	 * changed versus the last invocation.
	 *
	 * We could handle all interrupts with the delayed-by-one
	 * mechanism, but for the non-forced threaded case we'd just
	 * add pointless overhead to the straight hardirq interrupts
	 * for the sake of a few lines less code.
	 */
	if (action_ret & IRQ_WAKE_THREAD) {
		/*
		 * There is a thread woken. Check whether one of the
		 * shared primary handlers returned IRQ_HANDLED. If
		 * not we defer the spurious detection to the next
		 * interrupt.
		 */
		if (action_ret == IRQ_WAKE_THREAD) {
			int handled;
			/*
			 * We use bit 31 of threads_handled_last to
			 * denote that deferred spurious detection is
			 * active. No locking necessary as
			 * threads_handled_last is only accessed here
			 * and we have the guarantee that hard
			 * interrupts are not reentrant.
			 */
			if (!(desc->threads_handled_last & SPURIOUS_DEFERRED)) {
				desc->threads_handled_last |= SPURIOUS_DEFERRED;
				return;
			}
			/*
			 * Check whether one of the threaded handlers
			 * returned IRQ_HANDLED since the last
			 * interrupt happened.
			 *
			 * For simplicity we just set bit 31, as it is
			 * set in threads_handled_last as well. So we
			 * avoid extra masking. And we really do not
			 * care about the high bits of the handled
			 * count. We just care about the count being
			 * different from the one we saw before.
			 */
			handled = atomic_read(&desc->threads_handled);
			handled |= SPURIOUS_DEFERRED;
			if (handled != desc->threads_handled_last) {
				action_ret = IRQ_HANDLED;
				/*
				 * Note: We keep the SPURIOUS_DEFERRED
				 * bit set. We are handling the
				 * previous invocation right now.
				 * Keep it for the current one, so the
				 * next hardware interrupt will
				 * account for it.
				 */
				desc->threads_handled_last = handled;
			} else {
				/*
				 * None of the threaded handlers felt
				 * responsible for the last interrupt.
				 *
				 * We keep the SPURIOUS_DEFERRED bit
				 * set in threads_handled_last as we
				 * need to account for the current
				 * interrupt as well.
				 */
				action_ret = IRQ_NONE;
			}
		} else {
			/*
			 * One of the primary handlers returned
			 * IRQ_HANDLED. So we don't care about the
			 * threaded handlers on the same line. Clear
			 * the deferred detection bit.
			 *
			 * In theory we could/should check whether the
			 * deferred bit is set and take the result of
			 * the previous run into account here as
			 * well. But it's really not worth the
			 * trouble. If every other interrupt is
			 * handled we never trigger the spurious
			 * detector. And if this is just the one out
			 * of 100k unhandled ones which is handled
			 * then we merely delay the spurious detection
			 * by one hard interrupt. Not a real problem.
			 */
			desc->threads_handled_last &= ~SPURIOUS_DEFERRED;
		}
	}

	if (unlikely(action_ret == IRQ_NONE)) {
		/*
		 * If we are seeing only the odd spurious IRQ caused by
		 * bus asynchronicity then don't eventually trigger an
		 * error; otherwise the counter becomes a doomsday timer
		 * for otherwise working systems.
		 */
		if (time_after(jiffies, desc->last_unhandled + HZ/10))
			desc->irqs_unhandled = 1;
		else
			desc->irqs_unhandled++;
		desc->last_unhandled = jiffies;
	}

	irq = irq_desc_get_irq(desc);
	if (unlikely(try_misrouted_irq(irq, desc, action_ret))) {
		int ok = misrouted_irq(irq);
		if (action_ret == IRQ_NONE)
			desc->irqs_unhandled -= ok;
	}

	if (likely(!desc->irqs_unhandled))
		return;

	/* Now getting into unhandled irq detection */
	desc->irq_count++;
	if (likely(desc->irq_count < 100000))
		return;

	desc->irq_count = 0;
	if (unlikely(desc->irqs_unhandled > 99900)) {
		/*
		 * The interrupt is stuck
		 */
		__report_bad_irq(desc, action_ret);
		/*
		 * Now kill the IRQ
		 */
		printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
		desc->istate |= IRQS_SPURIOUS_DISABLED;
		desc->depth++;
		irq_disable(desc);

		mod_timer(&poll_spurious_irq_timer,
			  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
	}
	desc->irqs_unhandled = 0;
}

bool noirqdebug __read_mostly;

int noirqdebug_setup(char *str)
{
	noirqdebug = 1;
	printk(KERN_INFO "IRQ lockup detection disabled\n");

	return 1;
}

__setup("noirqdebug", noirqdebug_setup);
module_param(noirqdebug, bool, 0644);
MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");

static int __init irqfixup_setup(char *str)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
		pr_warn("irqfixup boot option not supported with PREEMPT_RT\n");
		return 1;
	}
	irqfixup = 1;
	printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
	printk(KERN_WARNING "This may impact system performance.\n");

	return 1;
}

__setup("irqfixup", irqfixup_setup);
module_param(irqfixup, int, 0644);

static int __init irqpoll_setup(char *str)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
		pr_warn("irqpoll boot option not supported with PREEMPT_RT\n");
		return 1;
	}
	irqfixup = 2;
	printk(KERN_WARNING "Misrouted IRQ fixup and polling support enabled\n");
	printk(KERN_WARNING "This may significantly impact system performance\n");
	return 1;
}

__setup("irqpoll", irqpoll_setup);