Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/kernel/irq/spurious.c | |
3 | * | |
4 | * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar | |
5 | * | |
6 | * This file contains spurious interrupt handling. | |
7 | */ | |
8 | ||
188fd89d | 9 | #include <linux/jiffies.h> |
1da177e4 LT |
10 | #include <linux/irq.h> |
11 | #include <linux/module.h> | |
12 | #include <linux/kallsyms.h> | |
13 | #include <linux/interrupt.h> | |
9e094c17 | 14 | #include <linux/moduleparam.h> |
f84dbb91 | 15 | #include <linux/timer.h> |
1da177e4 | 16 | |
bd151412 TG |
17 | #include "internals.h" |
18 | ||
/*
 * Misrouted-IRQ recovery level: 0 = off (default), 1 = poll all other
 * lines when an interrupt goes unhandled ("irqfixup" boot option),
 * 2 = additionally poll on handled IRQF_IRQPOLL interrupts ("irqpoll").
 * See irqfixup_setup()/irqpoll_setup() below and try_misrouted_irq().
 */
static int irqfixup __read_mostly;

#define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)
static void poll_spurious_irqs(unsigned long dummy);
/* Self-rearming timer that re-polls lines disabled as spurious */
static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs, 0, 0);

/*
 * irq_poll_cpu records which CPU is currently polling; it is only
 * written here (presumably read by irq_wait_for_poll() in internals.h
 * — not referenced in this file). irq_poll_active guards against more
 * than one poller running at a time.
 */
static int irq_poll_cpu;
static atomic_t irq_poll_active;
200803df AC |
27 | /* |
28 | * Recovery handler for misrouted interrupts. | |
29 | */ | |
c7259cd7 | 30 | static int try_one_irq(int irq, struct irq_desc *desc, bool force) |
200803df | 31 | { |
f84dbb91 | 32 | struct irqaction *action; |
d3c60047 | 33 | int ok = 0, work = 0; |
200803df | 34 | |
239007b8 | 35 | raw_spin_lock(&desc->lock); |
c7259cd7 TG |
36 | |
37 | /* PER_CPU and nested thread interrupts are never polled */ | |
38 | if (desc->status & (IRQ_PER_CPU | IRQ_NESTED_THREAD)) | |
39 | goto out; | |
40 | ||
41 | /* | |
42 | * Do not poll disabled interrupts unless the spurious | |
43 | * disabled poller asks explicitely. | |
44 | */ | |
45 | if ((desc->status & IRQ_DISABLED) && !force) | |
46 | goto out; | |
47 | ||
48 | /* | |
49 | * All handlers must agree on IRQF_SHARED, so we test just the | |
50 | * first. Check for action->next as well. | |
51 | */ | |
52 | action = desc->action; | |
53 | if (!action || !(action->flags & IRQF_SHARED) || | |
54 | (action->flags & __IRQF_TIMER) || !action->next) | |
55 | goto out; | |
56 | ||
f84dbb91 EB |
57 | /* Already running on another processor */ |
58 | if (desc->status & IRQ_INPROGRESS) { | |
59 | /* | |
60 | * Already running: If it is shared get the other | |
61 | * CPU to go looking for our mystery interrupt too | |
62 | */ | |
c7259cd7 | 63 | desc->status |= IRQ_PENDING; |
fa27271b | 64 | goto out; |
c7259cd7 | 65 | } |
fa27271b TG |
66 | |
67 | /* Honour the normal IRQ locking */ | |
68 | desc->status |= IRQ_INPROGRESS; | |
69 | do { | |
70 | work++; | |
71 | desc->status &= ~IRQ_PENDING; | |
239007b8 | 72 | raw_spin_unlock(&desc->lock); |
fa27271b TG |
73 | if (handle_IRQ_event(irq, action) != IRQ_NONE) |
74 | ok = 1; | |
239007b8 | 75 | raw_spin_lock(&desc->lock); |
fa27271b TG |
76 | action = desc->action; |
77 | } while ((desc->status & IRQ_PENDING) && action); | |
78 | ||
f84dbb91 EB |
79 | desc->status &= ~IRQ_INPROGRESS; |
80 | /* | |
81 | * If we did actual work for the real IRQ line we must let the | |
82 | * IRQ controller clean up too | |
83 | */ | |
fa27271b | 84 | if (work > 1) |
bd151412 | 85 | irq_end(irq, desc); |
f84dbb91 | 86 | |
fa27271b TG |
87 | out: |
88 | raw_spin_unlock(&desc->lock); | |
f84dbb91 EB |
89 | return ok; |
90 | } | |
91 | ||
92 | static int misrouted_irq(int irq) | |
93 | { | |
e00585bb | 94 | struct irq_desc *desc; |
d3c60047 | 95 | int i, ok = 0; |
f84dbb91 | 96 | |
d05c65ff TG |
97 | if (atomic_inc_return(&irq_poll_active) == 1) |
98 | goto out; | |
99 | ||
100 | irq_poll_cpu = smp_processor_id(); | |
101 | ||
e00585bb YL |
102 | for_each_irq_desc(i, desc) { |
103 | if (!i) | |
104 | continue; | |
f84dbb91 EB |
105 | |
106 | if (i == irq) /* Already tried */ | |
107 | continue; | |
108 | ||
c7259cd7 | 109 | if (try_one_irq(i, desc, false)) |
f84dbb91 | 110 | ok = 1; |
200803df | 111 | } |
d05c65ff TG |
112 | out: |
113 | atomic_dec(&irq_poll_active); | |
200803df AC |
114 | /* So the caller can adjust the irq error counts */ |
115 | return ok; | |
116 | } | |
117 | ||
/*
 * Timer callback: periodically re-poll interrupt lines that were
 * disabled as spurious (IRQ_SPURIOUS_DISABLED), so that devices
 * sharing such a line still get serviced. Always re-arms itself.
 */
static void poll_spurious_irqs(unsigned long dummy)
{
	struct irq_desc *desc;
	int i;

	/* Only one poller at a time; back off if misrouted_irq() runs */
	if (atomic_inc_return(&irq_poll_active) != 1)
		goto out;
	irq_poll_cpu = smp_processor_id();

	for_each_irq_desc(i, desc) {
		unsigned int status;

		/* IRQ 0 is never polled */
		if (!i)
			continue;

		/* Racy but it doesn't matter */
		status = desc->status;
		barrier();
		if (!(status & IRQ_SPURIOUS_DISABLED))
			continue;

		/* Poll with interrupts off, forcing the disabled line */
		local_irq_disable();
		try_one_irq(i, desc, true);
		local_irq_enable();
	}
out:
	atomic_dec(&irq_poll_active);
	mod_timer(&poll_spurious_irq_timer,
		  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
}
148 | ||
/*
 * If 99,900 of the previous 100,000 interrupts have not been handled
 * then assume that the IRQ is stuck in some manner. Drop a diagnostic
 * and try to turn the IRQ off.
 *
 * (The other 100-of-100,000 interrupts may have been a correctly
 * functioning device sharing an IRQ with the failing one)
 *
 * NOTE: the threshold logic described above lives in note_interrupt()
 * below; this helper only prints the diagnostic — the bogus return
 * value (or "nobody cared"), a stack trace, and the list of registered
 * handlers for the line.
 */
static void
__report_bad_irq(unsigned int irq, struct irq_desc *desc,
		 irqreturn_t action_ret)
{
	struct irqaction *action;
	unsigned long flags;

	if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
		printk(KERN_ERR "irq event %d: bogus return value %x\n",
				irq, action_ret);
	} else {
		printk(KERN_ERR "irq %d: nobody cared (try booting with "
				"the \"irqpoll\" option)\n", irq);
	}
	dump_stack();
	printk(KERN_ERR "handlers:\n");

	/*
	 * We need to take desc->lock here. note_interrupt() is called
	 * w/o desc->lock held, but IRQ_PROGRESS set. We might race
	 * with something else removing an action. It's ok to take
	 * desc->lock here. See synchronize_irq().
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	action = desc->action;
	while (action) {
		printk(KERN_ERR "[<%p>]", action->handler);
		print_symbol(" (%s)",
			(unsigned long)action->handler);
		printk("\n");
		action = action->next;
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
191 | ||
06fcb0c6 | 192 | static void |
34ffdb72 | 193 | report_bad_irq(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret) |
1da177e4 LT |
194 | { |
195 | static int count = 100; | |
196 | ||
197 | if (count > 0) { | |
198 | count--; | |
199 | __report_bad_irq(irq, desc, action_ret); | |
200 | } | |
201 | } | |
202 | ||
/*
 * Decide whether note_interrupt() should go poll the other interrupt
 * lines (misrouted-IRQ recovery). Returns non-zero when polling is
 * warranted, depending on the irqfixup level and the handler result.
 */
static inline int
try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
		  irqreturn_t action_ret)
{
	struct irqaction *action;

	/* Recovery disabled (irqfixup == 0, the default) */
	if (!irqfixup)
		return 0;

	/* We didn't actually handle the IRQ - see if it was misrouted? */
	if (action_ret == IRQ_NONE)
		return 1;

	/*
	 * But for 'irqfixup == 2' we also do it for handled interrupts if
	 * they are marked as IRQF_IRQPOLL (or for irq zero, which is the
	 * traditional PC timer interrupt.. Legacy)
	 */
	if (irqfixup < 2)
		return 0;

	if (!irq)
		return 1;

	/*
	 * Since we don't get the descriptor lock, "action" can
	 * change under us. We don't really care, but we don't
	 * want to follow a NULL pointer. So tell the compiler to
	 * just load it once by using a barrier.
	 */
	action = desc->action;
	barrier();
	return action && (action->flags & IRQF_IRQPOLL);
}
237 | ||
/*
 * note_interrupt - spurious-interrupt bookkeeping, called after each
 * interrupt delivery with the handlers' combined return value.
 *
 * Tracks how many of the last 100,000 interrupts on this line went
 * unhandled; if more than 99,900 did, the line is reported and
 * disabled, and the spurious-poll timer is armed so that devices
 * sharing the dead line keep limping along. Optionally kicks the
 * misrouted-IRQ poller (see try_misrouted_irq()).
 */
void note_interrupt(unsigned int irq, struct irq_desc *desc,
		    irqreturn_t action_ret)
{
	if (unlikely(action_ret != IRQ_HANDLED)) {
		/*
		 * If we are seeing only the odd spurious IRQ caused by
		 * bus asynchronicity then don't eventually trigger an error,
		 * otherwise the counter becomes a doomsday timer for otherwise
		 * working systems
		 */
		if (time_after(jiffies, desc->last_unhandled + HZ/10))
			desc->irqs_unhandled = 1;
		else
			desc->irqs_unhandled++;
		desc->last_unhandled = jiffies;
		/* Neither IRQ_HANDLED nor IRQ_NONE: a buggy handler */
		if (unlikely(action_ret != IRQ_NONE))
			report_bad_irq(irq, desc, action_ret);
	}

	if (unlikely(try_misrouted_irq(irq, desc, action_ret))) {
		int ok = misrouted_irq(irq);
		/* A polled handler claimed it, so don't count it unhandled */
		if (action_ret == IRQ_NONE)
			desc->irqs_unhandled -= ok;
	}

	desc->irq_count++;
	if (likely(desc->irq_count < 100000))
		return;

	desc->irq_count = 0;
	if (unlikely(desc->irqs_unhandled > 99900)) {
		/*
		 * The interrupt is stuck
		 */
		__report_bad_irq(irq, desc, action_ret);
		/*
		 * Now kill the IRQ
		 */
		printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
		desc->status |= IRQ_DISABLED | IRQ_SPURIOUS_DISABLED;
		desc->depth++;
		desc->irq_data.chip->irq_disable(&desc->irq_data);

		/* Keep polling the disabled line so shared devices survive */
		mod_timer(&poll_spurious_irq_timer,
			  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
	}
	desc->irqs_unhandled = 0;
}
286 | ||
/*
 * Set by the "noirqdebug" boot option or the module parameter.
 * Presumably consulted by the callers of note_interrupt() to skip
 * spurious-irq accounting entirely — it is not read in this file.
 */
int noirqdebug __read_mostly;

/* Handler for the "noirqdebug" boot option. Always returns 1 (consumed). */
int noirqdebug_setup(char *str)
{
	noirqdebug = 1;
	printk(KERN_INFO "IRQ lockup detection disabled\n");

	return 1;
}

__setup("noirqdebug", noirqdebug_setup);
module_param(noirqdebug, bool, 0644);
MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");
1da177e4 | 300 | |
/*
 * Handler for the "irqfixup" boot option: enable level-1 misrouted-IRQ
 * recovery (poll all other lines whenever an interrupt goes unhandled).
 * Always returns 1 (option consumed).
 */
static int __init irqfixup_setup(char *str)
{
	irqfixup = 1;
	printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
	printk(KERN_WARNING "This may impact system performance.\n");

	return 1;
}

__setup("irqfixup", irqfixup_setup);
module_param(irqfixup, int, 0644);
200803df AC |
312 | |
313 | static int __init irqpoll_setup(char *str) | |
314 | { | |
315 | irqfixup = 2; | |
316 | printk(KERN_WARNING "Misrouted IRQ fixup and polling support " | |
317 | "enabled\n"); | |
318 | printk(KERN_WARNING "This may significantly impact system " | |
319 | "performance\n"); | |
320 | return 1; | |
321 | } | |
322 | ||
323 | __setup("irqpoll", irqpoll_setup); |