/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "internals.h"

/**
 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned int status;

        if (!desc)
                return;

        do {
                unsigned long flags;

                /*
                 * Wait until we're out of the critical section. This might
                 * give the wrong answer due to the lack of memory barriers.
                 */
                while (desc->status & IRQ_INPROGRESS)
                        cpu_relax();

                /* Ok, that indicated we're done: double-check carefully. */
                raw_spin_lock_irqsave(&desc->lock, flags);
                status = desc->status;
                raw_spin_unlock_irqrestore(&desc->lock, flags);

                /* Oops, that failed? */
        } while (status & IRQ_INPROGRESS);

        /*
         * We made sure that no hardirq handler is running. Now verify
         * that no threaded handlers are active.
         */
        wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
}
EXPORT_SYMBOL(synchronize_irq);

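/*
 * Illustrative sketch (editor's addition, not part of this file): how a
 * driver might quiesce its interrupt during teardown before freeing data
 * the handler touches. The names my_irq and my_stop_hw() are hypothetical.
 *
 *      my_stop_hw(dev);                // stop the device raising IRQs
 *      disable_irq_nosync(my_irq);     // block new handler invocations
 *      synchronize_irq(my_irq);        // wait for running handlers to finish
 */
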
#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

/**
 * irq_can_set_affinity - Check if the affinity of a given irq can be set
 * @irq: Interrupt to check
 */
int irq_can_set_affinity(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (CHECK_IRQ_PER_CPU(desc->status) || !desc->irq_data.chip ||
            !desc->irq_data.chip->irq_set_affinity)
                return 0;

        return 1;
}

/**
 * irq_set_thread_affinity - Notify irq threads to adjust affinity
 * @desc: irq descriptor whose affinity changed
 *
 * We just set IRQTF_AFFINITY and delegate the affinity setting
 * to the interrupt thread itself. We can not call
 * set_cpus_allowed_ptr() here as we hold desc->lock and this
 * code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
        struct irqaction *action = desc->action;

        while (action) {
                if (action->thread)
                        set_bit(IRQTF_AFFINITY, &action->thread_flags);
                action = action->next;
        }
}

/**
 * irq_set_affinity - Set the irq affinity of a given irq
 * @irq: Interrupt to set affinity
 * @cpumask: cpumask to set the affinity to
 */
int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irq_chip *chip = desc->irq_data.chip;
        unsigned long flags;

        if (!chip->irq_set_affinity)
                return -EINVAL;

        raw_spin_lock_irqsave(&desc->lock, flags);

#ifdef CONFIG_GENERIC_PENDING_IRQ
        if (desc->status & IRQ_MOVE_PCNTXT) {
                if (!chip->irq_set_affinity(&desc->irq_data, cpumask, false)) {
                        cpumask_copy(desc->irq_data.affinity, cpumask);
                        irq_set_thread_affinity(desc);
                }
        } else {
                desc->status |= IRQ_MOVE_PENDING;
                cpumask_copy(desc->pending_mask, cpumask);
        }
#else
        if (!chip->irq_set_affinity(&desc->irq_data, cpumask, false)) {
                cpumask_copy(desc->irq_data.affinity, cpumask);
                irq_set_thread_affinity(desc);
        }
#endif
        desc->status |= IRQ_AFFINITY_SET;
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        return 0;
}

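/*
 * Illustrative sketch (editor's addition, not part of this file): pinning
 * an interrupt to CPU 0, e.g. from architecture or driver setup code.
 * my_irq is hypothetical.
 *
 *      if (irq_can_set_affinity(my_irq))
 *              irq_set_affinity(my_irq, cpumask_of(0));
 */
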
int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        if (!desc)
                return -EINVAL;

        raw_spin_lock_irqsave(&desc->lock, flags);
        desc->affinity_hint = m;
        raw_spin_unlock_irqrestore(&desc->lock, flags);

        return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);

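/*
 * Illustrative sketch (editor's addition, not part of this file): a
 * multiqueue driver publishing its preferred CPU for a queue vector; the
 * hint is exported to userspace balancers via /proc/irq/<nr>/affinity_hint.
 * The queue->irq and queue->cpu names are hypothetical. Note the hint must
 * be cleared before free_irq(), as __free_irq() below warns otherwise.
 *
 *      irq_set_affinity_hint(queue->irq, cpumask_of(queue->cpu));
 *      ...
 *      irq_set_affinity_hint(queue->irq, NULL);
 *      free_irq(queue->irq, queue);
 */
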
#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
static int setup_affinity(unsigned int irq, struct irq_desc *desc)
{
        if (!irq_can_set_affinity(irq))
                return 0;

        /*
         * Preserve a userspace affinity setup, but make sure that
         * one of the targets is online.
         */
        if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
                if (cpumask_any_and(desc->irq_data.affinity, cpu_online_mask)
                    < nr_cpu_ids)
                        goto set_affinity;
                else
                        desc->status &= ~IRQ_AFFINITY_SET;
        }

        cpumask_and(desc->irq_data.affinity, cpu_online_mask, irq_default_affinity);
set_affinity:
        desc->irq_data.chip->irq_set_affinity(&desc->irq_data, desc->irq_data.affinity, false);

        return 0;
}
#else
static inline int setup_affinity(unsigned int irq, struct irq_desc *d)
{
        return irq_select_affinity(irq);
}
#endif

/*
 * Called when affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
        int ret;

        raw_spin_lock_irqsave(&desc->lock, flags);
        ret = setup_affinity(irq, desc);
        if (!ret)
                irq_set_thread_affinity(desc);
        raw_spin_unlock_irqrestore(&desc->lock, flags);

        return ret;
}

#else
static inline int setup_affinity(unsigned int irq, struct irq_desc *desc)
{
        return 0;
}
#endif

void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
{
        if (suspend) {
                if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
                        return;
                desc->status |= IRQ_SUSPENDED;
        }

        if (!desc->depth++) {
                desc->status |= IRQ_DISABLED;
                desc->irq_data.chip->irq_disable(&desc->irq_data);
        }
}

/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and enables are
 * nested.
 * Unlike disable_irq(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 *
 * This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        if (!desc)
                return;

        chip_bus_lock(desc);
        raw_spin_lock_irqsave(&desc->lock, flags);
        __disable_irq(desc, irq, false);
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and disables are
 * nested.
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc)
                return;

        disable_irq_nosync(irq);
        if (desc->action)
                synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);

void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
{
        if (resume)
                desc->status &= ~IRQ_SUSPENDED;

        switch (desc->depth) {
        case 0:
 err_out:
                WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
                break;
        case 1: {
                unsigned int status = desc->status & ~IRQ_DISABLED;

                if (desc->status & IRQ_SUSPENDED)
                        goto err_out;
                /* Prevent probing on this irq: */
                desc->status = status | IRQ_NOPROBE;
                check_irq_resend(desc, irq);
                /* fall-through */
        }
        default:
                desc->depth--;
        }
}

/**
 * enable_irq - enable handling of an irq
 * @irq: Interrupt to enable
 *
 * Undoes the effect of one call to disable_irq(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 *
 * This function may be called from IRQ context only when
 * desc->irq_data.chip->bus_lock and desc->irq_data.chip->bus_sync_unlock
 * are NULL !
 */
void enable_irq(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        if (!desc)
                return;

        if (WARN(!desc->irq_data.chip || !desc->irq_data.chip->irq_enable,
            KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
                return;

        chip_bus_lock(desc);
        raw_spin_lock_irqsave(&desc->lock, flags);
        __enable_irq(desc, irq, false);
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL(enable_irq);

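/*
 * Illustrative sketch (editor's addition, not part of this file): disable
 * and enable calls nest via desc->depth, so each disable_irq() must be
 * paired with exactly one enable_irq(). my_irq is hypothetical.
 *
 *      disable_irq(my_irq);    // depth 0 -> 1, line masked
 *      disable_irq(my_irq);    // depth 1 -> 2, still masked
 *      enable_irq(my_irq);     // depth 2 -> 1, still masked
 *      enable_irq(my_irq);     // depth 1 -> 0, line unmasked
 *      enable_irq(my_irq);     // unbalanced: triggers the WARN above
 */
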
static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
        struct irq_desc *desc = irq_to_desc(irq);
        int ret = -ENXIO;

        if (desc->irq_data.chip->irq_set_wake)
                ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

        return ret;
}

/**
 * set_irq_wake - control irq power management wakeup
 * @irq: interrupt to control
 * @on: enable/disable power management wakeup
 *
 * Enable/disable power management wakeup mode, which is
 * disabled by default. Enables and disables must match,
 * just as they match for non-wakeup mode support.
 *
 * Wakeup mode lets this IRQ wake the system from sleep
 * states like "suspend to RAM".
 */
int set_irq_wake(unsigned int irq, unsigned int on)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
        int ret = 0;

        /*
         * wakeup-capable irqs can be shared between drivers that
         * don't need to have the same sleep mode behaviors.
         */
        raw_spin_lock_irqsave(&desc->lock, flags);
        if (on) {
                if (desc->wake_depth++ == 0) {
                        ret = set_irq_wake_real(irq, on);
                        if (ret)
                                desc->wake_depth = 0;
                        else
                                desc->status |= IRQ_WAKEUP;
                }
        } else {
                if (desc->wake_depth == 0) {
                        WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
                } else if (--desc->wake_depth == 0) {
                        ret = set_irq_wake_real(irq, on);
                        if (ret)
                                desc->wake_depth = 1;
                        else
                                desc->status &= ~IRQ_WAKEUP;
                }
        }

        raw_spin_unlock_irqrestore(&desc->lock, flags);
        return ret;
}
EXPORT_SYMBOL(set_irq_wake);

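/*
 * Illustrative sketch (editor's addition, not part of this file): a driver
 * arming its interrupt as a system wakeup source around suspend/resume.
 * my_suspend(), my_resume() and dev->irq are hypothetical; like the
 * disable/enable pair, the wake enable and disable must balance.
 *
 *      static int my_suspend(struct device *d)
 *      {
 *              set_irq_wake(dev->irq, 1);      // arm IRQ as wakeup source
 *              return 0;
 *      }
 *
 *      static int my_resume(struct device *d)
 *      {
 *              set_irq_wake(dev->irq, 0);      // must balance the enable
 *              return 0;
 *      }
 */
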
/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irqaction *action;
        unsigned long flags;

        if (!desc)
                return 0;

        if (desc->status & IRQ_NOREQUEST)
                return 0;

        raw_spin_lock_irqsave(&desc->lock, flags);
        action = desc->action;
        if (action)
                if (irqflags & action->flags & IRQF_SHARED)
                        action = NULL;

        raw_spin_unlock_irqrestore(&desc->lock, flags);

        return !action;
}

void compat_irq_chip_set_default_handler(struct irq_desc *desc)
{
        /*
         * If the architecture still has not overridden
         * the flow handler then zap the default. This
         * should catch incorrect flow-type setting.
         */
        if (desc->handle_irq == &handle_bad_irq)
                desc->handle_irq = NULL;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
                      unsigned long flags)
{
        int ret;
        struct irq_chip *chip = desc->irq_data.chip;

        if (!chip || !chip->irq_set_type) {
                /*
                 * IRQF_TRIGGER_* but the PIC does not support multiple
                 * flow-types?
                 */
                pr_debug("No set_type function for IRQ %d (%s)\n", irq,
                         chip ? (chip->name ? : "unknown") : "unknown");
                return 0;
        }

        /* caller masked out all except trigger mode flags */
        ret = chip->irq_set_type(&desc->irq_data, flags);

        if (ret)
                pr_err("setting trigger mode %lu for irq %u failed (%pF)\n",
                       flags, irq, chip->irq_set_type);
        else {
                if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
                        flags |= IRQ_LEVEL;
                /* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */
                desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
                desc->status |= flags;

                if (chip != desc->irq_data.chip)
                        irq_chip_set_defaults(desc->irq_data.chip);
        }

        return ret;
}

/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
        return IRQ_WAKE_THREAD;
}

/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
        WARN(1, "Primary handler called for nested irq %d\n", irq);
        return IRQ_NONE;
}

static int irq_wait_for_interrupt(struct irqaction *action)
{
        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);

                if (test_and_clear_bit(IRQTF_RUNTHREAD,
                                       &action->thread_flags)) {
                        __set_current_state(TASK_RUNNING);
                        return 0;
                }
                schedule();
        }
        return -1;
}

/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler has finished. Unmask if the interrupt has not been disabled
 * and is marked MASKED.
 */
static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
{
again:
        chip_bus_lock(desc);
        raw_spin_lock_irq(&desc->lock);

        /*
         * Implausible though it may be, we need to protect against
         * the following scenario:
         *
         * The thread finishes faster than the hard interrupt handler
         * on the other CPU. If we unmask the irq line then the
         * interrupt can come in again, mask the line and leave due
         * to IRQ_INPROGRESS - and the irq line stays masked forever.
         */
        if (unlikely(desc->status & IRQ_INPROGRESS)) {
                raw_spin_unlock_irq(&desc->lock);
                chip_bus_sync_unlock(desc);
                cpu_relax();
                goto again;
        }

        if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) {
                desc->status &= ~IRQ_MASKED;
                desc->irq_data.chip->irq_unmask(&desc->irq_data);
        }
        raw_spin_unlock_irq(&desc->lock);
        chip_bus_sync_unlock(desc);
}

#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
        cpumask_var_t mask;

        if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
                return;

        /*
         * In case we are out of memory we set IRQTF_AFFINITY again and
         * try again next time
         */
        if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
                set_bit(IRQTF_AFFINITY, &action->thread_flags);
                return;
        }

        raw_spin_lock_irq(&desc->lock);
        cpumask_copy(mask, desc->irq_data.affinity);
        raw_spin_unlock_irq(&desc->lock);

        set_cpus_allowed_ptr(current, mask);
        free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif

/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
        static const struct sched_param param = {
                .sched_priority = MAX_USER_RT_PRIO/2,
        };
        struct irqaction *action = data;
        struct irq_desc *desc = irq_to_desc(action->irq);
        int wake, oneshot = desc->status & IRQ_ONESHOT;

        sched_setscheduler(current, SCHED_FIFO, &param);
        current->irqaction = action;

        while (!irq_wait_for_interrupt(action)) {

                irq_thread_check_affinity(desc, action);

                atomic_inc(&desc->threads_active);

                raw_spin_lock_irq(&desc->lock);
                if (unlikely(desc->status & IRQ_DISABLED)) {
                        /*
                         * CHECKME: We might need a dedicated
                         * IRQ_THREAD_PENDING flag here, which
                         * retriggers the thread in check_irq_resend()
                         * but AFAICT IRQ_PENDING should be fine as it
                         * retriggers the interrupt itself --- tglx
                         */
                        desc->status |= IRQ_PENDING;
                        raw_spin_unlock_irq(&desc->lock);
                } else {
                        raw_spin_unlock_irq(&desc->lock);

                        action->thread_fn(action->irq, action->dev_id);

                        if (oneshot)
                                irq_finalize_oneshot(action->irq, desc);
                }

                wake = atomic_dec_and_test(&desc->threads_active);

                if (wake && waitqueue_active(&desc->wait_for_threads))
                        wake_up(&desc->wait_for_threads);
        }

        /*
         * Clear irqaction. Otherwise exit_irq_thread() would complain
         * about an active irq thread going into nirvana.
         */
        current->irqaction = NULL;
        return 0;
}

/*
 * Called from do_exit()
 */
void exit_irq_thread(void)
{
        struct task_struct *tsk = current;

        if (!tsk->irqaction)
                return;

        printk(KERN_ERR
               "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
               tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);

        /*
         * Set the THREAD DIED flag to prevent further wakeups of the
         * soon to be gone threaded handler.
         */
        set_bit(IRQTF_DIED, &tsk->irqaction->flags);
}

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
        struct irqaction *old, **old_ptr;
        const char *old_name = NULL;
        unsigned long flags;
        int nested, shared = 0;
        int ret;

        if (!desc)
                return -EINVAL;

        if (desc->irq_data.chip == &no_irq_chip)
                return -ENOSYS;
        /*
         * Some drivers like serial.c use request_irq() heavily,
         * so we have to be careful not to interfere with a
         * running system.
         */
        if (new->flags & IRQF_SAMPLE_RANDOM) {
                /*
                 * This function might sleep, we want to call it first,
                 * outside of the atomic block.
                 * Yes, this might clear the entropy pool if the wrong
                 * driver is attempted to be loaded, without actually
                 * installing a new handler, but is this really a
                 * problem? Only the sysadmin is able to do this.
                 */
                rand_initialize_irq(irq);
        }

        /* Oneshot interrupts are not allowed with shared */
        if ((new->flags & IRQF_ONESHOT) && (new->flags & IRQF_SHARED))
                return -EINVAL;

        /*
         * Check whether the interrupt nests into another interrupt
         * thread.
         */
        nested = desc->status & IRQ_NESTED_THREAD;
        if (nested) {
                if (!new->thread_fn)
                        return -EINVAL;
                /*
                 * Replace the primary handler which was provided from
                 * the driver for non nested interrupt handling by the
                 * dummy function which warns when called.
                 */
                new->handler = irq_nested_primary_handler;
        }

        /*
         * Create a handler thread when a thread function is supplied
         * and the interrupt does not nest into another interrupt
         * thread.
         */
        if (new->thread_fn && !nested) {
                struct task_struct *t;

                t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
                                   new->name);
                if (IS_ERR(t))
                        return PTR_ERR(t);
                /*
                 * We keep the reference to the task struct even if
                 * the thread dies to avoid that the interrupt code
                 * references an already freed task_struct.
                 */
                get_task_struct(t);
                new->thread = t;
        }

        /*
         * The following block of code has to be executed atomically
         */
        raw_spin_lock_irqsave(&desc->lock, flags);
        old_ptr = &desc->action;
        old = *old_ptr;
        if (old) {
                /*
                 * Can't share interrupts unless both agree to and are
                 * the same type (level, edge, polarity). So both flag
                 * fields must have IRQF_SHARED set and the bits which
                 * set the trigger type must match.
                 */
                if (!((old->flags & new->flags) & IRQF_SHARED) ||
                    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK)) {
                        old_name = old->name;
                        goto mismatch;
                }

#if defined(CONFIG_IRQ_PER_CPU)
                /* All handlers must agree on per-cpuness */
                if ((old->flags & IRQF_PERCPU) !=
                    (new->flags & IRQF_PERCPU))
                        goto mismatch;
#endif

                /* add new interrupt at end of irq queue */
                do {
                        old_ptr = &old->next;
                        old = *old_ptr;
                } while (old);
                shared = 1;
        }

        if (!shared) {
                irq_chip_set_defaults(desc->irq_data.chip);

                init_waitqueue_head(&desc->wait_for_threads);

                /* Setup the type (level, edge polarity) if configured: */
                if (new->flags & IRQF_TRIGGER_MASK) {
                        ret = __irq_set_trigger(desc, irq,
                                        new->flags & IRQF_TRIGGER_MASK);

                        if (ret)
                                goto out_thread;
                } else
                        compat_irq_chip_set_default_handler(desc);
#if defined(CONFIG_IRQ_PER_CPU)
                if (new->flags & IRQF_PERCPU)
                        desc->status |= IRQ_PER_CPU;
#endif

                desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING | IRQ_ONESHOT |
                                  IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED);

                if (new->flags & IRQF_ONESHOT)
                        desc->status |= IRQ_ONESHOT;

                if (!(desc->status & IRQ_NOAUTOEN)) {
                        desc->depth = 0;
                        desc->status &= ~IRQ_DISABLED;
                        desc->irq_data.chip->irq_startup(&desc->irq_data);
                } else
                        /* Undo nested disables: */
                        desc->depth = 1;

                /* Exclude IRQ from balancing if requested */
                if (new->flags & IRQF_NOBALANCING)
                        desc->status |= IRQ_NO_BALANCING;

                /* Set default affinity mask once everything is setup */
                setup_affinity(irq, desc);

        } else if ((new->flags & IRQF_TRIGGER_MASK)
                        && (new->flags & IRQF_TRIGGER_MASK)
                                != (desc->status & IRQ_TYPE_SENSE_MASK)) {
                /* hope the handler works with the actual trigger mode... */
                pr_warning("IRQ %d uses trigger mode %d; requested %d\n",
                           irq, (int)(desc->status & IRQ_TYPE_SENSE_MASK),
                           (int)(new->flags & IRQF_TRIGGER_MASK));
        }

        new->irq = irq;
        *old_ptr = new;

        /* Reset broken irq detection when installing new handler */
        desc->irq_count = 0;
        desc->irqs_unhandled = 0;

        /*
         * Check whether we disabled the irq via the spurious handler
         * before. Reenable it and give it another chance.
         */
        if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) {
                desc->status &= ~IRQ_SPURIOUS_DISABLED;
                __enable_irq(desc, irq, false);
        }

        raw_spin_unlock_irqrestore(&desc->lock, flags);

        /*
         * Strictly no need to wake it up, but hung_task complains
         * when no hard interrupt wakes the thread up.
         */
        if (new->thread)
                wake_up_process(new->thread);

        register_irq_proc(irq, desc);
        new->dir = NULL;
        register_handler_proc(irq, new);

        return 0;

mismatch:
#ifdef CONFIG_DEBUG_SHIRQ
        if (!(new->flags & IRQF_PROBE_SHARED)) {
                printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq);
                if (old_name)
                        printk(KERN_ERR "current handler: %s\n", old_name);
                dump_stack();
        }
#endif
        ret = -EBUSY;

out_thread:
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        if (new->thread) {
                struct task_struct *t = new->thread;

                new->thread = NULL;
                if (likely(!test_bit(IRQTF_DIED, &new->thread_flags)))
                        kthread_stop(t);
                put_task_struct(t);
        }
        return ret;
}

/**
 * setup_irq - setup an interrupt
 * @irq: Interrupt line to setup
 * @act: irqaction for the interrupt
 *
 * Used to statically setup interrupts in the early boot process.
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
        struct irq_desc *desc = irq_to_desc(irq);

        return __setup_irq(irq, desc, act);
}
EXPORT_SYMBOL_GPL(setup_irq);

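/*
 * Illustrative sketch (editor's addition, not part of this file): early
 * boot code wiring up a timer interrupt with a statically allocated
 * irqaction, before the allocator needed by request_irq() is up.
 * my_timer_interrupt and MY_TIMER_IRQ are hypothetical.
 *
 *      static struct irqaction my_timer_irqaction = {
 *              .handler = my_timer_interrupt,
 *              .flags   = IRQF_TIMER,
 *              .name    = "timer",
 *      };
 *
 *      setup_irq(MY_TIMER_IRQ, &my_timer_irqaction);
 */
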
/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irqaction *action, **action_ptr;
        unsigned long flags;

        WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

        if (!desc)
                return NULL;

        raw_spin_lock_irqsave(&desc->lock, flags);

        /*
         * There can be multiple actions per IRQ descriptor, find the right
         * one based on the dev_id:
         */
        action_ptr = &desc->action;
        for (;;) {
                action = *action_ptr;

                if (!action) {
                        WARN(1, "Trying to free already-free IRQ %d\n", irq);
                        raw_spin_unlock_irqrestore(&desc->lock, flags);

                        return NULL;
                }

                if (action->dev_id == dev_id)
                        break;
                action_ptr = &action->next;
        }

        /* Found it - now remove it from the list of entries: */
        *action_ptr = action->next;

        /* Currently used only by UML, might disappear one day: */
#ifdef CONFIG_IRQ_RELEASE_METHOD
        if (desc->irq_data.chip->release)
                desc->irq_data.chip->release(irq, dev_id);
#endif

        /* If this was the last handler, shut down the IRQ line: */
        if (!desc->action) {
                desc->status |= IRQ_DISABLED;
                if (desc->irq_data.chip->irq_shutdown)
                        desc->irq_data.chip->irq_shutdown(&desc->irq_data);
                else
                        desc->irq_data.chip->irq_disable(&desc->irq_data);
        }

#ifdef CONFIG_SMP
        /* make sure affinity_hint is cleaned up */
        if (WARN_ON_ONCE(desc->affinity_hint))
                desc->affinity_hint = NULL;
#endif

        raw_spin_unlock_irqrestore(&desc->lock, flags);

        unregister_handler_proc(irq, action);

        /* Make sure it's not being used on another CPU: */
        synchronize_irq(irq);

#ifdef CONFIG_DEBUG_SHIRQ
        /*
         * It's a shared IRQ -- the driver ought to be prepared for an IRQ
         * event to happen even now that it's being freed, so let's make
         * sure that is so by doing an extra call to the handler ....
         *
         * ( We do this after actually deregistering it, to make sure that
         *   a 'real' IRQ doesn't run in parallel with our fake. )
         */
        if (action->flags & IRQF_SHARED) {
                local_irq_save(flags);
                action->handler(irq, dev_id);
                local_irq_restore(flags);
        }
#endif

        if (action->thread) {
                if (!test_bit(IRQTF_DIED, &action->thread_flags))
                        kthread_stop(action->thread);
                put_task_struct(action->thread);
        }

        return action;
}

/**
 * remove_irq - free an interrupt
 * @irq: Interrupt line to free
 * @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_irq(unsigned int irq, struct irqaction *act)
{
        __free_irq(irq, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);

/**
 * free_irq - free an interrupt allocated with request_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove an interrupt handler. The handler is removed and if the
 * interrupt line is no longer in use by any driver it is disabled.
 * On a shared IRQ the caller must ensure the interrupt is disabled
 * on the card it drives before calling this function. The function
 * does not return until any executing interrupts for this IRQ
 * have completed.
 *
 * This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc)
                return;

        chip_bus_lock(desc);
        kfree(__free_irq(irq, dev_id));
        chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL(free_irq);

/**
 * request_threaded_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *           Primary handler for threaded interrupts.
 *           If NULL and thread_fn != NULL the default
 *           primary handler is installed.
 * @thread_fn: Function called from the irq handler thread.
 *             If NULL, no irq thread is created.
 * @irqflags: Interrupt type flags
 * @devname: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. From the point this
 * call is made your handler function may be invoked. Since
 * your handler function must clear any interrupt the board
 * raises, you must take care both to initialise your hardware
 * and to set up the interrupt handler in the right order.
 *
 * If you want to set up a threaded irq handler for your device
 * then you need to supply @handler and @thread_fn. @handler is
 * still called in hard interrupt context and has to check
 * whether the interrupt originates from the device. If yes it
 * needs to disable the interrupt on the device and return
 * IRQ_WAKE_THREAD which will wake up the handler thread and run
 * @thread_fn. This split handler design is necessary to support
 * shared interrupts.
 *
 * Dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
 *
 * If your interrupt is shared you must pass a non NULL dev_id
 * as this is required when freeing the interrupt.
 *
 * Flags:
 *
 *      IRQF_SHARED             Interrupt is shared
 *      IRQF_SAMPLE_RANDOM      The interrupt can be used for entropy
 *      IRQF_TRIGGER_*          Specify active edge(s) or level
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
                         irq_handler_t thread_fn, unsigned long irqflags,
                         const char *devname, void *dev_id)
{
        struct irqaction *action;
        struct irq_desc *desc;
        int retval;

        /*
         * Sanity-check: shared interrupts must pass in a real dev-ID,
         * otherwise we'll have trouble later trying to figure out
         * which interrupt is which (messes up the interrupt freeing
         * logic etc).
         */
        if ((irqflags & IRQF_SHARED) && !dev_id)
                return -EINVAL;

        desc = irq_to_desc(irq);
        if (!desc)
                return -EINVAL;

        if (desc->status & IRQ_NOREQUEST)
                return -EINVAL;

        if (!handler) {
                if (!thread_fn)
                        return -EINVAL;
                handler = irq_default_primary_handler;
        }

        action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
        if (!action)
                return -ENOMEM;

        action->handler = handler;
        action->thread_fn = thread_fn;
        action->flags = irqflags;
        action->name = devname;
        action->dev_id = dev_id;

        chip_bus_lock(desc);
        retval = __setup_irq(irq, desc, action);
        chip_bus_sync_unlock(desc);

        if (retval)
                kfree(action);

#ifdef CONFIG_DEBUG_SHIRQ_FIXME
        if (!retval && (irqflags & IRQF_SHARED)) {
                /*
                 * It's a shared IRQ -- the driver ought to be prepared for
                 * it to happen immediately, so let's make sure....
                 * We disable the irq to make sure that a 'real' IRQ doesn't
                 * run in parallel with our fake.
                 */
                unsigned long flags;

                disable_irq(irq);
                local_irq_save(flags);

                handler(irq, dev_id);

                local_irq_restore(flags);
                enable_irq(irq);
        }
#endif
        return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
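
/*
 * Illustrative sketch (editor's addition, not part of this file): the
 * split primary/threaded handler pattern described in the kerneldoc
 * above. All my_* names are hypothetical.
 *
 *      static irqreturn_t my_quick_check(int irq, void *dev_id)
 *      {
 *              struct my_dev *dev = dev_id;
 *
 *              if (!my_irq_is_ours(dev))       // shared line: ours at all?
 *                      return IRQ_NONE;
 *              my_mask_device_irq(dev);        // quiet it, defer real work
 *              return IRQ_WAKE_THREAD;         // wake the irq/NN-mydev thread
 *      }
 *
 *      static irqreturn_t my_slow_work(int irq, void *dev_id)
 *      {
 *              // runs in the handler kthread, so it may sleep
 *              return IRQ_HANDLED;
 *      }
 *
 *      ret = request_threaded_irq(irq, my_quick_check, my_slow_work,
 *                                 IRQF_SHARED, "mydev", dev);
 */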

/**
 * request_any_context_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *           Threaded handler for threaded interrupts.
 * @flags: Interrupt type flags
 * @name: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. It selects either a
 * hardirq or threaded handling method depending on the
 * context.
 *
 * On failure, it returns a negative value. On success,
 * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */
int request_any_context_irq(unsigned int irq, irq_handler_t handler,
                            unsigned long flags, const char *name, void *dev_id)
{
        struct irq_desc *desc = irq_to_desc(irq);
        int ret;

        if (!desc)
                return -EINVAL;

        if (desc->status & IRQ_NESTED_THREAD) {
                ret = request_threaded_irq(irq, NULL, handler,
                                           flags, name, dev_id);
                return !ret ? IRQC_IS_NESTED : ret;
        }

        ret = request_irq(irq, handler, flags, name, dev_id);
        return !ret ? IRQC_IS_HARDIRQ : ret;
}
EXPORT_SYMBOL_GPL(request_any_context_irq);
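
/*
 * Illustrative sketch (editor's addition, not part of this file): a driver
 * that may sit behind a slow-bus (nested) interrupt controller and so
 * cannot know in advance whether its handler runs in hardirq or thread
 * context. my_handler and dev are hypothetical.
 *
 *      ret = request_any_context_irq(irq, my_handler, 0, "mydev", dev);
 *      if (ret < 0)
 *              return ret;
 *      // ret is IRQC_IS_HARDIRQ or IRQC_IS_NESTED; my_handler must only
 *      // do work that is legal in both contexts unless it checks which.
 */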