1/*
2 * linux/kernel/irq/manage.c
3 *
4 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
5 * Copyright (C) 2005-2006 Thomas Gleixner
6 *
7 * This file contains driver APIs to the irq subsystem.
8 */
9
10#define pr_fmt(fmt) "genirq: " fmt
11
12#include <linux/irq.h>
13#include <linux/kthread.h>
14#include <linux/module.h>
15#include <linux/random.h>
16#include <linux/interrupt.h>
17#include <linux/slab.h>
18#include <linux/sched.h>
19#include <linux/sched/rt.h>
20#include <linux/sched/task.h>
21#include <uapi/linux/sched/types.h>
22#include <linux/task_work.h>
23
24#include "internals.h"
25
26#ifdef CONFIG_IRQ_FORCED_THREADING
27__read_mostly bool force_irqthreads;
28
29static int __init setup_forced_irqthreads(char *arg)
30{
31 force_irqthreads = true;
32 return 0;
33}
34early_param("threadirqs", setup_forced_irqthreads);
35#endif
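
/*
 * Illustrative note (not part of the original source): force_irqthreads is
 * flipped by the "threadirqs" early parameter handled above, so forced
 * threading of interrupt handlers is selected from the kernel command line,
 * e.g.:
 *
 *	linux ... threadirqs
 */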
36
37static void __synchronize_hardirq(struct irq_desc *desc)
38{
39 bool inprogress;
40
41 do {
42 unsigned long flags;
43
44 /*
45 * Wait until we're out of the critical section. This might
46 * give the wrong answer due to the lack of memory barriers.
47 */
48 while (irqd_irq_inprogress(&desc->irq_data))
49 cpu_relax();
50
51 /* Ok, that indicated we're done: double-check carefully. */
52 raw_spin_lock_irqsave(&desc->lock, flags);
53 inprogress = irqd_irq_inprogress(&desc->irq_data);
54 raw_spin_unlock_irqrestore(&desc->lock, flags);
55
56 /* Oops, that failed? */
57 } while (inprogress);
58}
59
60/**
61 * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
62 * @irq: interrupt number to wait for
63 *
64 * This function waits for any pending hard IRQ handlers for this
65 * interrupt to complete before returning. If you use this
66 * function while holding a resource the IRQ handler may need you
67 * will deadlock. It does not take associated threaded handlers
68 * into account.
69 *
70 * Do not use this for shutdown scenarios where you must be sure
71 * that all parts (hardirq and threaded handler) have completed.
72 *
73 * Returns: false if a threaded handler is active.
74 *
75 * This function may be called - with care - from IRQ context.
76 */
77bool synchronize_hardirq(unsigned int irq)
78{
79 struct irq_desc *desc = irq_to_desc(irq);
80
81 if (desc) {
82 __synchronize_hardirq(desc);
83 return !atomic_read(&desc->threads_active);
84 }
85
86 return true;
87}
88EXPORT_SYMBOL(synchronize_hardirq);
89
90/**
91 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
92 * @irq: interrupt number to wait for
93 *
94 * This function waits for any pending IRQ handlers for this interrupt
95 * to complete before returning. If you use this function while
96 * holding a resource the IRQ handler may need you will deadlock.
97 *
98 * This function may be called - with care - from IRQ context.
99 */
100void synchronize_irq(unsigned int irq)
101{
102 struct irq_desc *desc = irq_to_desc(irq);
103
104 if (desc) {
105 __synchronize_hardirq(desc);
106 /*
107 * We made sure that no hardirq handler is
108 * running. Now verify that no threaded handlers are
109 * active.
110 */
111 wait_event(desc->wait_for_threads,
112 !atomic_read(&desc->threads_active));
113 }
114}
115EXPORT_SYMBOL(synchronize_irq);
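
/*
 * Usage sketch (illustrative only; the foo_* names are hypothetical): a
 * driver quiescing its device calls synchronize_irq() without holding any
 * lock its handler takes, after which handler state can safely go away.
 *
 *	static void foo_quiesce(struct foo_dev *foo)
 *	{
 *		foo_mask_device_interrupts(foo);	// stop new interrupts
 *		synchronize_irq(foo->irq);		// wait for running handlers
 *		// foo->rx_buf may be freed now; no handler can still touch it
 *	}
 */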
116
117#ifdef CONFIG_SMP
118cpumask_var_t irq_default_affinity;
119
120static bool __irq_can_set_affinity(struct irq_desc *desc)
121{
122 if (!desc || !irqd_can_balance(&desc->irq_data) ||
123 !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
124 return false;
125 return true;
126}
127
128/**
129 * irq_can_set_affinity - Check if the affinity of a given irq can be set
130 * @irq: Interrupt to check
131 *
132 */
133int irq_can_set_affinity(unsigned int irq)
134{
135 return __irq_can_set_affinity(irq_to_desc(irq));
136}
137
138/**
139 * irq_can_set_affinity_usr - Check if affinity of an irq can be set from user space
140 * @irq: Interrupt to check
141 *
142 * Like irq_can_set_affinity() above, but additionally checks for the
143 * AFFINITY_MANAGED flag.
144 */
145bool irq_can_set_affinity_usr(unsigned int irq)
146{
147 struct irq_desc *desc = irq_to_desc(irq);
148
149 return __irq_can_set_affinity(desc) &&
150 !irqd_affinity_is_managed(&desc->irq_data);
151}
152
153/**
154 * irq_set_thread_affinity - Notify irq threads to adjust affinity
155 * @desc: irq descriptor whose affinity has changed
156 *
157 * We just set IRQTF_AFFINITY and delegate the affinity setting
158 * to the interrupt thread itself. We can not call
159 * set_cpus_allowed_ptr() here as we hold desc->lock and this
160 * code can be called from hard interrupt context.
161 */
162void irq_set_thread_affinity(struct irq_desc *desc)
163{
164 struct irqaction *action;
165
166 for_each_action_of_desc(desc, action)
167 if (action->thread)
168 set_bit(IRQTF_AFFINITY, &action->thread_flags);
169}
170
171int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
172 bool force)
173{
174 struct irq_desc *desc = irq_data_to_desc(data);
175 struct irq_chip *chip = irq_data_get_irq_chip(data);
176 int ret;
177
178 ret = chip->irq_set_affinity(data, mask, force);
179 switch (ret) {
180 case IRQ_SET_MASK_OK:
181 case IRQ_SET_MASK_OK_DONE:
182 cpumask_copy(desc->irq_common_data.affinity, mask);
183 case IRQ_SET_MASK_OK_NOCOPY:
184 irq_set_thread_affinity(desc);
185 ret = 0;
186 }
187
188 return ret;
189}
190
191int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
192 bool force)
193{
194 struct irq_chip *chip = irq_data_get_irq_chip(data);
195 struct irq_desc *desc = irq_data_to_desc(data);
196 int ret = 0;
197
198 if (!chip || !chip->irq_set_affinity)
199 return -EINVAL;
200
201 if (irq_can_move_pcntxt(data)) {
202 ret = irq_do_set_affinity(data, mask, force);
203 } else {
204 irqd_set_move_pending(data);
205 irq_copy_pending(desc, mask);
206 }
207
208 if (desc->affinity_notify) {
209 kref_get(&desc->affinity_notify->kref);
210 schedule_work(&desc->affinity_notify->work);
211 }
212 irqd_set(data, IRQD_AFFINITY_SET);
213
214 return ret;
215}
216
217int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
218{
219 struct irq_desc *desc = irq_to_desc(irq);
220 unsigned long flags;
221 int ret;
222
223 if (!desc)
224 return -EINVAL;
225
226 raw_spin_lock_irqsave(&desc->lock, flags);
227 ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
228 raw_spin_unlock_irqrestore(&desc->lock, flags);
229 return ret;
230}
231
232int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
233{
234 unsigned long flags;
235 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
236
237 if (!desc)
238 return -EINVAL;
239 desc->affinity_hint = m;
240 irq_put_desc_unlock(desc, flags);
241 /* set the initial affinity to prevent every interrupt being on CPU0 */
242 if (m)
243 __irq_set_affinity(irq, m, false);
244 return 0;
245}
246EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
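
/*
 * Usage sketch (illustrative only; the foo_* names are hypothetical): a
 * multi-queue driver spreads its per-queue vectors over the online CPUs and
 * publishes the chosen mask via irq_set_affinity_hint() so user space
 * (e.g. irqbalance) can honour it. The hint must be cleared with
 * irq_set_affinity_hint(irq, NULL) before the interrupt is freed.
 *
 *	static void foo_spread_queue_irqs(struct foo_dev *foo)
 *	{
 *		int i;
 *
 *		for (i = 0; i < foo->nr_queues; i++)
 *			irq_set_affinity_hint(foo->queue_irq[i],
 *					      cpumask_of(i % num_online_cpus()));
 *	}
 */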
247
248static void irq_affinity_notify(struct work_struct *work)
249{
250 struct irq_affinity_notify *notify =
251 container_of(work, struct irq_affinity_notify, work);
252 struct irq_desc *desc = irq_to_desc(notify->irq);
253 cpumask_var_t cpumask;
254 unsigned long flags;
255
256 if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
257 goto out;
258
259 raw_spin_lock_irqsave(&desc->lock, flags);
260 if (irq_move_pending(&desc->irq_data))
261 irq_get_pending(cpumask, desc);
262 else
263 cpumask_copy(cpumask, desc->irq_common_data.affinity);
264 raw_spin_unlock_irqrestore(&desc->lock, flags);
265
266 notify->notify(notify, cpumask);
267
268 free_cpumask_var(cpumask);
269out:
270 kref_put(&notify->kref, notify->release);
271}
272
273/**
274 * irq_set_affinity_notifier - control notification of IRQ affinity changes
275 * @irq: Interrupt for which to enable/disable notification
276 * @notify: Context for notification, or %NULL to disable
277 * notification. Function pointers must be initialised;
278 * the other fields will be initialised by this function.
279 *
280 * Must be called in process context. Notification may only be enabled
281 * after the IRQ is allocated and must be disabled before the IRQ is
282 * freed using free_irq().
283 */
284int
285irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
286{
287 struct irq_desc *desc = irq_to_desc(irq);
288 struct irq_affinity_notify *old_notify;
289 unsigned long flags;
290
291 /* The release function is promised process context */
292 might_sleep();
293
294 if (!desc)
295 return -EINVAL;
296
297 /* Complete initialisation of *notify */
298 if (notify) {
299 notify->irq = irq;
300 kref_init(&notify->kref);
301 INIT_WORK(&notify->work, irq_affinity_notify);
302 }
303
304 raw_spin_lock_irqsave(&desc->lock, flags);
305 old_notify = desc->affinity_notify;
306 desc->affinity_notify = notify;
307 raw_spin_unlock_irqrestore(&desc->lock, flags);
308
309 if (old_notify)
310 kref_put(&old_notify->kref, old_notify->release);
311
312 return 0;
313}
314EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
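
/*
 * Usage sketch (illustrative only; the foo_* names are hypothetical): the
 * caller embeds a struct irq_affinity_notify, fills in notify() and
 * release(), and registers it. It must be unregistered by passing NULL
 * before free_irq().
 *
 *	static void foo_affinity_notify(struct irq_affinity_notify *notify,
 *					const cpumask_t *mask)
 *	{
 *		struct foo_dev *foo = container_of(notify, struct foo_dev,
 *						   affinity_notify);
 *
 *		foo_retarget_queues(foo, mask);		// react to new affinity
 *	}
 *
 *	static void foo_affinity_release(struct kref *ref)
 *	{
 *		// nothing to free: the notifier is embedded in struct foo_dev
 *	}
 *
 *	foo->affinity_notify.notify = foo_affinity_notify;
 *	foo->affinity_notify.release = foo_affinity_release;
 *	irq_set_affinity_notifier(foo->irq, &foo->affinity_notify);
 *	...
 *	irq_set_affinity_notifier(foo->irq, NULL);	// before free_irq()
 */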
315
316#ifndef CONFIG_AUTO_IRQ_AFFINITY
317/*
318 * Generic version of the affinity autoselector.
319 */
320int irq_setup_affinity(struct irq_desc *desc)
321{
322 struct cpumask *set = irq_default_affinity;
323 int ret, node = irq_desc_get_node(desc);
324 static DEFINE_RAW_SPINLOCK(mask_lock);
325 static struct cpumask mask;
326
327 /* Excludes PER_CPU and NO_BALANCE interrupts */
328 if (!__irq_can_set_affinity(desc))
329 return 0;
330
331 raw_spin_lock(&mask_lock);
332 /*
333 * Preserve the managed affinity setting and a userspace affinity
334 * setup, but make sure that one of the targets is online.
335 */
336 if (irqd_affinity_is_managed(&desc->irq_data) ||
337 irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
338 if (cpumask_intersects(desc->irq_common_data.affinity,
339 cpu_online_mask))
340 set = desc->irq_common_data.affinity;
341 else
342 irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
343 }
344
345 cpumask_and(&mask, cpu_online_mask, set);
346 if (node != NUMA_NO_NODE) {
347 const struct cpumask *nodemask = cpumask_of_node(node);
348
349 /* make sure at least one of the cpus in nodemask is online */
350 if (cpumask_intersects(&mask, nodemask))
351 cpumask_and(&mask, &mask, nodemask);
352 }
353 ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
354 raw_spin_unlock(&mask_lock);
355 return ret;
356}
357#else
358/* Wrapper for ALPHA specific affinity selector magic */
359int irq_setup_affinity(struct irq_desc *desc)
360{
361 return irq_select_affinity(irq_desc_get_irq(desc));
362}
363#endif
364
365/*
366 * Called when a bogus affinity is set via /proc/irq
367 */
368int irq_select_affinity_usr(unsigned int irq)
369{
370 struct irq_desc *desc = irq_to_desc(irq);
371 unsigned long flags;
372 int ret;
373
374 raw_spin_lock_irqsave(&desc->lock, flags);
375 ret = irq_setup_affinity(desc);
376 raw_spin_unlock_irqrestore(&desc->lock, flags);
377 return ret;
378}
379#endif
380
381/**
382 * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
383 * @irq: interrupt number to set affinity
384 * @vcpu_info: vCPU specific data
385 *
386 * This function uses the vCPU specific data to set the vCPU
387 * affinity for an irq. The vCPU specific data is passed from
388 * outside, such as KVM. One example code path is as below:
389 * KVM -> IOMMU -> irq_set_vcpu_affinity().
390 */
391int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
392{
393 unsigned long flags;
394 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
395 struct irq_data *data;
396 struct irq_chip *chip;
397 int ret = -ENOSYS;
398
399 if (!desc)
400 return -EINVAL;
401
402 data = irq_desc_get_irq_data(desc);
403 chip = irq_data_get_irq_chip(data);
404 if (chip && chip->irq_set_vcpu_affinity)
405 ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
406 irq_put_desc_unlock(desc, flags);
407
408 return ret;
409}
410EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
411
412void __disable_irq(struct irq_desc *desc)
413{
414 if (!desc->depth++)
415 irq_disable(desc);
416}
417
418static int __disable_irq_nosync(unsigned int irq)
419{
420 unsigned long flags;
421 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
422
423 if (!desc)
424 return -EINVAL;
425 __disable_irq(desc);
426 irq_put_desc_busunlock(desc, flags);
427 return 0;
428}
429
430/**
431 * disable_irq_nosync - disable an irq without waiting
432 * @irq: Interrupt to disable
433 *
434 * Disable the selected interrupt line. Disables and Enables are
435 * nested.
436 * Unlike disable_irq(), this function does not ensure existing
437 * instances of the IRQ handler have completed before returning.
438 *
439 * This function may be called from IRQ context.
440 */
441void disable_irq_nosync(unsigned int irq)
442{
443 __disable_irq_nosync(irq);
444}
445EXPORT_SYMBOL(disable_irq_nosync);
446
447/**
448 * disable_irq - disable an irq and wait for completion
449 * @irq: Interrupt to disable
450 *
451 * Disable the selected interrupt line. Enables and Disables are
452 * nested.
453 * This function waits for any pending IRQ handlers for this interrupt
454 * to complete before returning. If you use this function while
455 * holding a resource the IRQ handler may need you will deadlock.
456 *
457 * This function may be called - with care - from IRQ context.
458 */
459void disable_irq(unsigned int irq)
460{
461 if (!__disable_irq_nosync(irq))
462 synchronize_irq(irq);
463}
464EXPORT_SYMBOL(disable_irq);
465
466/**
467 * disable_hardirq - disables an irq and waits for hardirq completion
468 * @irq: Interrupt to disable
469 *
470 * Disable the selected interrupt line. Enables and Disables are
471 * nested.
472 * This function waits for any pending hard IRQ handlers for this
473 * interrupt to complete before returning. If you use this function while
474 * holding a resource the hard IRQ handler may need you will deadlock.
475 *
476 * When used to optimistically disable an interrupt from atomic context
477 * the return value must be checked.
478 *
479 * Returns: false if a threaded handler is active.
480 *
481 * This function may be called - with care - from IRQ context.
482 */
483bool disable_hardirq(unsigned int irq)
484{
485 if (!__disable_irq_nosync(irq))
486 return synchronize_hardirq(irq);
487
488 return false;
489}
490EXPORT_SYMBOL_GPL(disable_hardirq);
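
/*
 * Usage sketch (illustrative only): optimistic disable from atomic context,
 * netpoll style. The line is disabled in either case; the return value only
 * tells the caller whether no hard or threaded handler can still be running,
 * so enable_irq() must follow regardless.
 *
 *	if (disable_hardirq(irq))
 *		foo_poll_device(foo);	// hypothetical polling routine
 *	enable_irq(irq);
 */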
491
492void __enable_irq(struct irq_desc *desc)
493{
494 switch (desc->depth) {
495 case 0:
496 err_out:
497 WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
498 irq_desc_get_irq(desc));
499 break;
500 case 1: {
501 if (desc->istate & IRQS_SUSPENDED)
502 goto err_out;
503 /* Prevent probing on this irq: */
504 irq_settings_set_noprobe(desc);
505 /*
506 * Call irq_startup() not irq_enable() here because the
507 * interrupt might be marked NOAUTOEN. So irq_startup()
508 * needs to be invoked when it gets enabled the first
509 * time. If it was already started up, then irq_startup()
510 * will invoke irq_enable() under the hood.
511 */
512 irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
513 break;
514 }
515 default:
516 desc->depth--;
517 }
518}
519
520/**
521 * enable_irq - enable handling of an irq
522 * @irq: Interrupt to enable
523 *
524 * Undoes the effect of one call to disable_irq(). If this
525 * matches the last disable, processing of interrupts on this
526 * IRQ line is re-enabled.
527 *
528 * This function may be called from IRQ context only when
529 * desc->irq_data.chip->bus_lock and desc->irq_data.chip->bus_sync_unlock are NULL!
530 */
531void enable_irq(unsigned int irq)
532{
533 unsigned long flags;
534 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
535
536 if (!desc)
537 return;
538 if (WARN(!desc->irq_data.chip,
539 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
540 goto out;
541
542 __enable_irq(desc);
543out:
544 irq_put_desc_busunlock(desc, flags);
545}
546EXPORT_SYMBOL(enable_irq);
547
548static int set_irq_wake_real(unsigned int irq, unsigned int on)
549{
550 struct irq_desc *desc = irq_to_desc(irq);
551 int ret = -ENXIO;
552
553 if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
554 return 0;
555
556 if (desc->irq_data.chip->irq_set_wake)
557 ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
558
559 return ret;
560}
561
562/**
563 * irq_set_irq_wake - control irq power management wakeup
564 * @irq: interrupt to control
565 * @on: enable/disable power management wakeup
566 *
567 * Enable/disable power management wakeup mode, which is
568 * disabled by default. Enables and disables must match,
569 * just as they match for non-wakeup mode support.
570 *
571 * Wakeup mode lets this IRQ wake the system from sleep
572 * states like "suspend to RAM".
573 */
574int irq_set_irq_wake(unsigned int irq, unsigned int on)
575{
576 unsigned long flags;
577 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
578 int ret = 0;
579
580 if (!desc)
581 return -EINVAL;
582
583 /* wakeup-capable irqs can be shared between drivers that
584 * don't need to have the same sleep mode behaviors.
585 */
586 if (on) {
587 if (desc->wake_depth++ == 0) {
588 ret = set_irq_wake_real(irq, on);
589 if (ret)
590 desc->wake_depth = 0;
591 else
592 irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
593 }
594 } else {
595 if (desc->wake_depth == 0) {
596 WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
597 } else if (--desc->wake_depth == 0) {
598 ret = set_irq_wake_real(irq, on);
599 if (ret)
600 desc->wake_depth = 1;
601 else
602 irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
603 }
604 }
605 irq_put_desc_busunlock(desc, flags);
606 return ret;
607}
608EXPORT_SYMBOL(irq_set_irq_wake);
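
/*
 * Usage sketch (illustrative only; the foo_* names are hypothetical): a
 * driver typically arms its interrupt as a wakeup source in its suspend
 * callback and disarms it symmetrically in resume, keeping the wake
 * enable/disable calls balanced as required above.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo_dev *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			irq_set_irq_wake(foo->irq, 1);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo_dev *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			irq_set_irq_wake(foo->irq, 0);
 *		return 0;
 *	}
 */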
609
610/*
611 * Internal function that tells the architecture code whether a
612 * particular irq has been exclusively allocated or is available
613 * for driver use.
614 */
615int can_request_irq(unsigned int irq, unsigned long irqflags)
616{
617 unsigned long flags;
618 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
619 int canrequest = 0;
620
621 if (!desc)
622 return 0;
623
624 if (irq_settings_can_request(desc)) {
625 if (!desc->action ||
626 irqflags & desc->action->flags & IRQF_SHARED)
627 canrequest = 1;
628 }
629 irq_put_desc_unlock(desc, flags);
630 return canrequest;
631}
632
633int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
634{
635 struct irq_chip *chip = desc->irq_data.chip;
636 int ret, unmask = 0;
637
638 if (!chip || !chip->irq_set_type) {
639 /*
640 * IRQF_TRIGGER_* but the PIC does not support multiple
641 * flow-types?
642 */
643 pr_debug("No set_type function for IRQ %d (%s)\n",
644 irq_desc_get_irq(desc),
645 chip ? (chip->name ? : "unknown") : "unknown");
646 return 0;
647 }
648
649 if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
650 if (!irqd_irq_masked(&desc->irq_data))
651 mask_irq(desc);
652 if (!irqd_irq_disabled(&desc->irq_data))
653 unmask = 1;
654 }
655
656 /* Mask all flags except trigger mode */
657 flags &= IRQ_TYPE_SENSE_MASK;
658 ret = chip->irq_set_type(&desc->irq_data, flags);
659
660 switch (ret) {
661 case IRQ_SET_MASK_OK:
662 case IRQ_SET_MASK_OK_DONE:
663 irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
664 irqd_set(&desc->irq_data, flags);
665
666 case IRQ_SET_MASK_OK_NOCOPY:
667 flags = irqd_get_trigger_type(&desc->irq_data);
668 irq_settings_set_trigger_mask(desc, flags);
669 irqd_clear(&desc->irq_data, IRQD_LEVEL);
670 irq_settings_clr_level(desc);
671 if (flags & IRQ_TYPE_LEVEL_MASK) {
672 irq_settings_set_level(desc);
673 irqd_set(&desc->irq_data, IRQD_LEVEL);
674 }
675
676 ret = 0;
677 break;
678 default:
679 pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
680 flags, irq_desc_get_irq(desc), chip->irq_set_type);
681 }
682 if (unmask)
683 unmask_irq(desc);
684 return ret;
685}
686
687#ifdef CONFIG_HARDIRQS_SW_RESEND
688int irq_set_parent(int irq, int parent_irq)
689{
690 unsigned long flags;
691 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
692
693 if (!desc)
694 return -EINVAL;
695
696 desc->parent_irq = parent_irq;
697
698 irq_put_desc_unlock(desc, flags);
699 return 0;
700}
701EXPORT_SYMBOL_GPL(irq_set_parent);
702#endif
703
704/*
705 * Default primary interrupt handler for threaded interrupts. Is
706 * assigned as primary handler when request_threaded_irq is called
707 * with handler == NULL. Useful for oneshot interrupts.
708 */
709static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
710{
711 return IRQ_WAKE_THREAD;
712}
713
714/*
715 * Primary handler for nested threaded interrupts. Should never be
716 * called.
717 */
718static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
719{
720 WARN(1, "Primary handler called for nested irq %d\n", irq);
721 return IRQ_NONE;
722}
723
724static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
725{
726 WARN(1, "Secondary action handler called for irq %d\n", irq);
727 return IRQ_NONE;
728}
729
730static int irq_wait_for_interrupt(struct irqaction *action)
731{
732 set_current_state(TASK_INTERRUPTIBLE);
733
734 while (!kthread_should_stop()) {
735
736 if (test_and_clear_bit(IRQTF_RUNTHREAD,
737 &action->thread_flags)) {
738 __set_current_state(TASK_RUNNING);
739 return 0;
740 }
741 schedule();
742 set_current_state(TASK_INTERRUPTIBLE);
743 }
744 __set_current_state(TASK_RUNNING);
745 return -1;
746}
747
748/*
749 * Oneshot interrupts keep the irq line masked until the threaded
750 * handler has finished. Unmask if the interrupt has not been disabled and
751 * is marked MASKED.
752 */
753static void irq_finalize_oneshot(struct irq_desc *desc,
754 struct irqaction *action)
755{
756 if (!(desc->istate & IRQS_ONESHOT) ||
757 action->handler == irq_forced_secondary_handler)
758 return;
759again:
760 chip_bus_lock(desc);
761 raw_spin_lock_irq(&desc->lock);
762
763 /*
764 * Implausible though it may be, we need to protect ourselves
765 * against the following scenario:
766 *
767 * The thread finishes before the hard interrupt handler on the
768 * other CPU. If we unmask the irq line then the interrupt can
769 * come in again, mask the line and leave due to IRQS_INPROGRESS,
770 * and the irq line stays masked forever.
771 *
772 * This also serializes the state of shared oneshot handlers
773 * versus "desc->threads_oneshot |= action->thread_mask;" in
774 * irq_wake_thread(). See the comment there which explains the
775 * serialization.
776 */
777 if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
778 raw_spin_unlock_irq(&desc->lock);
779 chip_bus_sync_unlock(desc);
780 cpu_relax();
781 goto again;
782 }
783
784 /*
785 * Now check again, whether the thread should run. Otherwise
786 * we would clear the threads_oneshot bit of this thread which
787 * was just set.
788 */
789 if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
790 goto out_unlock;
791
792 desc->threads_oneshot &= ~action->thread_mask;
793
794 if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
795 irqd_irq_masked(&desc->irq_data))
796 unmask_threaded_irq(desc);
797
798out_unlock:
799 raw_spin_unlock_irq(&desc->lock);
800 chip_bus_sync_unlock(desc);
801}
802
803#ifdef CONFIG_SMP
804/*
805 * Check whether we need to change the affinity of the interrupt thread.
806 */
807static void
808irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
809{
810 cpumask_var_t mask;
811 bool valid = true;
812
813 if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
814 return;
815
816 /*
817 * In case we are out of memory we set IRQTF_AFFINITY again and
818 * try again next time
819 */
820 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
821 set_bit(IRQTF_AFFINITY, &action->thread_flags);
822 return;
823 }
824
825 raw_spin_lock_irq(&desc->lock);
826 /*
827 * This code is triggered unconditionally. Check the affinity
828 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
829 */
830 if (cpumask_available(desc->irq_common_data.affinity))
831 cpumask_copy(mask, desc->irq_common_data.affinity);
832 else
833 valid = false;
834 raw_spin_unlock_irq(&desc->lock);
835
836 if (valid)
837 set_cpus_allowed_ptr(current, mask);
838 free_cpumask_var(mask);
839}
840#else
841static inline void
842irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
843#endif
844
845/*
846 * Interrupts which are not explicitly requested as threaded
847 * interrupts rely on the implicit bh/preempt disable of the hard irq
848 * context. So we need to disable bh here to avoid deadlocks and other
849 * side effects.
850 */
851static irqreturn_t
852irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
853{
854 irqreturn_t ret;
855
856 local_bh_disable();
857 ret = action->thread_fn(action->irq, action->dev_id);
858 irq_finalize_oneshot(desc, action);
859 local_bh_enable();
860 return ret;
861}
862
863/*
864 * Interrupts explicitly requested as threaded interrupts want to be
865 * preemptible - many of them need to sleep and wait for slow buses to
866 * complete.
867 */
868static irqreturn_t irq_thread_fn(struct irq_desc *desc,
869 struct irqaction *action)
870{
871 irqreturn_t ret;
872
873 ret = action->thread_fn(action->irq, action->dev_id);
874 irq_finalize_oneshot(desc, action);
875 return ret;
876}
877
878static void wake_threads_waitq(struct irq_desc *desc)
879{
880 if (atomic_dec_and_test(&desc->threads_active))
881 wake_up(&desc->wait_for_threads);
882}
883
884static void irq_thread_dtor(struct callback_head *unused)
885{
886 struct task_struct *tsk = current;
887 struct irq_desc *desc;
888 struct irqaction *action;
889
890 if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
891 return;
892
893 action = kthread_data(tsk);
894
895 pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
896 tsk->comm, tsk->pid, action->irq);
897
898
899 desc = irq_to_desc(action->irq);
900 /*
901 * If IRQTF_RUNTHREAD is set, we need to decrement
902 * desc->threads_active and wake possible waiters.
903 */
904 if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
905 wake_threads_waitq(desc);
906
907 /* Prevent a stale desc->threads_oneshot */
908 irq_finalize_oneshot(desc, action);
909}
910
911static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
912{
913 struct irqaction *secondary = action->secondary;
914
915 if (WARN_ON_ONCE(!secondary))
916 return;
917
918 raw_spin_lock_irq(&desc->lock);
919 __irq_wake_thread(desc, secondary);
920 raw_spin_unlock_irq(&desc->lock);
921}
922
923/*
924 * Interrupt handler thread
925 */
926static int irq_thread(void *data)
927{
928 struct callback_head on_exit_work;
929 struct irqaction *action = data;
930 struct irq_desc *desc = irq_to_desc(action->irq);
931 irqreturn_t (*handler_fn)(struct irq_desc *desc,
932 struct irqaction *action);
933
934 if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
935 &action->thread_flags))
936 handler_fn = irq_forced_thread_fn;
937 else
938 handler_fn = irq_thread_fn;
939
940 init_task_work(&on_exit_work, irq_thread_dtor);
941 task_work_add(current, &on_exit_work, false);
942
943 irq_thread_check_affinity(desc, action);
944
945 while (!irq_wait_for_interrupt(action)) {
946 irqreturn_t action_ret;
947
948 irq_thread_check_affinity(desc, action);
949
950 action_ret = handler_fn(desc, action);
951 if (action_ret == IRQ_HANDLED)
952 atomic_inc(&desc->threads_handled);
953 if (action_ret == IRQ_WAKE_THREAD)
954 irq_wake_secondary(desc, action);
955
956 wake_threads_waitq(desc);
957 }
958
959 /*
960 * This is the regular exit path. __free_irq() is stopping the
961 * thread via kthread_stop() after calling
962 * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
963 * oneshot mask bit can be set. We cannot verify that as we
964 * cannot touch the oneshot mask at this point anymore as
965 * __setup_irq() might have given out current's thread_mask
966 * again.
967 */
968 task_work_cancel(current, irq_thread_dtor);
969 return 0;
970}
971
972/**
973 * irq_wake_thread - wake the irq thread for the action identified by dev_id
974 * @irq: Interrupt line
975 * @dev_id: Device identity for which the thread should be woken
976 *
977 */
978void irq_wake_thread(unsigned int irq, void *dev_id)
979{
980 struct irq_desc *desc = irq_to_desc(irq);
981 struct irqaction *action;
982 unsigned long flags;
983
984 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
985 return;
986
987 raw_spin_lock_irqsave(&desc->lock, flags);
988 for_each_action_of_desc(desc, action) {
989 if (action->dev_id == dev_id) {
990 if (action->thread)
991 __irq_wake_thread(desc, action);
992 break;
993 }
994 }
995 raw_spin_unlock_irqrestore(&desc->lock, flags);
996}
997EXPORT_SYMBOL_GPL(irq_wake_thread);
998
999static int irq_setup_forced_threading(struct irqaction *new)
1000{
1001 if (!force_irqthreads)
1002 return 0;
1003 if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
1004 return 0;
1005
1006 new->flags |= IRQF_ONESHOT;
1007
1008 /*
1009 * Handle the case where we have a real primary handler and a
1010 * thread handler. We force thread them as well by creating a
1011 * secondary action.
1012 */
1013 if (new->handler != irq_default_primary_handler && new->thread_fn) {
1014 /* Allocate the secondary action */
1015 new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1016 if (!new->secondary)
1017 return -ENOMEM;
1018 new->secondary->handler = irq_forced_secondary_handler;
1019 new->secondary->thread_fn = new->thread_fn;
1020 new->secondary->dev_id = new->dev_id;
1021 new->secondary->irq = new->irq;
1022 new->secondary->name = new->name;
1023 }
1024 /* Deal with the primary handler */
1025 set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
1026 new->thread_fn = new->handler;
1027 new->handler = irq_default_primary_handler;
1028 return 0;
1029}
1030
1031static int irq_request_resources(struct irq_desc *desc)
1032{
1033 struct irq_data *d = &desc->irq_data;
1034 struct irq_chip *c = d->chip;
1035
1036 return c->irq_request_resources ? c->irq_request_resources(d) : 0;
1037}
1038
1039static void irq_release_resources(struct irq_desc *desc)
1040{
1041 struct irq_data *d = &desc->irq_data;
1042 struct irq_chip *c = d->chip;
1043
1044 if (c->irq_release_resources)
1045 c->irq_release_resources(d);
1046}
1047
1048static int
1049setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
1050{
1051 struct task_struct *t;
1052 struct sched_param param = {
1053 .sched_priority = MAX_USER_RT_PRIO/2,
1054 };
1055
1056 if (!secondary) {
1057 t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
1058 new->name);
1059 } else {
1060 t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
1061 new->name);
1062 param.sched_priority -= 1;
1063 }
1064
1065 if (IS_ERR(t))
1066 return PTR_ERR(t);
1067
1068 sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
1069
1070 /*
1071 * We keep the reference to the task struct even if
1072 * the thread dies to avoid that the interrupt code
1073 * references an already freed task_struct.
1074 */
1075 get_task_struct(t);
1076 new->thread = t;
1077 /*
1078 * Tell the thread to set its affinity. This is
1079 * important for shared interrupt handlers as we do
1080 * not invoke setup_affinity() for the secondary
1081 * handlers as everything is already set up. Even for
1082 * interrupts marked with IRQF_NO_BALANCE this is
1083 * correct as we want the thread to move to the cpu(s)
1084 * on which the requesting code placed the interrupt.
1085 */
1086 set_bit(IRQTF_AFFINITY, &new->thread_flags);
1087 return 0;
1088}
1089
1090/*
1091 * Internal function to register an irqaction - typically used to
1092 * allocate special interrupts that are part of the architecture.
1093 */
1094static int
1095__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1096{
1097 struct irqaction *old, **old_ptr;
1098 unsigned long flags, thread_mask = 0;
1099 int ret, nested, shared = 0;
1100
1101 if (!desc)
1102 return -EINVAL;
1103
1104 if (desc->irq_data.chip == &no_irq_chip)
1105 return -ENOSYS;
1106 if (!try_module_get(desc->owner))
1107 return -ENODEV;
1108
1109 new->irq = irq;
1110
1111 /*
1112 * If the trigger type is not specified by the caller,
1113 * then use the default for this interrupt.
1114 */
1115 if (!(new->flags & IRQF_TRIGGER_MASK))
1116 new->flags |= irqd_get_trigger_type(&desc->irq_data);
1117
1118 /*
1119 * Check whether the interrupt nests into another interrupt
1120 * thread.
1121 */
1122 nested = irq_settings_is_nested_thread(desc);
1123 if (nested) {
1124 if (!new->thread_fn) {
1125 ret = -EINVAL;
1126 goto out_mput;
1127 }
1128 /*
1129 * Replace the primary handler which was provided from
1130 * the driver for non nested interrupt handling by the
1131 * dummy function which warns when called.
1132 */
1133 new->handler = irq_nested_primary_handler;
1134 } else {
1135 if (irq_settings_can_thread(desc)) {
1136 ret = irq_setup_forced_threading(new);
1137 if (ret)
1138 goto out_mput;
1139 }
1140 }
1141
1142 /*
1143 * Create a handler thread when a thread function is supplied
1144 * and the interrupt does not nest into another interrupt
1145 * thread.
1146 */
1147 if (new->thread_fn && !nested) {
1148 ret = setup_irq_thread(new, irq, false);
1149 if (ret)
1150 goto out_mput;
1151 if (new->secondary) {
1152 ret = setup_irq_thread(new->secondary, irq, true);
1153 if (ret)
1154 goto out_thread;
1155 }
1156 }
1157
1158 /*
1159 * Drivers are often written to work w/o knowledge about the
1160 * underlying irq chip implementation, so a request for a
1161 * threaded irq without a primary hard irq context handler
1162 * requires the ONESHOT flag to be set. Some irq chips like
1163 * MSI based interrupts are per se one shot safe. Check the
1164 * chip flags, so we can avoid the unmask dance at the end of
1165 * the threaded handler for those.
1166 */
1167 if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
1168 new->flags &= ~IRQF_ONESHOT;
1169
1170 /*
1171 * The following block of code has to be executed atomically
1172 */
1173 raw_spin_lock_irqsave(&desc->lock, flags);
1174 old_ptr = &desc->action;
1175 old = *old_ptr;
1176 if (old) {
1177 /*
1178 * Can't share interrupts unless both agree to and are
1179 * the same type (level, edge, polarity). So both flag
1180 * fields must have IRQF_SHARED set and the bits which
1181 * set the trigger type must match. Also all must
1182 * agree on ONESHOT.
1183 */
1184 unsigned int oldtype = irqd_get_trigger_type(&desc->irq_data);
1185
1186 if (!((old->flags & new->flags) & IRQF_SHARED) ||
1187 (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
1188 ((old->flags ^ new->flags) & IRQF_ONESHOT))
1189 goto mismatch;
1190
1191 /* All handlers must agree on per-cpuness */
1192 if ((old->flags & IRQF_PERCPU) !=
1193 (new->flags & IRQF_PERCPU))
1194 goto mismatch;
1195
1196 /* add new interrupt at end of irq queue */
1197 do {
1198 /*
1199 * Or all existing action->thread_mask bits,
1200 * so we can find the next zero bit for this
1201 * new action.
1202 */
1203 thread_mask |= old->thread_mask;
1204 old_ptr = &old->next;
1205 old = *old_ptr;
1206 } while (old);
1207 shared = 1;
1208 }
1209
1210 /*
1211 * Setup the thread mask for this irqaction for ONESHOT. For
1212 * !ONESHOT irqs the thread mask is 0 so we can avoid a
1213 * conditional in irq_wake_thread().
1214 */
1215 if (new->flags & IRQF_ONESHOT) {
1216 /*
1217 * Unlikely to have 32 (or 64) irqs sharing one line,
1218 * but who knows.
1219 */
1220 if (thread_mask == ~0UL) {
1221 ret = -EBUSY;
1222 goto out_unlock;
1223 }
1224 /*
1225 * The thread_mask for the action is or'ed to
1226 * desc->threads_oneshot to indicate that the
1227 * IRQF_ONESHOT thread handler has been woken, but not
1228 * yet finished. The bit is cleared when a thread
1229 * completes. When all threads of a shared interrupt
1230 * line have completed desc->threads_oneshot becomes
1231 * zero and the interrupt line is unmasked. See
1232 * handle.c:irq_wake_thread() for further information.
1233 *
1234 * If no thread is woken by primary (hard irq context)
1235 * interrupt handlers, then desc->threads_oneshot is
1236 * also checked for zero to unmask the irq line in the
1237 * affected hard irq flow handlers
1238 * (handle_[fasteoi|level]_irq).
1239 *
1240 * The new action gets the first zero bit of
1241 * thread_mask assigned. See the loop above which or's
1242 * all existing action->thread_mask bits.
1243 */
1244 new->thread_mask = 1 << ffz(thread_mask);
1245
1246 } else if (new->handler == irq_default_primary_handler &&
1247 !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
1248 /*
1249 * The interrupt was requested with handler = NULL, so
1250 * we use the default primary handler for it. But it
1251 * does not have the oneshot flag set. In combination
1252 * with level interrupts this is deadly, because the
1253 * default primary handler just wakes the thread, then
1254 * the irq line is re-enabled, but the device still
1255 * has the level irq asserted. Rinse and repeat....
1256 *
1257 * While this works for edge type interrupts, we play
1258 * it safe and reject unconditionally because we can't
1259 * say for sure which type this interrupt really
1260 * has. The type flags are unreliable as the
1261 * underlying chip implementation can override them.
1262 */
1263 pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
1264 irq);
1265 ret = -EINVAL;
1266 goto out_unlock;
1267 }
1268
1269 if (!shared) {
1270 ret = irq_request_resources(desc);
1271 if (ret) {
1272 pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
1273 new->name, irq, desc->irq_data.chip->name);
1274 goto out_unlock;
1275 }
1276
1277 init_waitqueue_head(&desc->wait_for_threads);
1278
1279 /* Setup the type (level, edge polarity) if configured: */
1280 if (new->flags & IRQF_TRIGGER_MASK) {
1281 ret = __irq_set_trigger(desc,
1282 new->flags & IRQF_TRIGGER_MASK);
1283
1284 if (ret) {
1285 irq_release_resources(desc);
1286 goto out_unlock;
1287 }
1288 }
1289
1290 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
1291 IRQS_ONESHOT | IRQS_WAITING);
1292 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
1293
1294 if (new->flags & IRQF_PERCPU) {
1295 irqd_set(&desc->irq_data, IRQD_PER_CPU);
1296 irq_settings_set_per_cpu(desc);
1297 }
1298
1299 if (new->flags & IRQF_ONESHOT)
1300 desc->istate |= IRQS_ONESHOT;
1301
1302 /* Exclude IRQ from balancing if requested */
1303 if (new->flags & IRQF_NOBALANCING) {
1304 irq_settings_set_no_balancing(desc);
1305 irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1306 }
1307
1308 if (irq_settings_can_autoenable(desc)) {
1309 irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
1310 } else {
1311 /*
1312 * Shared interrupts do not go well with disabling
1313 * auto enable. A second driver sharing the interrupt
1314 * might request it while it is still disabled and then
1315 * wait for interrupts forever.
1316 */
1317 WARN_ON_ONCE(new->flags & IRQF_SHARED);
1318 /* Undo nested disables: */
1319 desc->depth = 1;
1320 }
1321
1322 } else if (new->flags & IRQF_TRIGGER_MASK) {
1323 unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
1324 unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
1325
1326 if (nmsk != omsk)
1327 /* hope the handler works with current trigger mode */
1328 pr_warn("irq %d uses trigger mode %u; requested %u\n",
1329 irq, omsk, nmsk);
1330 }
1331
1332 *old_ptr = new;
1333
1334 irq_pm_install_action(desc, new);
1335
1336 /* Reset broken irq detection when installing new handler */
1337 desc->irq_count = 0;
1338 desc->irqs_unhandled = 0;
1339
1340 /*
1341 * Check whether we disabled the irq via the spurious handler
1342 * before. Reenable it and give it another chance.
1343 */
1344 if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1345 desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1346 __enable_irq(desc);
1347 }
1348
1349 raw_spin_unlock_irqrestore(&desc->lock, flags);
1350
1351 irq_setup_timings(desc, new);
1352
1353 /*
1354 * Strictly no need to wake it up, but hung_task complains
1355 * when no hard interrupt wakes the thread up.
1356 */
1357 if (new->thread)
1358 wake_up_process(new->thread);
1359 if (new->secondary)
1360 wake_up_process(new->secondary->thread);
1361
1362 register_irq_proc(irq, desc);
1363 irq_add_debugfs_entry(irq, desc);
1364 new->dir = NULL;
1365 register_handler_proc(irq, new);
1366 return 0;
1367
1368mismatch:
1369 if (!(new->flags & IRQF_PROBE_SHARED)) {
1370 pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
1371 irq, new->flags, new->name, old->flags, old->name);
1372#ifdef CONFIG_DEBUG_SHIRQ
1373 dump_stack();
1374#endif
1375 }
1376 ret = -EBUSY;
1377
1378out_unlock:
1379 raw_spin_unlock_irqrestore(&desc->lock, flags);
1380
1381out_thread:
1382 if (new->thread) {
1383 struct task_struct *t = new->thread;
1384
1385 new->thread = NULL;
1386 kthread_stop(t);
1387 put_task_struct(t);
1388 }
1389 if (new->secondary && new->secondary->thread) {
1390 struct task_struct *t = new->secondary->thread;
1391
1392 new->secondary->thread = NULL;
1393 kthread_stop(t);
1394 put_task_struct(t);
1395 }
1396out_mput:
1397 module_put(desc->owner);
1398 return ret;
1399}
1400
1401/**
1402 * setup_irq - setup an interrupt
1403 * @irq: Interrupt line to setup
1404 * @act: irqaction for the interrupt
1405 *
1406 * Used to statically setup interrupts in the early boot process.
1407 */
1408int setup_irq(unsigned int irq, struct irqaction *act)
1409{
1410 int retval;
1411 struct irq_desc *desc = irq_to_desc(irq);
1412
1413 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1414 return -EINVAL;
1415
1416 retval = irq_chip_pm_get(&desc->irq_data);
1417 if (retval < 0)
1418 return retval;
1419
1420 chip_bus_lock(desc);
1421 retval = __setup_irq(irq, desc, act);
1422 chip_bus_sync_unlock(desc);
1423
1424 if (retval)
1425 irq_chip_pm_put(&desc->irq_data);
1426
1427 return retval;
1428}
1429EXPORT_SYMBOL_GPL(setup_irq);
1430
1431/*
1432 * Internal function to unregister an irqaction - used to free
1433 * regular and special interrupts that are part of the architecture.
1434 */
1435static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
1436{
1437 struct irq_desc *desc = irq_to_desc(irq);
1438 struct irqaction *action, **action_ptr;
1439 unsigned long flags;
1440
1441 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1442
1443 if (!desc)
1444 return NULL;
1445
1446 chip_bus_lock(desc);
1447 raw_spin_lock_irqsave(&desc->lock, flags);
1448
1449 /*
1450 * There can be multiple actions per IRQ descriptor, find the right
1451 * one based on the dev_id:
1452 */
1453 action_ptr = &desc->action;
1454 for (;;) {
1455 action = *action_ptr;
1456
1457 if (!action) {
1458 WARN(1, "Trying to free already-free IRQ %d\n", irq);
1459 raw_spin_unlock_irqrestore(&desc->lock, flags);
1460 chip_bus_sync_unlock(desc);
1461 return NULL;
1462 }
1463
1464 if (action->dev_id == dev_id)
1465 break;
1466 action_ptr = &action->next;
1467 }
1468
1469 /* Found it - now remove it from the list of entries: */
1470 *action_ptr = action->next;
1471
1472 irq_pm_remove_action(desc, action);
1473
1474 /* If this was the last handler, shut down the IRQ line: */
1475 if (!desc->action) {
1476 irq_settings_clr_disable_unlazy(desc);
1477 irq_shutdown(desc);
1478 irq_release_resources(desc);
1479 irq_remove_timings(desc);
1480 }
1481
1482#ifdef CONFIG_SMP
1483 /* make sure affinity_hint is cleaned up */
1484 if (WARN_ON_ONCE(desc->affinity_hint))
1485 desc->affinity_hint = NULL;
1486#endif
1487
1488 raw_spin_unlock_irqrestore(&desc->lock, flags);
1489 chip_bus_sync_unlock(desc);
1490
1491 unregister_handler_proc(irq, action);
1492
1493 /* Make sure it's not being used on another CPU: */
1494 synchronize_irq(irq);
1495
1496#ifdef CONFIG_DEBUG_SHIRQ
1497 /*
1498 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
1499 * event to happen even now that it's being freed, so let's make sure that
1500 * is so by doing an extra call to the handler ....
1501 *
1502 * ( We do this after actually deregistering it, to make sure that a
1503 * 'real' IRQ doesn't run in parallel with our fake. )
1504 */
1505 if (action->flags & IRQF_SHARED) {
1506 local_irq_save(flags);
1507 action->handler(irq, dev_id);
1508 local_irq_restore(flags);
1509 }
1510#endif
1511
1512 if (action->thread) {
1513 kthread_stop(action->thread);
1514 put_task_struct(action->thread);
1515 if (action->secondary && action->secondary->thread) {
1516 kthread_stop(action->secondary->thread);
1517 put_task_struct(action->secondary->thread);
1518 }
1519 }
1520
1521 irq_chip_pm_put(&desc->irq_data);
1522 module_put(desc->owner);
1523 kfree(action->secondary);
1524 return action;
1525}
1526
1527/**
1528 * remove_irq - free an interrupt
1529 * @irq: Interrupt line to free
1530 * @act: irqaction for the interrupt
1531 *
1532 * Used to remove interrupts statically setup by the early boot process.
1533 */
1534void remove_irq(unsigned int irq, struct irqaction *act)
1535{
1536 struct irq_desc *desc = irq_to_desc(irq);
1537
1538 if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1539 __free_irq(irq, act->dev_id);
1540}
1541EXPORT_SYMBOL_GPL(remove_irq);
1542
1543/**
1544 * free_irq - free an interrupt allocated with request_irq
1545 * @irq: Interrupt line to free
1546 * @dev_id: Device identity to free
1547 *
1548 * Remove an interrupt handler. The handler is removed and if the
1549 * interrupt line is no longer in use by any driver it is disabled.
1550 * On a shared IRQ the caller must ensure the interrupt is disabled
1551 * on the card it drives before calling this function. The function
1552 * does not return until any executing interrupts for this IRQ
1553 * have completed.
1554 *
1555 * This function must not be called from interrupt context.
1556 *
1557 * Returns the devname argument passed to request_irq.
1558 */
1559const void *free_irq(unsigned int irq, void *dev_id)
1560{
1561 struct irq_desc *desc = irq_to_desc(irq);
1562 struct irqaction *action;
1563 const char *devname;
1564
1565 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1566 return NULL;
1567
1568#ifdef CONFIG_SMP
1569 if (WARN_ON(desc->affinity_notify))
1570 desc->affinity_notify = NULL;
1571#endif
1572
1573 action = __free_irq(irq, dev_id);
1574 devname = action->name;
1575 kfree(action);
1576 return devname;
1577}
1578EXPORT_SYMBOL(free_irq);
1579
1580/**
1581 * request_threaded_irq - allocate an interrupt line
1582 * @irq: Interrupt line to allocate
1583 * @handler: Function to be called when the IRQ occurs.
1584 * Primary handler for threaded interrupts
1585 * If NULL and thread_fn != NULL the default
1586 * primary handler is installed
1587 * @thread_fn: Function called from the irq handler thread
1588 * If NULL, no irq thread is created
1589 * @irqflags: Interrupt type flags
1590 * @devname: An ascii name for the claiming device
1591 * @dev_id: A cookie passed back to the handler function
1592 *
1593 * This call allocates interrupt resources and enables the
1594 * interrupt line and IRQ handling. From the point this
1595 * call is made your handler function may be invoked. Since
1596 * your handler function must clear any interrupt the board
1597 * raises, you must take care both to initialise your hardware
1598 * and to set up the interrupt handler in the right order.
1599 *
1600 * If you want to set up a threaded irq handler for your device
1601 * then you need to supply @handler and @thread_fn. @handler is
1602 * still called in hard interrupt context and has to check
1603 * whether the interrupt originates from the device. If yes it
1604 * needs to disable the interrupt on the device and return
1605 * IRQ_WAKE_THREAD which will wake up the handler thread and run
1606 * @thread_fn. This split handler design is necessary to support
1607 * shared interrupts.
1608 *
1609 * Dev_id must be globally unique. Normally the address of the
1610 * device data structure is used as the cookie. Since the handler
1611 * receives this value it makes sense to use it.
1612 *
1613 * If your interrupt is shared you must pass a non NULL dev_id
1614 * as this is required when freeing the interrupt.
1615 *
1616 * Flags:
1617 *
1618 * IRQF_SHARED Interrupt is shared
1619 * IRQF_TRIGGER_* Specify active edge(s) or level
1620 *
1621 */
1622int request_threaded_irq(unsigned int irq, irq_handler_t handler,
1623 irq_handler_t thread_fn, unsigned long irqflags,
1624 const char *devname, void *dev_id)
1625{
1626 struct irqaction *action;
1627 struct irq_desc *desc;
1628 int retval;
1629
1630 if (irq == IRQ_NOTCONNECTED)
1631 return -ENOTCONN;
1632
1633 /*
1634 * Sanity-check: shared interrupts must pass in a real dev-ID,
1635 * otherwise we'll have trouble later trying to figure out
1636 * which interrupt is which (messes up the interrupt freeing
1637 * logic etc).
1638 *
1639 * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
1640 * it cannot be set along with IRQF_NO_SUSPEND.
1641 */
1642 if (((irqflags & IRQF_SHARED) && !dev_id) ||
1643 (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
1644 ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
1645 return -EINVAL;
1646
1647 desc = irq_to_desc(irq);
1648 if (!desc)
1649 return -EINVAL;
1650
1651 if (!irq_settings_can_request(desc) ||
1652 WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1653 return -EINVAL;
1654
1655 if (!handler) {
1656 if (!thread_fn)
1657 return -EINVAL;
1658 handler = irq_default_primary_handler;
1659 }
1660
1661 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1662 if (!action)
1663 return -ENOMEM;
1664
1665 action->handler = handler;
1666 action->thread_fn = thread_fn;
1667 action->flags = irqflags;
1668 action->name = devname;
1669 action->dev_id = dev_id;
1670
1671 retval = irq_chip_pm_get(&desc->irq_data);
1672 if (retval < 0) {
1673 kfree(action);
1674 return retval;
1675 }
1676
1677 chip_bus_lock(desc);
1678 retval = __setup_irq(irq, desc, action);
1679 chip_bus_sync_unlock(desc);
1680
1681 if (retval) {
1682 irq_chip_pm_put(&desc->irq_data);
1683 kfree(action->secondary);
1684 kfree(action);
1685 }
1686
1687#ifdef CONFIG_DEBUG_SHIRQ_FIXME
1688 if (!retval && (irqflags & IRQF_SHARED)) {
1689 /*
1690 * It's a shared IRQ -- the driver ought to be prepared for it
1691 * to happen immediately, so let's make sure....
1692 * We disable the irq to make sure that a 'real' IRQ doesn't
1693 * run in parallel with our fake.
1694 */
1695 unsigned long flags;
1696
1697 disable_irq(irq);
1698 local_irq_save(flags);
1699
1700 handler(irq, dev_id);
1701
1702 local_irq_restore(flags);
1703 enable_irq(irq);
1704 }
1705#endif
1706 return retval;
1707}
1708EXPORT_SYMBOL(request_threaded_irq);
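
/*
 * Usage sketch (illustrative only; the foo_* names are hypothetical): the
 * split handler design described above. The hard handler only checks that
 * the device really raised the interrupt and silences it, the threaded
 * handler does the work that may sleep (e.g. talking over a slow bus).
 *
 *	static irqreturn_t foo_hardirq(int irq, void *dev_id)
 *	{
 *		struct foo_dev *foo = dev_id;
 *
 *		if (!foo_irq_pending(foo))
 *			return IRQ_NONE;	// not ours (shared line)
 *		foo_mask_device_irq(foo);	// quiesce the device
 *		return IRQ_WAKE_THREAD;		// run foo_thread_fn()
 *	}
 *
 *	static irqreturn_t foo_thread_fn(int irq, void *dev_id)
 *	{
 *		struct foo_dev *foo = dev_id;
 *
 *		foo_process_events(foo);	// may sleep
 *		foo_unmask_device_irq(foo);
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_threaded_irq(foo->irq, foo_hardirq, foo_thread_fn,
 *				   IRQF_SHARED, "foo", foo);
 */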
1709
1710/**
1711 * request_any_context_irq - allocate an interrupt line
1712 * @irq: Interrupt line to allocate
1713 * @handler: Function to be called when the IRQ occurs.
1714 * Threaded handler for threaded interrupts.
1715 * @flags: Interrupt type flags
1716 * @name: An ascii name for the claiming device
1717 * @dev_id: A cookie passed back to the handler function
1718 *
1719 * This call allocates interrupt resources and enables the
1720 * interrupt line and IRQ handling. It selects either a
1721 * hardirq or threaded handling method depending on the
1722 * context.
1723 *
1724 * On failure, it returns a negative value. On success,
1725 * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
1726 */
1727int request_any_context_irq(unsigned int irq, irq_handler_t handler,
1728 unsigned long flags, const char *name, void *dev_id)
1729{
1730 struct irq_desc *desc;
1731 int ret;
1732
1733 if (irq == IRQ_NOTCONNECTED)
1734 return -ENOTCONN;
1735
1736 desc = irq_to_desc(irq);
1737 if (!desc)
1738 return -EINVAL;
1739
1740 if (irq_settings_is_nested_thread(desc)) {
1741 ret = request_threaded_irq(irq, NULL, handler,
1742 flags, name, dev_id);
1743 return !ret ? IRQC_IS_NESTED : ret;
1744 }
1745
1746 ret = request_irq(irq, handler, flags, name, dev_id);
1747 return !ret ? IRQC_IS_HARDIRQ : ret;
1748}
1749EXPORT_SYMBOL_GPL(request_any_context_irq);
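
/*
 * Usage sketch (illustrative only): any non-negative return value means
 * success, it merely encodes which handling method was chosen.
 *
 *	ret = request_any_context_irq(irq, foo_handler, 0, "foo", foo);
 *	if (ret < 0)
 *		return ret;
 *	// ret is IRQC_IS_HARDIRQ or IRQC_IS_NESTED here
 */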
1750
1751void enable_percpu_irq(unsigned int irq, unsigned int type)
1752{
1753 unsigned int cpu = smp_processor_id();
1754 unsigned long flags;
1755 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1756
1757 if (!desc)
1758 return;
1759
1760 /*
1761 * If the trigger type is not specified by the caller, then
1762 * use the default for this interrupt.
1763 */
1764 type &= IRQ_TYPE_SENSE_MASK;
1765 if (type == IRQ_TYPE_NONE)
1766 type = irqd_get_trigger_type(&desc->irq_data);
1767
1768 if (type != IRQ_TYPE_NONE) {
1769 int ret;
1770
1771 ret = __irq_set_trigger(desc, type);
1772
1773 if (ret) {
1774 WARN(1, "failed to set type for IRQ%d\n", irq);
1775 goto out;
1776 }
1777 }
1778
1779 irq_percpu_enable(desc, cpu);
1780out:
1781 irq_put_desc_unlock(desc, flags);
1782}
1783EXPORT_SYMBOL_GPL(enable_percpu_irq);
1784
1785/**
1786 * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
1787 * @irq: Linux irq number to check for
1788 *
1789 * Must be called from a non-migratable context. Returns the enable
1790 * state of a per cpu interrupt on the current cpu.
1791 */
1792bool irq_percpu_is_enabled(unsigned int irq)
1793{
1794 unsigned int cpu = smp_processor_id();
1795 struct irq_desc *desc;
1796 unsigned long flags;
1797 bool is_enabled;
1798
1799 desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1800 if (!desc)
1801 return false;
1802
1803 is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
1804 irq_put_desc_unlock(desc, flags);
1805
1806 return is_enabled;
1807}
1808EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);
1809
1810void disable_percpu_irq(unsigned int irq)
1811{
1812 unsigned int cpu = smp_processor_id();
1813 unsigned long flags;
1814 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1815
1816 if (!desc)
1817 return;
1818
1819 irq_percpu_disable(desc, cpu);
1820 irq_put_desc_unlock(desc, flags);
1821}
1822EXPORT_SYMBOL_GPL(disable_percpu_irq);
1823
1824/*
1825 * Internal function to unregister a percpu irqaction.
1826 */
1827static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
1828{
1829 struct irq_desc *desc = irq_to_desc(irq);
1830 struct irqaction *action;
1831 unsigned long flags;
1832
1833 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1834
1835 if (!desc)
1836 return NULL;
1837
1838 raw_spin_lock_irqsave(&desc->lock, flags);
1839
1840 action = desc->action;
1841 if (!action || action->percpu_dev_id != dev_id) {
1842 WARN(1, "Trying to free already-free IRQ %d\n", irq);
1843 goto bad;
1844 }
1845
1846 if (!cpumask_empty(desc->percpu_enabled)) {
1847 WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
1848 irq, cpumask_first(desc->percpu_enabled));
1849 goto bad;
1850 }
1851
1852 /* Found it - now remove it from the list of entries: */
1853 desc->action = NULL;
1854
1855 raw_spin_unlock_irqrestore(&desc->lock, flags);
1856
1857 unregister_handler_proc(irq, action);
1858
1859 irq_chip_pm_put(&desc->irq_data);
1860 module_put(desc->owner);
1861 return action;
1862
1863bad:
1864 raw_spin_unlock_irqrestore(&desc->lock, flags);
1865 return NULL;
1866}
1867
1868/**
1869 * remove_percpu_irq - free a per-cpu interrupt
1870 * @irq: Interrupt line to free
1871 * @act: irqaction for the interrupt
1872 *
1873 * Used to remove interrupts statically setup by the early boot process.
1874 */
1875void remove_percpu_irq(unsigned int irq, struct irqaction *act)
1876{
1877 struct irq_desc *desc = irq_to_desc(irq);
1878
1879 if (desc && irq_settings_is_per_cpu_devid(desc))
1880 __free_percpu_irq(irq, act->percpu_dev_id);
1881}
1882
1883/**
1884 * free_percpu_irq - free an interrupt allocated with request_percpu_irq
1885 * @irq: Interrupt line to free
1886 * @dev_id: Device identity to free
1887 *
1888 * Remove a percpu interrupt handler. The handler is removed, but
1889 * the interrupt line is not disabled. This must be done on each
1890 * CPU before calling this function. The function does not return
1891 * until any executing interrupts for this IRQ have completed.
1892 *
1893 * This function must not be called from interrupt context.
1894 */
1895void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
1896{
1897 struct irq_desc *desc = irq_to_desc(irq);
1898
1899 if (!desc || !irq_settings_is_per_cpu_devid(desc))
1900 return;
1901
1902 chip_bus_lock(desc);
1903 kfree(__free_percpu_irq(irq, dev_id));
1904 chip_bus_sync_unlock(desc);
1905}
1906EXPORT_SYMBOL_GPL(free_percpu_irq);
1907
1908/**
1909 * setup_percpu_irq - setup a per-cpu interrupt
1910 * @irq: Interrupt line to setup
1911 * @act: irqaction for the interrupt
1912 *
1913 * Used to statically setup per-cpu interrupts in the early boot process.
1914 */
1915int setup_percpu_irq(unsigned int irq, struct irqaction *act)
1916{
1917 struct irq_desc *desc = irq_to_desc(irq);
1918 int retval;
1919
1920 if (!desc || !irq_settings_is_per_cpu_devid(desc))
1921 return -EINVAL;
1922
1923 retval = irq_chip_pm_get(&desc->irq_data);
1924 if (retval < 0)
1925 return retval;
1926
1927 chip_bus_lock(desc);
1928 retval = __setup_irq(irq, desc, act);
1929 chip_bus_sync_unlock(desc);
1930
1931 if (retval)
1932 irq_chip_pm_put(&desc->irq_data);
1933
1934 return retval;
1935}
1936
1937/**
1938 * request_percpu_irq - allocate a percpu interrupt line
1939 * @irq: Interrupt line to allocate
1940 * @handler: Function to be called when the IRQ occurs.
1941 * @devname: An ascii name for the claiming device
1942 * @dev_id: A percpu cookie passed back to the handler function
1943 *
1944 * This call allocates interrupt resources and enables the
1945 * interrupt on the local CPU. If the interrupt is supposed to be
1946 * enabled on other CPUs, it has to be done on each CPU using
1947 * enable_percpu_irq().
1948 *
1949 * Dev_id must be globally unique. It is a per-cpu variable, and
1950 * the handler gets called with the interrupted CPU's instance of
1951 * that variable.
1952 */
1953int request_percpu_irq(unsigned int irq, irq_handler_t handler,
1954 const char *devname, void __percpu *dev_id)
1955{
1956 struct irqaction *action;
1957 struct irq_desc *desc;
1958 int retval;
1959
1960 if (!dev_id)
1961 return -EINVAL;
1962
1963 desc = irq_to_desc(irq);
1964 if (!desc || !irq_settings_can_request(desc) ||
1965 !irq_settings_is_per_cpu_devid(desc))
1966 return -EINVAL;
1967
1968 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1969 if (!action)
1970 return -ENOMEM;
1971
1972 action->handler = handler;
1973 action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND;
1974 action->name = devname;
1975 action->percpu_dev_id = dev_id;
1976
1977 retval = irq_chip_pm_get(&desc->irq_data);
1978 if (retval < 0) {
1979 kfree(action);
1980 return retval;
1981 }
1982
1983 chip_bus_lock(desc);
1984 retval = __setup_irq(irq, desc, action);
1985 chip_bus_sync_unlock(desc);
1986
1987 if (retval) {
1988 irq_chip_pm_put(&desc->irq_data);
1989 kfree(action);
1990 }
1991
1992 return retval;
1993}
1994EXPORT_SYMBOL_GPL(request_percpu_irq);
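
/*
 * Usage sketch (illustrative only; the foo_* names are hypothetical): the
 * cookie is a per-cpu variable and the interrupt still has to be unmasked
 * on every CPU that should receive it, via enable_percpu_irq(), typically
 * from CPU bringup / hotplug code.
 *
 *	static struct foo_pcpu __percpu *foo_pcpu;
 *
 *	static irqreturn_t foo_percpu_handler(int irq, void *dev_id)
 *	{
 *		struct foo_pcpu *p = dev_id;	// this CPU's instance
 *
 *		foo_handle_local_event(p);
 *		return IRQ_HANDLED;
 *	}
 *
 *	foo_pcpu = alloc_percpu(struct foo_pcpu);
 *	ret = request_percpu_irq(irq, foo_percpu_handler, "foo", foo_pcpu);
 *	...
 *	// later, on each target CPU:
 *	enable_percpu_irq(irq, IRQ_TYPE_NONE);
 */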
1995
1996/**
1997 * irq_get_irqchip_state - returns the irqchip state of an interrupt.
1998 * @irq: Interrupt line that is forwarded to a VM
1999 * @which: One of IRQCHIP_STATE_* the caller wants to know about
2000 * @state: a pointer to a boolean where the state is to be stored
2001 *
2002 * This call snapshots the internal irqchip state of an
2003 * interrupt, returning into @state the bit corresponding to
2004 * state @which
2005 *
2006 * This function should be called with preemption disabled if the
2007 * interrupt controller has per-cpu registers.
2008 */
2009int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2010 bool *state)
2011{
2012 struct irq_desc *desc;
2013 struct irq_data *data;
2014 struct irq_chip *chip;
2015 unsigned long flags;
2016 int err = -EINVAL;
2017
2018 desc = irq_get_desc_buslock(irq, &flags, 0);
2019 if (!desc)
2020 return err;
2021
2022 data = irq_desc_get_irq_data(desc);
2023
2024 do {
2025 chip = irq_data_get_irq_chip(data);
2026 if (chip->irq_get_irqchip_state)
2027 break;
2028#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2029 data = data->parent_data;
2030#else
2031 data = NULL;
2032#endif
2033 } while (data);
2034
2035 if (data)
2036 err = chip->irq_get_irqchip_state(data, which, state);
2037
2038 irq_put_desc_busunlock(desc, flags);
2039 return err;
2040}
2041EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
2042
2043/**
2044 * irq_set_irqchip_state - set the state of a forwarded interrupt.
2045 * @irq: Interrupt line that is forwarded to a VM
2046 * @which: State to be restored (one of IRQCHIP_STATE_*)
2047 * @val: Value corresponding to @which
2048 *
2049 * This call sets the internal irqchip state of an interrupt,
2050 * depending on the value of @which.
2051 *
2052 * This function should be called with preemption disabled if the
2053 * interrupt controller has per-cpu registers.
2054 */
2055int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2056 bool val)
2057{
2058 struct irq_desc *desc;
2059 struct irq_data *data;
2060 struct irq_chip *chip;
2061 unsigned long flags;
2062 int err = -EINVAL;
2063
2064 desc = irq_get_desc_buslock(irq, &flags, 0);
2065 if (!desc)
2066 return err;
2067
2068 data = irq_desc_get_irq_data(desc);
2069
2070 do {
2071 chip = irq_data_get_irq_chip(data);
2072 if (chip->irq_set_irqchip_state)
2073 break;
2074#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2075 data = data->parent_data;
2076#else
2077 data = NULL;
2078#endif
2079 } while (data);
2080
2081 if (data)
2082 err = chip->irq_set_irqchip_state(data, which, val);
2083
2084 irq_put_desc_busunlock(desc, flags);
2085 return err;
2086}
2087EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
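
/*
 * Usage sketch (illustrative only): a hypervisor forwarding an interrupt to
 * a guest can snapshot and later restore the pending bit, e.g. around a
 * save/restore cycle. Preemption must be disabled if the irqchip uses
 * per-cpu registers.
 *
 *	bool pending;
 *
 *	if (!irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending)) {
 *		// ... save state, hand the line over, restore later ...
 *		irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, pending);
 *	}
 */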