1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
4  * Copyright (C) 2005-2006 Thomas Gleixner
5  *
6  * This file contains driver APIs to the irq subsystem.
7  */
8
9 #define pr_fmt(fmt) "genirq: " fmt
10
11 #include <linux/irq.h>
12 #include <linux/kthread.h>
13 #include <linux/module.h>
14 #include <linux/random.h>
15 #include <linux/interrupt.h>
16 #include <linux/slab.h>
17 #include <linux/sched.h>
18 #include <linux/sched/rt.h>
19 #include <linux/sched/task.h>
20 #include <uapi/linux/sched/types.h>
21 #include <linux/task_work.h>
22
23 #include "internals.h"
24
25 #ifdef CONFIG_IRQ_FORCED_THREADING
26 __read_mostly bool force_irqthreads;
27
28 static int __init setup_forced_irqthreads(char *arg)
29 {
30         force_irqthreads = true;
31         return 0;
32 }
33 early_param("threadirqs", setup_forced_irqthreads);
34 #endif
35
36 static void __synchronize_hardirq(struct irq_desc *desc)
37 {
38         bool inprogress;
39
40         do {
41                 unsigned long flags;
42
43                 /*
44                  * Wait until we're out of the critical section.  This might
45                  * give the wrong answer due to the lack of memory barriers.
46                  */
47                 while (irqd_irq_inprogress(&desc->irq_data))
48                         cpu_relax();
49
50                 /* Ok, that indicated we're done: double-check carefully. */
51                 raw_spin_lock_irqsave(&desc->lock, flags);
52                 inprogress = irqd_irq_inprogress(&desc->irq_data);
53                 raw_spin_unlock_irqrestore(&desc->lock, flags);
54
55                 /* Oops, that failed? */
56         } while (inprogress);
57 }
58
59 /**
60  *      synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
61  *      @irq: interrupt number to wait for
62  *
63  *      This function waits for any pending hard IRQ handlers for this
64  *      interrupt to complete before returning. If you use this
65  *      function while holding a resource the IRQ handler may need you
66  *      will deadlock. It does not take associated threaded handlers
67  *      into account.
68  *
69  *      Do not use this for shutdown scenarios where you must be sure
70  *      that all parts (hardirq and threaded handler) have completed.
71  *
72  *      Returns: false if a threaded handler is active.
73  *
74  *      This function may be called - with care - from IRQ context.
75  */
76 bool synchronize_hardirq(unsigned int irq)
77 {
78         struct irq_desc *desc = irq_to_desc(irq);
79
80         if (desc) {
81                 __synchronize_hardirq(desc);
82                 return !atomic_read(&desc->threads_active);
83         }
84
85         return true;
86 }
87 EXPORT_SYMBOL(synchronize_hardirq);
88
89 /**
90  *      synchronize_irq - wait for pending IRQ handlers (on other CPUs)
91  *      @irq: interrupt number to wait for
92  *
93  *      This function waits for any pending IRQ handlers for this interrupt
94  *      to complete before returning. If you use this function while
95  *      holding a resource the IRQ handler may need you will deadlock.
96  *
97  *      This function may be called - with care - from IRQ context.
98  */
99 void synchronize_irq(unsigned int irq)
100 {
101         struct irq_desc *desc = irq_to_desc(irq);
102
103         if (desc) {
104                 __synchronize_hardirq(desc);
105                 /*
106                  * We made sure that no hardirq handler is
107                  * running. Now verify that no threaded handlers are
108                  * active.
109                  */
110                 wait_event(desc->wait_for_threads,
111                            !atomic_read(&desc->threads_active));
112         }
113 }
114 EXPORT_SYMBOL(synchronize_irq);
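
/*
 * Example (illustrative sketch, hypothetical foo_* names): quiescing a
 * device before tearing down state which the handlers touch.
 * disable_irq_nosync() stops new invocations, synchronize_irq() then
 * waits for handlers already running on other CPUs, including a
 * threaded handler.
 *
 *	static void foo_quiesce(struct foo_dev *foo)
 *	{
 *		disable_irq_nosync(foo->irq);
 *		synchronize_irq(foo->irq);
 *		foo_free_ring(foo);
 *	}
 *
 * After synchronize_irq() returns no handler is running, so foo_free_ring()
 * (hypothetical) is safe. For paths which only need the hard handler to be
 * done, synchronize_hardirq() above is the lighter-weight variant.
 */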
115
116 #ifdef CONFIG_SMP
117 cpumask_var_t irq_default_affinity;
118
119 static bool __irq_can_set_affinity(struct irq_desc *desc)
120 {
121         if (!desc || !irqd_can_balance(&desc->irq_data) ||
122             !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
123                 return false;
124         return true;
125 }
126
127 /**
128  *      irq_can_set_affinity - Check if the affinity of a given irq can be set
129  *      @irq:           Interrupt to check
130  *
131  */
132 int irq_can_set_affinity(unsigned int irq)
133 {
134         return __irq_can_set_affinity(irq_to_desc(irq));
135 }
136
137 /**
138  * irq_can_set_affinity_usr - Check if affinity of an irq can be set from user space
139  * @irq:        Interrupt to check
140  *
141  * Like irq_can_set_affinity() above, but additionally checks for the
142  * AFFINITY_MANAGED flag.
143  */
144 bool irq_can_set_affinity_usr(unsigned int irq)
145 {
146         struct irq_desc *desc = irq_to_desc(irq);
147
148         return __irq_can_set_affinity(desc) &&
149                 !irqd_affinity_is_managed(&desc->irq_data);
150 }
151
152 /**
153  *      irq_set_thread_affinity - Notify irq threads to adjust affinity
154  *      @desc:          irq descriptor which has affinity changed
155  *
156  *      We just set IRQTF_AFFINITY and delegate the affinity setting
157  *      to the interrupt thread itself. We can not call
158  *      set_cpus_allowed_ptr() here as we hold desc->lock and this
159  *      code can be called from hard interrupt context.
160  */
161 void irq_set_thread_affinity(struct irq_desc *desc)
162 {
163         struct irqaction *action;
164
165         for_each_action_of_desc(desc, action)
166                 if (action->thread)
167                         set_bit(IRQTF_AFFINITY, &action->thread_flags);
168 }
169
170 static void irq_validate_effective_affinity(struct irq_data *data)
171 {
172 #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
173         const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
174         struct irq_chip *chip = irq_data_get_irq_chip(data);
175
176         if (!cpumask_empty(m))
177                 return;
178         pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
179                      chip->name, data->irq);
180 #endif
181 }
182
183 int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
184                         bool force)
185 {
186         struct irq_desc *desc = irq_data_to_desc(data);
187         struct irq_chip *chip = irq_data_get_irq_chip(data);
188         int ret;
189
190         if (!chip || !chip->irq_set_affinity)
191                 return -EINVAL;
192
193         ret = chip->irq_set_affinity(data, mask, force);
194         switch (ret) {
195         case IRQ_SET_MASK_OK:
196         case IRQ_SET_MASK_OK_DONE:
197                 cpumask_copy(desc->irq_common_data.affinity, mask);
198         case IRQ_SET_MASK_OK_NOCOPY:
199                 irq_validate_effective_affinity(data);
200                 irq_set_thread_affinity(desc);
201                 ret = 0;
202         }
203
204         return ret;
205 }
206
207 int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
208                             bool force)
209 {
210         struct irq_chip *chip = irq_data_get_irq_chip(data);
211         struct irq_desc *desc = irq_data_to_desc(data);
212         int ret = 0;
213
214         if (!chip || !chip->irq_set_affinity)
215                 return -EINVAL;
216
217         if (irq_can_move_pcntxt(data)) {
218                 ret = irq_do_set_affinity(data, mask, force);
219         } else {
220                 irqd_set_move_pending(data);
221                 irq_copy_pending(desc, mask);
222         }
223
224         if (desc->affinity_notify) {
225                 kref_get(&desc->affinity_notify->kref);
226                 schedule_work(&desc->affinity_notify->work);
227         }
228         irqd_set(data, IRQD_AFFINITY_SET);
229
230         return ret;
231 }
232
233 int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
234 {
235         struct irq_desc *desc = irq_to_desc(irq);
236         unsigned long flags;
237         int ret;
238
239         if (!desc)
240                 return -EINVAL;
241
242         raw_spin_lock_irqsave(&desc->lock, flags);
243         ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
244         raw_spin_unlock_irqrestore(&desc->lock, flags);
245         return ret;
246 }
247
248 int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
249 {
250         unsigned long flags;
251         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
252
253         if (!desc)
254                 return -EINVAL;
255         desc->affinity_hint = m;
256         irq_put_desc_unlock(desc, flags);
257         /* set the initial affinity to prevent every interrupt being on CPU0 */
258         if (m)
259                 __irq_set_affinity(irq, m, false);
260         return 0;
261 }
262 EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
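
/*
 * Example (illustrative sketch, hypothetical names): a multi-queue PCI
 * driver hinting one CPU per MSI-X vector so that the initial affinity
 * set above and user space (e.g. irqbalance) spread the queues:
 *
 *	for (i = 0; i < nvec; i++)
 *		irq_set_affinity_hint(pci_irq_vector(pdev, i),
 *				      cpumask_of(i % num_online_cpus()));
 *
 * The hint must be cleared again with a NULL mask before the interrupt
 * is freed, otherwise __free_irq() below warns about a stale hint.
 */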
263
264 static void irq_affinity_notify(struct work_struct *work)
265 {
266         struct irq_affinity_notify *notify =
267                 container_of(work, struct irq_affinity_notify, work);
268         struct irq_desc *desc = irq_to_desc(notify->irq);
269         cpumask_var_t cpumask;
270         unsigned long flags;
271
272         if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
273                 goto out;
274
275         raw_spin_lock_irqsave(&desc->lock, flags);
276         if (irq_move_pending(&desc->irq_data))
277                 irq_get_pending(cpumask, desc);
278         else
279                 cpumask_copy(cpumask, desc->irq_common_data.affinity);
280         raw_spin_unlock_irqrestore(&desc->lock, flags);
281
282         notify->notify(notify, cpumask);
283
284         free_cpumask_var(cpumask);
285 out:
286         kref_put(&notify->kref, notify->release);
287 }
288
289 /**
290  *      irq_set_affinity_notifier - control notification of IRQ affinity changes
291  *      @irq:           Interrupt for which to enable/disable notification
292  *      @notify:        Context for notification, or %NULL to disable
293  *                      notification.  Function pointers must be initialised;
294  *                      the other fields will be initialised by this function.
295  *
296  *      Must be called in process context.  Notification may only be enabled
297  *      after the IRQ is allocated and must be disabled before the IRQ is
298  *      freed using free_irq().
299  */
300 int
301 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
302 {
303         struct irq_desc *desc = irq_to_desc(irq);
304         struct irq_affinity_notify *old_notify;
305         unsigned long flags;
306
307         /* The release function is promised process context */
308         might_sleep();
309
310         if (!desc)
311                 return -EINVAL;
312
313         /* Complete initialisation of *notify */
314         if (notify) {
315                 notify->irq = irq;
316                 kref_init(&notify->kref);
317                 INIT_WORK(&notify->work, irq_affinity_notify);
318         }
319
320         raw_spin_lock_irqsave(&desc->lock, flags);
321         old_notify = desc->affinity_notify;
322         desc->affinity_notify = notify;
323         raw_spin_unlock_irqrestore(&desc->lock, flags);
324
325         if (old_notify)
326                 kref_put(&old_notify->kref, old_notify->release);
327
328         return 0;
329 }
330 EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
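
/*
 * Example (illustrative sketch, hypothetical foo_* names): following
 * affinity changes to re-target per-queue resources. Only the two
 * function pointers are filled in by the driver; everything else is
 * initialised by irq_set_affinity_notifier() above.
 *
 *	static void foo_notify(struct irq_affinity_notify *notify,
 *			       const cpumask_t *mask)
 *	{
 *		struct foo_queue *q = container_of(notify, struct foo_queue,
 *						   affinity_notify);
 *
 *		foo_retarget_queue(q, mask);
 *	}
 *
 *	static void foo_notify_release(struct kref *ref)
 *	{
 *	}
 *
 *	q->affinity_notify.notify = foo_notify;
 *	q->affinity_notify.release = foo_notify_release;
 *	irq_set_affinity_notifier(q->irq, &q->affinity_notify);
 *
 * The notifier has to be removed with irq_set_affinity_notifier(irq, NULL)
 * before free_irq(), as documented above.
 */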
331
332 #ifndef CONFIG_AUTO_IRQ_AFFINITY
333 /*
334  * Generic version of the affinity autoselector.
335  */
336 int irq_setup_affinity(struct irq_desc *desc)
337 {
338         struct cpumask *set = irq_default_affinity;
339         int ret, node = irq_desc_get_node(desc);
340         static DEFINE_RAW_SPINLOCK(mask_lock);
341         static struct cpumask mask;
342
343         /* Excludes PER_CPU and NO_BALANCE interrupts */
344         if (!__irq_can_set_affinity(desc))
345                 return 0;
346
347         raw_spin_lock(&mask_lock);
348         /*
349          * Preserve the managed affinity setting and a userspace affinity
350          * setup, but make sure that one of the targets is online.
351          */
352         if (irqd_affinity_is_managed(&desc->irq_data) ||
353             irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
354                 if (cpumask_intersects(desc->irq_common_data.affinity,
355                                        cpu_online_mask))
356                         set = desc->irq_common_data.affinity;
357                 else
358                         irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
359         }
360
361         cpumask_and(&mask, cpu_online_mask, set);
362         if (node != NUMA_NO_NODE) {
363                 const struct cpumask *nodemask = cpumask_of_node(node);
364
365                 /* make sure at least one of the cpus in nodemask is online */
366                 if (cpumask_intersects(&mask, nodemask))
367                         cpumask_and(&mask, &mask, nodemask);
368         }
369         ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
370         raw_spin_unlock(&mask_lock);
371         return ret;
372 }
373 #else
374 /* Wrapper for ALPHA specific affinity selector magic */
375 int irq_setup_affinity(struct irq_desc *desc)
376 {
377         return irq_select_affinity(irq_desc_get_irq(desc));
378 }
379 #endif
380
381 /*
382  * Called when a bogus affinity is set via /proc/irq
383  */
384 int irq_select_affinity_usr(unsigned int irq)
385 {
386         struct irq_desc *desc = irq_to_desc(irq);
387         unsigned long flags;
388         int ret;
389
390         raw_spin_lock_irqsave(&desc->lock, flags);
391         ret = irq_setup_affinity(desc);
392         raw_spin_unlock_irqrestore(&desc->lock, flags);
393         return ret;
394 }
395 #endif
396
397 /**
398  *      irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
399  *      @irq: interrupt number to set affinity
400  *      @vcpu_info: vCPU specific data or pointer to a percpu array of vCPU
401  *                  specific data for percpu_devid interrupts
402  *
403  *      This function uses the vCPU specific data to set the vCPU
404  *      affinity for an irq. The vCPU specific data is passed from
405  *      outside, such as KVM. One example code path is as below:
406  *      KVM -> IOMMU -> irq_set_vcpu_affinity().
407  */
408 int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
409 {
410         unsigned long flags;
411         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
412         struct irq_data *data;
413         struct irq_chip *chip;
414         int ret = -ENOSYS;
415
416         if (!desc)
417                 return -EINVAL;
418
419         data = irq_desc_get_irq_data(desc);
420         do {
421                 chip = irq_data_get_irq_chip(data);
422                 if (chip && chip->irq_set_vcpu_affinity)
423                         break;
424 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
425                 data = data->parent_data;
426 #else
427                 data = NULL;
428 #endif
429         } while (data);
430
431         if (data)
432                 ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
433         irq_put_desc_unlock(desc, flags);
434
435         return ret;
436 }
437 EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
438
439 void __disable_irq(struct irq_desc *desc)
440 {
441         if (!desc->depth++)
442                 irq_disable(desc);
443 }
444
445 static int __disable_irq_nosync(unsigned int irq)
446 {
447         unsigned long flags;
448         struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
449
450         if (!desc)
451                 return -EINVAL;
452         __disable_irq(desc);
453         irq_put_desc_busunlock(desc, flags);
454         return 0;
455 }
456
457 /**
458  *      disable_irq_nosync - disable an irq without waiting
459  *      @irq: Interrupt to disable
460  *
461  *      Disable the selected interrupt line.  Disables and Enables are
462  *      nested.
463  *      Unlike disable_irq(), this function does not ensure existing
464  *      instances of the IRQ handler have completed before returning.
465  *
466  *      This function may be called from IRQ context.
467  */
468 void disable_irq_nosync(unsigned int irq)
469 {
470         __disable_irq_nosync(irq);
471 }
472 EXPORT_SYMBOL(disable_irq_nosync);
473
474 /**
475  *      disable_irq - disable an irq and wait for completion
476  *      @irq: Interrupt to disable
477  *
478  *      Disable the selected interrupt line.  Enables and Disables are
479  *      nested.
480  *      This function waits for any pending IRQ handlers for this interrupt
481  *      to complete before returning. If you use this function while
482  *      holding a resource the IRQ handler may need you will deadlock.
483  *
484  *      This function may be called - with care - from IRQ context.
485  */
486 void disable_irq(unsigned int irq)
487 {
488         if (!__disable_irq_nosync(irq))
489                 synchronize_irq(irq);
490 }
491 EXPORT_SYMBOL(disable_irq);
492
493 /**
494  *      disable_hardirq - disables an irq and waits for hardirq completion
495  *      @irq: Interrupt to disable
496  *
497  *      Disable the selected interrupt line.  Enables and Disables are
498  *      nested.
499  *      This function waits for any pending hard IRQ handlers for this
500  *      interrupt to complete before returning. If you use this function while
501  *      holding a resource the hard IRQ handler may need you will deadlock.
502  *
503  *      When used to optimistically disable an interrupt from atomic context
504  *      the return value must be checked.
505  *
506  *      Returns: false if a threaded handler is active.
507  *
508  *      This function may be called - with care - from IRQ context.
509  */
510 bool disable_hardirq(unsigned int irq)
511 {
512         if (!__disable_irq_nosync(irq))
513                 return synchronize_hardirq(irq);
514
515         return false;
516 }
517 EXPORT_SYMBOL_GPL(disable_hardirq);
518
519 void __enable_irq(struct irq_desc *desc)
520 {
521         switch (desc->depth) {
522         case 0:
523  err_out:
524                 WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
525                      irq_desc_get_irq(desc));
526                 break;
527         case 1: {
528                 if (desc->istate & IRQS_SUSPENDED)
529                         goto err_out;
530                 /* Prevent probing on this irq: */
531                 irq_settings_set_noprobe(desc);
532                 /*
533                  * Call irq_startup() not irq_enable() here because the
534                  * interrupt might be marked NOAUTOEN. So irq_startup()
535                  * needs to be invoked when it gets enabled the first
536                  * time. If it was already started up, then irq_startup()
537                  * will invoke irq_enable() under the hood.
538                  */
539                 irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
540                 break;
541         }
542         default:
543                 desc->depth--;
544         }
545 }
546
547 /**
548  *      enable_irq - enable handling of an irq
549  *      @irq: Interrupt to enable
550  *
551  *      Undoes the effect of one call to disable_irq().  If this
552  *      matches the last disable, processing of interrupts on this
553  *      IRQ line is re-enabled.
554  *
555  *      This function may be called from IRQ context only when
556  *      desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
557  */
558 void enable_irq(unsigned int irq)
559 {
560         unsigned long flags;
561         struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
562
563         if (!desc)
564                 return;
565         if (WARN(!desc->irq_data.chip,
566                  KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
567                 goto out;
568
569         __enable_irq(desc);
570 out:
571         irq_put_desc_busunlock(desc, flags);
572 }
573 EXPORT_SYMBOL(enable_irq);
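
/*
 * Example (illustrative sketch, foo->irq is a hypothetical field): the
 * disable/enable calls nest, so a helper can bracket its own critical
 * region without knowing how often callers have already disabled the line.
 *
 *	disable_irq(foo->irq);		depth 0 -> 1, waits for handlers
 *	disable_irq(foo->irq);		depth 1 -> 2
 *	enable_irq(foo->irq);		depth 2 -> 1, line stays disabled
 *	enable_irq(foo->irq);		depth 1 -> 0, line is enabled again
 */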
574
575 static int set_irq_wake_real(unsigned int irq, unsigned int on)
576 {
577         struct irq_desc *desc = irq_to_desc(irq);
578         int ret = -ENXIO;
579
580         if (irq_desc_get_chip(desc)->flags &  IRQCHIP_SKIP_SET_WAKE)
581                 return 0;
582
583         if (desc->irq_data.chip->irq_set_wake)
584                 ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
585
586         return ret;
587 }
588
589 /**
590  *      irq_set_irq_wake - control irq power management wakeup
591  *      @irq:   interrupt to control
592  *      @on:    enable/disable power management wakeup
593  *
594  *      Enable/disable power management wakeup mode, which is
595  *      disabled by default.  Enables and disables must match,
596  *      just as they match for non-wakeup mode support.
597  *
598  *      Wakeup mode lets this IRQ wake the system from sleep
599  *      states like "suspend to RAM".
600  */
601 int irq_set_irq_wake(unsigned int irq, unsigned int on)
602 {
603         unsigned long flags;
604         struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
605         int ret = 0;
606
607         if (!desc)
608                 return -EINVAL;
609
610         /* wakeup-capable irqs can be shared between drivers that
611          * don't need to have the same sleep mode behaviors.
612          */
613         if (on) {
614                 if (desc->wake_depth++ == 0) {
615                         ret = set_irq_wake_real(irq, on);
616                         if (ret)
617                                 desc->wake_depth = 0;
618                         else
619                                 irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
620                 }
621         } else {
622                 if (desc->wake_depth == 0) {
623                         WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
624                 } else if (--desc->wake_depth == 0) {
625                         ret = set_irq_wake_real(irq, on);
626                         if (ret)
627                                 desc->wake_depth = 1;
628                         else
629                                 irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
630                 }
631         }
632         irq_put_desc_busunlock(desc, flags);
633         return ret;
634 }
635 EXPORT_SYMBOL(irq_set_irq_wake);
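
/*
 * Example (illustrative sketch, hypothetical foo_* names): drivers
 * typically use the enable_irq_wake()/disable_irq_wake() wrappers around
 * this function from their suspend/resume callbacks:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo_dev *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(foo->irq);
 *		return 0;
 *	}
 *
 * with a matching disable_irq_wake() in foo_resume(), keeping the wake
 * enable/disable calls balanced as required above.
 */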
636
637 /*
638  * Internal function that tells the architecture code whether a
639  * particular irq has been exclusively allocated or is available
640  * for driver use.
641  */
642 int can_request_irq(unsigned int irq, unsigned long irqflags)
643 {
644         unsigned long flags;
645         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
646         int canrequest = 0;
647
648         if (!desc)
649                 return 0;
650
651         if (irq_settings_can_request(desc)) {
652                 if (!desc->action ||
653                     irqflags & desc->action->flags & IRQF_SHARED)
654                         canrequest = 1;
655         }
656         irq_put_desc_unlock(desc, flags);
657         return canrequest;
658 }
659
660 int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
661 {
662         struct irq_chip *chip = desc->irq_data.chip;
663         int ret, unmask = 0;
664
665         if (!chip || !chip->irq_set_type) {
666                 /*
667                  * IRQF_TRIGGER_* but the PIC does not support multiple
668                  * flow-types?
669                  */
670                 pr_debug("No set_type function for IRQ %d (%s)\n",
671                          irq_desc_get_irq(desc),
672                          chip ? (chip->name ? : "unknown") : "unknown");
673                 return 0;
674         }
675
676         if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
677                 if (!irqd_irq_masked(&desc->irq_data))
678                         mask_irq(desc);
679                 if (!irqd_irq_disabled(&desc->irq_data))
680                         unmask = 1;
681         }
682
683         /* Mask all flags except trigger mode */
684         flags &= IRQ_TYPE_SENSE_MASK;
685         ret = chip->irq_set_type(&desc->irq_data, flags);
686
687         switch (ret) {
688         case IRQ_SET_MASK_OK:
689         case IRQ_SET_MASK_OK_DONE:
690                 irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
691                 irqd_set(&desc->irq_data, flags);
692
693         case IRQ_SET_MASK_OK_NOCOPY:
694                 flags = irqd_get_trigger_type(&desc->irq_data);
695                 irq_settings_set_trigger_mask(desc, flags);
696                 irqd_clear(&desc->irq_data, IRQD_LEVEL);
697                 irq_settings_clr_level(desc);
698                 if (flags & IRQ_TYPE_LEVEL_MASK) {
699                         irq_settings_set_level(desc);
700                         irqd_set(&desc->irq_data, IRQD_LEVEL);
701                 }
702
703                 ret = 0;
704                 break;
705         default:
706                 pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
707                        flags, irq_desc_get_irq(desc), chip->irq_set_type);
708         }
709         if (unmask)
710                 unmask_irq(desc);
711         return ret;
712 }
713
714 #ifdef CONFIG_HARDIRQS_SW_RESEND
715 int irq_set_parent(int irq, int parent_irq)
716 {
717         unsigned long flags;
718         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
719
720         if (!desc)
721                 return -EINVAL;
722
723         desc->parent_irq = parent_irq;
724
725         irq_put_desc_unlock(desc, flags);
726         return 0;
727 }
728 EXPORT_SYMBOL_GPL(irq_set_parent);
729 #endif
730
731 /*
732  * Default primary interrupt handler for threaded interrupts. Is
733  * assigned as primary handler when request_threaded_irq is called
734  * with handler == NULL. Useful for oneshot interrupts.
735  */
736 static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
737 {
738         return IRQ_WAKE_THREAD;
739 }
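
/*
 * Example (illustrative sketch, hypothetical foo_* names): a purely
 * threaded request. With handler == NULL the default primary handler
 * above gets installed, and IRQF_ONESHOT is required (unless the irqchip
 * is IRQCHIP_ONESHOT_SAFE) so the line stays masked until foo_thread_fn()
 * has completed; see the check in __setup_irq() below.
 *
 *	ret = request_threaded_irq(foo->irq, NULL, foo_thread_fn,
 *				   IRQF_ONESHOT, "foo", foo);
 */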
740
741 /*
742  * Primary handler for nested threaded interrupts. Should never be
743  * called.
744  */
745 static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
746 {
747         WARN(1, "Primary handler called for nested irq %d\n", irq);
748         return IRQ_NONE;
749 }
750
751 static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
752 {
753         WARN(1, "Secondary action handler called for irq %d\n", irq);
754         return IRQ_NONE;
755 }
756
757 static int irq_wait_for_interrupt(struct irqaction *action)
758 {
759         set_current_state(TASK_INTERRUPTIBLE);
760
761         while (!kthread_should_stop()) {
762
763                 if (test_and_clear_bit(IRQTF_RUNTHREAD,
764                                        &action->thread_flags)) {
765                         __set_current_state(TASK_RUNNING);
766                         return 0;
767                 }
768                 schedule();
769                 set_current_state(TASK_INTERRUPTIBLE);
770         }
771         __set_current_state(TASK_RUNNING);
772         return -1;
773 }
774
775 /*
776  * Oneshot interrupts keep the irq line masked until the threaded
777  * handler has finished. Unmask if the interrupt has not been disabled and
778  * is marked MASKED.
779  */
780 static void irq_finalize_oneshot(struct irq_desc *desc,
781                                  struct irqaction *action)
782 {
783         if (!(desc->istate & IRQS_ONESHOT) ||
784             action->handler == irq_forced_secondary_handler)
785                 return;
786 again:
787         chip_bus_lock(desc);
788         raw_spin_lock_irq(&desc->lock);
789
790         /*
791          * Implausible though it may be, we need to protect ourselves
792          * against the following scenario:
793          *
794          * The thread finishes faster than the hard interrupt handler
795          * on the other CPU. If we unmask the irq line then the
796          * interrupt can come in again, mask the line and bail out due
797          * to IRQS_INPROGRESS, leaving the irq line masked forever.
798          *
799          * This also serializes the state of shared oneshot handlers
800          * versus "desc->threads_oneshot |= action->thread_mask;" in
801          * __irq_wake_thread(). See the comment there which explains the
802          * serialization.
803          */
804         if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
805                 raw_spin_unlock_irq(&desc->lock);
806                 chip_bus_sync_unlock(desc);
807                 cpu_relax();
808                 goto again;
809         }
810
811         /*
812          * Now check again, whether the thread should run. Otherwise
813          * we would clear the threads_oneshot bit of this thread which
814          * was just set.
815          */
816         if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
817                 goto out_unlock;
818
819         desc->threads_oneshot &= ~action->thread_mask;
820
821         if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
822             irqd_irq_masked(&desc->irq_data))
823                 unmask_threaded_irq(desc);
824
825 out_unlock:
826         raw_spin_unlock_irq(&desc->lock);
827         chip_bus_sync_unlock(desc);
828 }
829
830 #ifdef CONFIG_SMP
831 /*
832  * Check whether we need to change the affinity of the interrupt thread.
833  */
834 static void
835 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
836 {
837         cpumask_var_t mask;
838         bool valid = true;
839
840         if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
841                 return;
842
843         /*
844          * In case we are out of memory we set IRQTF_AFFINITY again and
845          * try again next time
846          */
847         if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
848                 set_bit(IRQTF_AFFINITY, &action->thread_flags);
849                 return;
850         }
851
852         raw_spin_lock_irq(&desc->lock);
853         /*
854          * This code is triggered unconditionally. Check the affinity
855          * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
856          */
857         if (cpumask_available(desc->irq_common_data.affinity)) {
858                 const struct cpumask *m;
859
860                 m = irq_data_get_effective_affinity_mask(&desc->irq_data);
861                 cpumask_copy(mask, m);
862         } else {
863                 valid = false;
864         }
865         raw_spin_unlock_irq(&desc->lock);
866
867         if (valid)
868                 set_cpus_allowed_ptr(current, mask);
869         free_cpumask_var(mask);
870 }
871 #else
872 static inline void
873 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
874 #endif
875
876 /*
877  * Interrupts which are not explicitly requested as threaded
878  * interrupts rely on the implicit bh/preempt disable of the hard irq
879  * context. So we need to disable bh here to avoid deadlocks and other
880  * side effects.
881  */
882 static irqreturn_t
883 irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
884 {
885         irqreturn_t ret;
886
887         local_bh_disable();
888         ret = action->thread_fn(action->irq, action->dev_id);
889         irq_finalize_oneshot(desc, action);
890         local_bh_enable();
891         return ret;
892 }
893
894 /*
895  * Interrupts explicitly requested as threaded interrupts want to be
896  *      preemptible - many of them need to sleep and wait for slow buses to
897  * complete.
898  */
899 static irqreturn_t irq_thread_fn(struct irq_desc *desc,
900                 struct irqaction *action)
901 {
902         irqreturn_t ret;
903
904         ret = action->thread_fn(action->irq, action->dev_id);
905         irq_finalize_oneshot(desc, action);
906         return ret;
907 }
908
909 static void wake_threads_waitq(struct irq_desc *desc)
910 {
911         if (atomic_dec_and_test(&desc->threads_active))
912                 wake_up(&desc->wait_for_threads);
913 }
914
915 static void irq_thread_dtor(struct callback_head *unused)
916 {
917         struct task_struct *tsk = current;
918         struct irq_desc *desc;
919         struct irqaction *action;
920
921         if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
922                 return;
923
924         action = kthread_data(tsk);
925
926         pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
927                tsk->comm, tsk->pid, action->irq);
928
929
930         desc = irq_to_desc(action->irq);
931         /*
932          * If IRQTF_RUNTHREAD is set, we need to decrement
933          * desc->threads_active and wake possible waiters.
934          */
935         if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
936                 wake_threads_waitq(desc);
937
938         /* Prevent a stale desc->threads_oneshot */
939         irq_finalize_oneshot(desc, action);
940 }
941
942 static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
943 {
944         struct irqaction *secondary = action->secondary;
945
946         if (WARN_ON_ONCE(!secondary))
947                 return;
948
949         raw_spin_lock_irq(&desc->lock);
950         __irq_wake_thread(desc, secondary);
951         raw_spin_unlock_irq(&desc->lock);
952 }
953
954 /*
955  * Interrupt handler thread
956  */
957 static int irq_thread(void *data)
958 {
959         struct callback_head on_exit_work;
960         struct irqaction *action = data;
961         struct irq_desc *desc = irq_to_desc(action->irq);
962         irqreturn_t (*handler_fn)(struct irq_desc *desc,
963                         struct irqaction *action);
964
965         if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
966                                         &action->thread_flags))
967                 handler_fn = irq_forced_thread_fn;
968         else
969                 handler_fn = irq_thread_fn;
970
971         init_task_work(&on_exit_work, irq_thread_dtor);
972         task_work_add(current, &on_exit_work, false);
973
974         irq_thread_check_affinity(desc, action);
975
976         while (!irq_wait_for_interrupt(action)) {
977                 irqreturn_t action_ret;
978
979                 irq_thread_check_affinity(desc, action);
980
981                 action_ret = handler_fn(desc, action);
982                 if (action_ret == IRQ_HANDLED)
983                         atomic_inc(&desc->threads_handled);
984                 if (action_ret == IRQ_WAKE_THREAD)
985                         irq_wake_secondary(desc, action);
986
987                 wake_threads_waitq(desc);
988         }
989
990         /*
991          * This is the regular exit path. __free_irq() is stopping the
992          * thread via kthread_stop() after calling
993          * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
994          * oneshot mask bit can be set. We cannot verify that as we
995          * cannot touch the oneshot mask at this point anymore as
996          * __setup_irq() might have given out current's thread_mask
997          * again.
998          */
999         task_work_cancel(current, irq_thread_dtor);
1000         return 0;
1001 }
1002
1003 /**
1004  *      irq_wake_thread - wake the irq thread for the action identified by dev_id
1005  *      @irq:           Interrupt line
1006  *      @dev_id:        Device identity for which the thread should be woken
1007  *
1008  */
1009 void irq_wake_thread(unsigned int irq, void *dev_id)
1010 {
1011         struct irq_desc *desc = irq_to_desc(irq);
1012         struct irqaction *action;
1013         unsigned long flags;
1014
1015         if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1016                 return;
1017
1018         raw_spin_lock_irqsave(&desc->lock, flags);
1019         for_each_action_of_desc(desc, action) {
1020                 if (action->dev_id == dev_id) {
1021                         if (action->thread)
1022                                 __irq_wake_thread(desc, action);
1023                         break;
1024                 }
1025         }
1026         raw_spin_unlock_irqrestore(&desc->lock, flags);
1027 }
1028 EXPORT_SYMBOL_GPL(irq_wake_thread);
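
/*
 * Example (illustrative sketch, hypothetical foo_* names): kicking the
 * threaded handler from a context other than the hard interrupt handler,
 * e.g. a watchdog timer which notices that the device has pending work:
 *
 *	if (foo_has_pending_work(foo))
 *		irq_wake_thread(foo->irq, foo);
 *
 * @dev_id must be the same cookie that was passed to request_threaded_irq().
 */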
1029
1030 static int irq_setup_forced_threading(struct irqaction *new)
1031 {
1032         if (!force_irqthreads)
1033                 return 0;
1034         if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
1035                 return 0;
1036
1037         new->flags |= IRQF_ONESHOT;
1038
1039         /*
1040          * Handle the case where we have a real primary handler and a
1041          * thread handler. We force thread them as well by creating a
1042          * secondary action.
1043          */
1044         if (new->handler != irq_default_primary_handler && new->thread_fn) {
1045                 /* Allocate the secondary action */
1046                 new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1047                 if (!new->secondary)
1048                         return -ENOMEM;
1049                 new->secondary->handler = irq_forced_secondary_handler;
1050                 new->secondary->thread_fn = new->thread_fn;
1051                 new->secondary->dev_id = new->dev_id;
1052                 new->secondary->irq = new->irq;
1053                 new->secondary->name = new->name;
1054         }
1055         /* Deal with the primary handler */
1056         set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
1057         new->thread_fn = new->handler;
1058         new->handler = irq_default_primary_handler;
1059         return 0;
1060 }
1061
1062 static int irq_request_resources(struct irq_desc *desc)
1063 {
1064         struct irq_data *d = &desc->irq_data;
1065         struct irq_chip *c = d->chip;
1066
1067         return c->irq_request_resources ? c->irq_request_resources(d) : 0;
1068 }
1069
1070 static void irq_release_resources(struct irq_desc *desc)
1071 {
1072         struct irq_data *d = &desc->irq_data;
1073         struct irq_chip *c = d->chip;
1074
1075         if (c->irq_release_resources)
1076                 c->irq_release_resources(d);
1077 }
1078
1079 static int
1080 setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
1081 {
1082         struct task_struct *t;
1083         struct sched_param param = {
1084                 .sched_priority = MAX_USER_RT_PRIO/2,
1085         };
1086
1087         if (!secondary) {
1088                 t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
1089                                    new->name);
1090         } else {
1091                 t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
1092                                    new->name);
1093                 param.sched_priority -= 1;
1094         }
1095
1096         if (IS_ERR(t))
1097                 return PTR_ERR(t);
1098
1099         sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
1100
1101         /*
1102          * We keep the reference to the task struct even if
1103          * the thread dies to avoid that the interrupt code
1104          * references an already freed task_struct.
1105          */
1106         get_task_struct(t);
1107         new->thread = t;
1108         /*
1109          * Tell the thread to set its affinity. This is
1110          * important for shared interrupt handlers as we do
1111          * not invoke setup_affinity() for the secondary
1112          * handlers as everything is already set up. Even for
1113          * interrupts marked with IRQF_NO_BALANCE this is
1114          * correct as we want the thread to move to the cpu(s)
1115          * on which the requesting code placed the interrupt.
1116          */
1117         set_bit(IRQTF_AFFINITY, &new->thread_flags);
1118         return 0;
1119 }
1120
1121 /*
1122  * Internal function to register an irqaction - typically used to
1123  * allocate special interrupts that are part of the architecture.
1124  *
1125  * Locking rules:
1126  *
1127  * desc->request_mutex  Provides serialization against a concurrent free_irq()
1128  *   chip_bus_lock      Provides serialization for slow bus operations
1129  *     desc->lock       Provides serialization against hard interrupts
1130  *
1131  * chip_bus_lock and desc->lock are sufficient for all other management and
1132  * interrupt related functions. desc->request_mutex solely serializes
1133  * request/free_irq().
1134  */
1135 static int
1136 __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1137 {
1138         struct irqaction *old, **old_ptr;
1139         unsigned long flags, thread_mask = 0;
1140         int ret, nested, shared = 0;
1141
1142         if (!desc)
1143                 return -EINVAL;
1144
1145         if (desc->irq_data.chip == &no_irq_chip)
1146                 return -ENOSYS;
1147         if (!try_module_get(desc->owner))
1148                 return -ENODEV;
1149
1150         new->irq = irq;
1151
1152         /*
1153          * If the trigger type is not specified by the caller,
1154          * then use the default for this interrupt.
1155          */
1156         if (!(new->flags & IRQF_TRIGGER_MASK))
1157                 new->flags |= irqd_get_trigger_type(&desc->irq_data);
1158
1159         /*
1160          * Check whether the interrupt nests into another interrupt
1161          * thread.
1162          */
1163         nested = irq_settings_is_nested_thread(desc);
1164         if (nested) {
1165                 if (!new->thread_fn) {
1166                         ret = -EINVAL;
1167                         goto out_mput;
1168                 }
1169                 /*
1170                  * Replace the primary handler which was provided from
1171                  * the driver for non nested interrupt handling by the
1172                  * dummy function which warns when called.
1173                  */
1174                 new->handler = irq_nested_primary_handler;
1175         } else {
1176                 if (irq_settings_can_thread(desc)) {
1177                         ret = irq_setup_forced_threading(new);
1178                         if (ret)
1179                                 goto out_mput;
1180                 }
1181         }
1182
1183         /*
1184          * Create a handler thread when a thread function is supplied
1185          * and the interrupt does not nest into another interrupt
1186          * thread.
1187          */
1188         if (new->thread_fn && !nested) {
1189                 ret = setup_irq_thread(new, irq, false);
1190                 if (ret)
1191                         goto out_mput;
1192                 if (new->secondary) {
1193                         ret = setup_irq_thread(new->secondary, irq, true);
1194                         if (ret)
1195                                 goto out_thread;
1196                 }
1197         }
1198
1199         /*
1200          * Drivers are often written to work w/o knowledge about the
1201          * underlying irq chip implementation, so a request for a
1202          * threaded irq without a primary hard irq context handler
1203          * requires the ONESHOT flag to be set. Some irq chips like
1204          * MSI based interrupts are per se one shot safe. Check the
1205          * chip flags, so we can avoid the unmask dance at the end of
1206          * the threaded handler for those.
1207          */
1208         if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
1209                 new->flags &= ~IRQF_ONESHOT;
1210
1211         /*
1212          * Protects against a concurrent __free_irq() call which might wait
1213          * for synchronize_irq() to complete without holding the optional
1214          * chip bus lock and desc->lock.
1215          */
1216         mutex_lock(&desc->request_mutex);
1217
1218         /*
1219          * Acquire bus lock as the irq_request_resources() callback below
1220          * might rely on the serialization or the magic power management
1221          * functions which are abusing the irq_bus_lock() callback,
1222          */
1223         chip_bus_lock(desc);
1224
1225         /* First installed action requests resources. */
1226         if (!desc->action) {
1227                 ret = irq_request_resources(desc);
1228                 if (ret) {
1229                         pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
1230                                new->name, irq, desc->irq_data.chip->name);
1231                         goto out_bus_unlock;
1232                 }
1233         }
1234
1235         /*
1236          * The following block of code has to be executed atomically
1237          * protected against a concurrent interrupt and any of the other
1238          * management calls which are not serialized via
1239          * desc->request_mutex or the optional bus lock.
1240          */
1241         raw_spin_lock_irqsave(&desc->lock, flags);
1242         old_ptr = &desc->action;
1243         old = *old_ptr;
1244         if (old) {
1245                 /*
1246                  * Can't share interrupts unless both agree to and are
1247                  * the same type (level, edge, polarity). So both flag
1248                  * fields must have IRQF_SHARED set and the bits which
1249                  * set the trigger type must match. Also all must
1250                  * agree on ONESHOT.
1251                  */
1252                 unsigned int oldtype;
1253
1254                 /*
1255                  * If nobody did set the configuration before, inherit
1256                  * the one provided by the requester.
1257                  */
1258                 if (irqd_trigger_type_was_set(&desc->irq_data)) {
1259                         oldtype = irqd_get_trigger_type(&desc->irq_data);
1260                 } else {
1261                         oldtype = new->flags & IRQF_TRIGGER_MASK;
1262                         irqd_set_trigger_type(&desc->irq_data, oldtype);
1263                 }
1264
1265                 if (!((old->flags & new->flags) & IRQF_SHARED) ||
1266                     (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
1267                     ((old->flags ^ new->flags) & IRQF_ONESHOT))
1268                         goto mismatch;
1269
1270                 /* All handlers must agree on per-cpuness */
1271                 if ((old->flags & IRQF_PERCPU) !=
1272                     (new->flags & IRQF_PERCPU))
1273                         goto mismatch;
1274
1275                 /* add new interrupt at end of irq queue */
1276                 do {
1277                         /*
1278                          * Or all existing action->thread_mask bits,
1279                          * so we can find the next zero bit for this
1280                          * new action.
1281                          */
1282                         thread_mask |= old->thread_mask;
1283                         old_ptr = &old->next;
1284                         old = *old_ptr;
1285                 } while (old);
1286                 shared = 1;
1287         }
1288
1289         /*
1290          * Setup the thread mask for this irqaction for ONESHOT. For
1291          * !ONESHOT irqs the thread mask is 0 so we can avoid a
1292          * conditional in irq_wake_thread().
1293          */
1294         if (new->flags & IRQF_ONESHOT) {
1295                 /*
1296                  * Unlikely to have 32 or 64 irqs sharing one line,
1297                  * but who knows.
1298                  */
1299                 if (thread_mask == ~0UL) {
1300                         ret = -EBUSY;
1301                         goto out_unlock;
1302                 }
1303                 /*
1304                  * The thread_mask for the action is or'ed to
1305                  * desc->threads_oneshot to indicate that the
1306                  * IRQF_ONESHOT thread handler has been woken, but not
1307                  * yet finished. The bit is cleared when a thread
1308                  * completes. When all threads of a shared interrupt
1309                  * line have completed desc->threads_oneshot becomes
1310                  * zero and the interrupt line is unmasked. See
1311                  * handle.c:__irq_wake_thread() for further information.
1312                  *
1313                  * If no thread is woken by primary (hard irq context)
1314                  * interrupt handlers, then desc->threads_oneshot is
1315                  * also checked for zero to unmask the irq line in the
1316                  * affected hard irq flow handlers
1317                  * (handle_[fasteoi|level]_irq).
1318                  *
1319                  * The new action gets the first zero bit of
1320                  * thread_mask assigned. See the loop above which or's
1321                  * all existing action->thread_mask bits.
1322                  */
1323                 new->thread_mask = 1UL << ffz(thread_mask);
1324
1325         } else if (new->handler == irq_default_primary_handler &&
1326                    !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
1327                 /*
1328                  * The interrupt was requested with handler = NULL, so
1329                  * we use the default primary handler for it. But it
1330                  * does not have the oneshot flag set. In combination
1331                  * with level interrupts this is deadly, because the
1332                  * default primary handler just wakes the thread, then
1333                  * the irq line is reenabled, but the device still
1334                  * has the level irq asserted. Rinse and repeat....
1335                  *
1336                  * While this works for edge type interrupts, we play
1337                  * it safe and reject unconditionally because we can't
1338                  * say for sure which type this interrupt really
1339                  * has. The type flags are unreliable as the
1340                  * underlying chip implementation can override them.
1341                  */
1342                 pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
1343                        irq);
1344                 ret = -EINVAL;
1345                 goto out_unlock;
1346         }
1347
1348         if (!shared) {
1349                 init_waitqueue_head(&desc->wait_for_threads);
1350
1351                 /* Setup the type (level, edge polarity) if configured: */
1352                 if (new->flags & IRQF_TRIGGER_MASK) {
1353                         ret = __irq_set_trigger(desc,
1354                                                 new->flags & IRQF_TRIGGER_MASK);
1355
1356                         if (ret)
1357                                 goto out_unlock;
1358                 }
1359
1360                 /*
1361                  * Activate the interrupt. That activation must happen
1362                  * independently of IRQ_NOAUTOEN. request_irq() can fail
1363                  * and the callers are supposed to handle
1364                  * that. enable_irq() of an interrupt requested with
1365                  * IRQ_NOAUTOEN is not supposed to fail. The activation
1366                  * keeps it in shutdown mode, it merely associates
1367                  * resources if necessary and if that's not possible it
1368                  * fails. Interrupts which are in managed shutdown mode
1369                  * will simply ignore that activation request.
1370                  */
1371                 ret = irq_activate(desc);
1372                 if (ret)
1373                         goto out_unlock;
1374
1375                 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
1376                                   IRQS_ONESHOT | IRQS_WAITING);
1377                 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
1378
1379                 if (new->flags & IRQF_PERCPU) {
1380                         irqd_set(&desc->irq_data, IRQD_PER_CPU);
1381                         irq_settings_set_per_cpu(desc);
1382                 }
1383
1384                 if (new->flags & IRQF_ONESHOT)
1385                         desc->istate |= IRQS_ONESHOT;
1386
1387                 /* Exclude IRQ from balancing if requested */
1388                 if (new->flags & IRQF_NOBALANCING) {
1389                         irq_settings_set_no_balancing(desc);
1390                         irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1391                 }
1392
1393                 if (irq_settings_can_autoenable(desc)) {
1394                         irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
1395                 } else {
1396                         /*
1397                          * Shared interrupts do not go well with disabling
1398                          * auto enable. The sharing interrupt might request
1399                          * it while it's still disabled and then wait for
1400                          * interrupts forever.
1401                          */
1402                         WARN_ON_ONCE(new->flags & IRQF_SHARED);
1403                         /* Undo nested disables: */
1404                         desc->depth = 1;
1405                 }
1406
1407         } else if (new->flags & IRQF_TRIGGER_MASK) {
1408                 unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
1409                 unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
1410
1411                 if (nmsk != omsk)
1412                         /* hope the handler works with the current trigger mode */
1413                         pr_warn("irq %d uses trigger mode %u; requested %u\n",
1414                                 irq, omsk, nmsk);
1415         }
1416
1417         *old_ptr = new;
1418
1419         irq_pm_install_action(desc, new);
1420
1421         /* Reset broken irq detection when installing new handler */
1422         desc->irq_count = 0;
1423         desc->irqs_unhandled = 0;
1424
1425         /*
1426          * Check whether we disabled the irq via the spurious handler
1427          * before. Reenable it and give it another chance.
1428          */
1429         if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1430                 desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1431                 __enable_irq(desc);
1432         }
1433
1434         raw_spin_unlock_irqrestore(&desc->lock, flags);
1435         chip_bus_sync_unlock(desc);
1436         mutex_unlock(&desc->request_mutex);
1437
1438         irq_setup_timings(desc, new);
1439
1440         /*
1441          * Strictly no need to wake it up, but hung_task complains
1442          * when no hard interrupt wakes the thread up.
1443          */
1444         if (new->thread)
1445                 wake_up_process(new->thread);
1446         if (new->secondary)
1447                 wake_up_process(new->secondary->thread);
1448
1449         register_irq_proc(irq, desc);
1450         new->dir = NULL;
1451         register_handler_proc(irq, new);
1452         return 0;
1453
1454 mismatch:
1455         if (!(new->flags & IRQF_PROBE_SHARED)) {
1456                 pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
1457                        irq, new->flags, new->name, old->flags, old->name);
1458 #ifdef CONFIG_DEBUG_SHIRQ
1459                 dump_stack();
1460 #endif
1461         }
1462         ret = -EBUSY;
1463
1464 out_unlock:
1465         raw_spin_unlock_irqrestore(&desc->lock, flags);
1466
1467         if (!desc->action)
1468                 irq_release_resources(desc);
1469 out_bus_unlock:
1470         chip_bus_sync_unlock(desc);
1471         mutex_unlock(&desc->request_mutex);
1472
1473 out_thread:
1474         if (new->thread) {
1475                 struct task_struct *t = new->thread;
1476
1477                 new->thread = NULL;
1478                 kthread_stop(t);
1479                 put_task_struct(t);
1480         }
1481         if (new->secondary && new->secondary->thread) {
1482                 struct task_struct *t = new->secondary->thread;
1483
1484                 new->secondary->thread = NULL;
1485                 kthread_stop(t);
1486                 put_task_struct(t);
1487         }
1488 out_mput:
1489         module_put(desc->owner);
1490         return ret;
1491 }
1492
1493 /**
1494  *      setup_irq - setup an interrupt
1495  *      @irq: Interrupt line to setup
1496  *      @act: irqaction for the interrupt
1497  *
1498  * Used to statically setup interrupts in the early boot process.
1499  */
1500 int setup_irq(unsigned int irq, struct irqaction *act)
1501 {
1502         int retval;
1503         struct irq_desc *desc = irq_to_desc(irq);
1504
1505         if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1506                 return -EINVAL;
1507
1508         retval = irq_chip_pm_get(&desc->irq_data);
1509         if (retval < 0)
1510                 return retval;
1511
1512         retval = __setup_irq(irq, desc, act);
1513
1514         if (retval)
1515                 irq_chip_pm_put(&desc->irq_data);
1516
1517         return retval;
1518 }
1519 EXPORT_SYMBOL_GPL(setup_irq);
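
/*
 * Illustrative sketch (not part of this file): how early boot code typically
 * uses setup_irq() with a statically allocated irqaction, before the slab
 * allocator is available. The IRQ number 0, "my_timer_interrupt" and the
 * other "my_" names are made up for the example.
 */
#if 0
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

static irqreturn_t my_timer_interrupt(int irq, void *dev_id)
{
	/* acknowledge the tick in the timer hardware here */
	return IRQ_HANDLED;
}

static struct irqaction my_timer_irqaction = {
	.handler = my_timer_interrupt,
	.flags   = IRQF_TIMER,
	.name    = "my_timer",
};

static void __init my_time_init(void)
{
	if (setup_irq(0, &my_timer_irqaction))
		pr_err("my_timer: request of IRQ 0 failed\n");
}
#endif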
1520
1521 /*
1522  * Internal function to unregister an irqaction - used to free
1523  * regular and special interrupts that are part of the architecture.
1524  */
1525 static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
1526 {
1527         unsigned irq = desc->irq_data.irq;
1528         struct irqaction *action, **action_ptr;
1529         unsigned long flags;
1530
1531         WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1532
1533         if (!desc)
1534                 return NULL;
1535
1536         mutex_lock(&desc->request_mutex);
1537         chip_bus_lock(desc);
1538         raw_spin_lock_irqsave(&desc->lock, flags);
1539
1540         /*
1541          * There can be multiple actions per IRQ descriptor, find the right
1542          * one based on the dev_id:
1543          */
1544         action_ptr = &desc->action;
1545         for (;;) {
1546                 action = *action_ptr;
1547
1548                 if (!action) {
1549                         WARN(1, "Trying to free already-free IRQ %d\n", irq);
1550                         raw_spin_unlock_irqrestore(&desc->lock, flags);
1551                         chip_bus_sync_unlock(desc);
1552                         mutex_unlock(&desc->request_mutex);
1553                         return NULL;
1554                 }
1555
1556                 if (action->dev_id == dev_id)
1557                         break;
1558                 action_ptr = &action->next;
1559         }
1560
1561         /* Found it - now remove it from the list of entries: */
1562         *action_ptr = action->next;
1563
1564         irq_pm_remove_action(desc, action);
1565
1566         /* If this was the last handler, shut down the IRQ line: */
1567         if (!desc->action) {
1568                 irq_settings_clr_disable_unlazy(desc);
1569                 irq_shutdown(desc);
1570         }
1571
1572 #ifdef CONFIG_SMP
1573         /* make sure affinity_hint is cleaned up */
1574         if (WARN_ON_ONCE(desc->affinity_hint))
1575                 desc->affinity_hint = NULL;
1576 #endif
1577
1578         raw_spin_unlock_irqrestore(&desc->lock, flags);
1579         /*
1580          * Drop bus_lock here so the changes which were done in the chip
1581          * callbacks above are synced out to the irq chips which hang
1582          * behind a slow bus (I2C, SPI) before calling synchronize_irq().
1583          *
1584          * Aside from that, the bus_lock can also be taken from the threaded
1585          * handler in irq_finalize_oneshot() which results in a deadlock
1586          * because synchronize_irq() would wait forever for the thread to
1587          * complete, which is blocked on the bus lock.
1588          *
1589          * The still held desc->request_mutex protects against a
1590          * concurrent request_irq() of this irq so the release of resources
1591          * and timing data is properly serialized.
1592          */
1593         chip_bus_sync_unlock(desc);
1594
1595         unregister_handler_proc(irq, action);
1596
1597         /* Make sure it's not being used on another CPU: */
1598         synchronize_irq(irq);
1599
1600 #ifdef CONFIG_DEBUG_SHIRQ
1601         /*
1602          * It's a shared IRQ -- the driver ought to be prepared for an IRQ
1603          * event to happen even now that it's being freed, so let's make sure
1604          * that is the case by doing an extra call to the handler ...
1605          *
1606          * ( We do this after actually deregistering it, to make sure that a
1607          *   'real' IRQ doesn't run in parallel with our fake. )
1608          */
1609         if (action->flags & IRQF_SHARED) {
1610                 local_irq_save(flags);
1611                 action->handler(irq, dev_id);
1612                 local_irq_restore(flags);
1613         }
1614 #endif
1615
1616         if (action->thread) {
1617                 kthread_stop(action->thread);
1618                 put_task_struct(action->thread);
1619                 if (action->secondary && action->secondary->thread) {
1620                         kthread_stop(action->secondary->thread);
1621                         put_task_struct(action->secondary->thread);
1622                 }
1623         }
1624
1625         /* Last action releases resources */
1626         if (!desc->action) {
1627                 /*
1628                  * Reacquire the bus lock as irq_release_resources() might
1629                  * require it to deallocate resources over the slow bus.
1630                  */
1631                 chip_bus_lock(desc);
1632                 irq_release_resources(desc);
1633                 chip_bus_sync_unlock(desc);
1634                 irq_remove_timings(desc);
1635         }
1636
1637         mutex_unlock(&desc->request_mutex);
1638
1639         irq_chip_pm_put(&desc->irq_data);
1640         module_put(desc->owner);
1641         kfree(action->secondary);
1642         return action;
1643 }
1644
1645 /**
1646  *      remove_irq - free an interrupt
1647  *      @irq: Interrupt line to free
1648  *      @act: irqaction for the interrupt
1649  *
1650  * Used to remove interrupts statically set up by the early boot process.
1651  */
1652 void remove_irq(unsigned int irq, struct irqaction *act)
1653 {
1654         struct irq_desc *desc = irq_to_desc(irq);
1655
1656         if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1657                 __free_irq(desc, act->dev_id);
1658 }
1659 EXPORT_SYMBOL_GPL(remove_irq);
1660
1661 /**
1662  *      free_irq - free an interrupt allocated with request_irq
1663  *      @irq: Interrupt line to free
1664  *      @dev_id: Device identity to free
1665  *
1666  *      Remove an interrupt handler. The handler is removed and if the
1667  *      interrupt line is no longer in use by any driver it is disabled.
1668  *      On a shared IRQ the caller must ensure the interrupt is disabled
1669  *      on the card it drives before calling this function. The function
1670  *      does not return until any executing interrupts for this IRQ
1671  *      have completed.
1672  *
1673  *      This function must not be called from interrupt context.
1674  *
1675  *      Returns the devname argument passed to request_irq.
1676  */
1677 const void *free_irq(unsigned int irq, void *dev_id)
1678 {
1679         struct irq_desc *desc = irq_to_desc(irq);
1680         struct irqaction *action;
1681         const char *devname;
1682
1683         if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1684                 return NULL;
1685
1686 #ifdef CONFIG_SMP
1687         if (WARN_ON(desc->affinity_notify))
1688                 desc->affinity_notify = NULL;
1689 #endif
1690
1691         action = __free_irq(desc, dev_id);
1692
1693         if (!action)
1694                 return NULL;
1695
1696         devname = action->name;
1697         kfree(action);
1698         return devname;
1699 }
1700 EXPORT_SYMBOL(free_irq);
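
/*
 * Illustrative sketch (not part of this file): the usual request_irq() /
 * free_irq() pairing on a shared line, passing the same dev_id to both so
 * __free_irq() can find the right action. "struct my_device", the register
 * layout and the "my_" names are hypothetical.
 */
#if 0
#include <linux/interrupt.h>
#include <linux/io.h>

struct my_device {
	int irq;
	void __iomem *regs;
};

static irqreturn_t my_handler(int irq, void *dev_id)
{
	struct my_device *dev = dev_id;

	if (!(readl(dev->regs) & 0x1))
		return IRQ_NONE;	/* not our device on the shared line */

	/* ... service the device ... */
	return IRQ_HANDLED;
}

static int my_probe(struct my_device *dev)
{
	/* dev_id must be unique; the device structure is the usual cookie */
	return request_irq(dev->irq, my_handler, IRQF_SHARED, "my_device", dev);
}

static void my_remove(struct my_device *dev)
{
	/* quiesce the card first, then drop the handler */
	free_irq(dev->irq, dev);
}
#endif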
1701
1702 /**
1703  *      request_threaded_irq - allocate an interrupt line
1704  *      @irq: Interrupt line to allocate
1705  *      @handler: Function to be called when the IRQ occurs.
1706  *                Primary handler for threaded interrupts
1707  *                If NULL and thread_fn != NULL the default
1708  *                primary handler is installed
1709  *      @thread_fn: Function called from the irq handler thread
1710  *                  If NULL, no irq thread is created
1711  *      @irqflags: Interrupt type flags
1712  *      @devname: An ascii name for the claiming device
1713  *      @dev_id: A cookie passed back to the handler function
1714  *
1715  *      This call allocates interrupt resources and enables the
1716  *      interrupt line and IRQ handling. From the point this
1717  *      call is made your handler function may be invoked. Since
1718  *      your handler function must clear any interrupt the board
1719  *      raises, you must take care both to initialise your hardware
1720  *      and to set up the interrupt handler in the right order.
1721  *
1722  *      If you want to set up a threaded irq handler for your device
1723  *      then you need to supply @handler and @thread_fn. @handler is
1724  *      still called in hard interrupt context and has to check
1725  *      whether the interrupt originates from the device. If yes it
1726  *      needs to disable the interrupt on the device and return
1727  *      IRQ_WAKE_THREAD which will wake up the handler thread and run
1728  *      @thread_fn. This split handler design is necessary to support
1729  *      shared interrupts.
1730  *
1731  *      Dev_id must be globally unique. Normally the address of the
1732  *      device data structure is used as the cookie. Since the handler
1733  *      receives this value it makes sense to use it.
1734  *
1735  *      If your interrupt is shared you must pass a non NULL dev_id
1736  *      as this is required when freeing the interrupt.
1737  *
1738  *      Flags:
1739  *
1740  *      IRQF_SHARED             Interrupt is shared
1741  *      IRQF_TRIGGER_*          Specify active edge(s) or level
1742  *
1743  */
1744 int request_threaded_irq(unsigned int irq, irq_handler_t handler,
1745                          irq_handler_t thread_fn, unsigned long irqflags,
1746                          const char *devname, void *dev_id)
1747 {
1748         struct irqaction *action;
1749         struct irq_desc *desc;
1750         int retval;
1751
1752         if (irq == IRQ_NOTCONNECTED)
1753                 return -ENOTCONN;
1754
1755         /*
1756          * Sanity-check: shared interrupts must pass in a real dev-ID,
1757          * otherwise we'll have trouble later trying to figure out
1758          * which interrupt is which (messes up the interrupt freeing
1759          * logic etc).
1760          *
1761          * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
1762          * it cannot be set along with IRQF_NO_SUSPEND.
1763          */
1764         if (((irqflags & IRQF_SHARED) && !dev_id) ||
1765             (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
1766             ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
1767                 return -EINVAL;
1768
1769         desc = irq_to_desc(irq);
1770         if (!desc)
1771                 return -EINVAL;
1772
1773         if (!irq_settings_can_request(desc) ||
1774             WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1775                 return -EINVAL;
1776
1777         if (!handler) {
1778                 if (!thread_fn)
1779                         return -EINVAL;
1780                 handler = irq_default_primary_handler;
1781         }
1782
1783         action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1784         if (!action)
1785                 return -ENOMEM;
1786
1787         action->handler = handler;
1788         action->thread_fn = thread_fn;
1789         action->flags = irqflags;
1790         action->name = devname;
1791         action->dev_id = dev_id;
1792
1793         retval = irq_chip_pm_get(&desc->irq_data);
1794         if (retval < 0) {
1795                 kfree(action);
1796                 return retval;
1797         }
1798
1799         retval = __setup_irq(irq, desc, action);
1800
1801         if (retval) {
1802                 irq_chip_pm_put(&desc->irq_data);
1803                 kfree(action->secondary);
1804                 kfree(action);
1805         }
1806
1807 #ifdef CONFIG_DEBUG_SHIRQ_FIXME
1808         if (!retval && (irqflags & IRQF_SHARED)) {
1809                 /*
1810                  * It's a shared IRQ -- the driver ought to be prepared for it
1811                  * to happen immediately, so let's make sure....
1812                  * We disable the irq to make sure that a 'real' IRQ doesn't
1813                  * run in parallel with our fake.
1814                  */
1815                 unsigned long flags;
1816
1817                 disable_irq(irq);
1818                 local_irq_save(flags);
1819
1820                 handler(irq, dev_id);
1821
1822                 local_irq_restore(flags);
1823                 enable_irq(irq);
1824         }
1825 #endif
1826         return retval;
1827 }
1828 EXPORT_SYMBOL(request_threaded_irq);
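
/*
 * Illustrative sketch (not part of this file): the split handler pattern the
 * kernel-doc above describes. The primary handler only checks and masks the
 * device, then IRQ_WAKE_THREAD hands the real work to the thread function.
 * "struct my_device", the register offsets and the "my_" names are made up.
 */
#if 0
#include <linux/interrupt.h>
#include <linux/io.h>

#define MY_STATUS	0x00	/* hypothetical status register */
#define MY_ENABLE	0x04	/* hypothetical interrupt enable register */

struct my_device {
	int irq;
	void __iomem *regs;
};

static irqreturn_t my_quick_check(int irq, void *dev_id)
{
	struct my_device *dev = dev_id;

	if (!(readl(dev->regs + MY_STATUS) & 0x1))
		return IRQ_NONE;		/* not our interrupt */

	writel(0, dev->regs + MY_ENABLE);	/* mask it at the device */
	return IRQ_WAKE_THREAD;			/* run my_slow_work() */
}

static irqreturn_t my_slow_work(int irq, void *dev_id)
{
	struct my_device *dev = dev_id;

	/* may sleep here: talk to the device, allocate memory, ... */
	writel(1, dev->regs + MY_ENABLE);	/* unmask again */
	return IRQ_HANDLED;
}

static int my_probe(struct my_device *dev)
{
	return request_threaded_irq(dev->irq, my_quick_check, my_slow_work,
				    IRQF_SHARED, "my_device", dev);
}
#endif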
1829
1830 /**
1831  *      request_any_context_irq - allocate an interrupt line
1832  *      @irq: Interrupt line to allocate
1833  *      @handler: Function to be called when the IRQ occurs.
1834  *                Threaded handler for threaded interrupts.
1835  *      @flags: Interrupt type flags
1836  *      @name: An ascii name for the claiming device
1837  *      @dev_id: A cookie passed back to the handler function
1838  *
1839  *      This call allocates interrupt resources and enables the
1840  *      interrupt line and IRQ handling. It selects either a
1841  *      hardirq or threaded handling method depending on the
1842  *      context.
1843  *
1844  *      On failure, it returns a negative value. On success,
1845  *      it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
1846  */
1847 int request_any_context_irq(unsigned int irq, irq_handler_t handler,
1848                             unsigned long flags, const char *name, void *dev_id)
1849 {
1850         struct irq_desc *desc;
1851         int ret;
1852
1853         if (irq == IRQ_NOTCONNECTED)
1854                 return -ENOTCONN;
1855
1856         desc = irq_to_desc(irq);
1857         if (!desc)
1858                 return -EINVAL;
1859
1860         if (irq_settings_is_nested_thread(desc)) {
1861                 ret = request_threaded_irq(irq, NULL, handler,
1862                                            flags, name, dev_id);
1863                 return !ret ? IRQC_IS_NESTED : ret;
1864         }
1865
1866         ret = request_irq(irq, handler, flags, name, dev_id);
1867         return !ret ? IRQC_IS_HARDIRQ : ret;
1868 }
1869 EXPORT_SYMBOL_GPL(request_any_context_irq);
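
/*
 * Illustrative sketch (not part of this file): callers of
 * request_any_context_irq() must treat any non-negative return value as
 * success, since it may be IRQC_IS_HARDIRQ or IRQC_IS_NESTED. The handler
 * and the "my_" names below are hypothetical.
 */
#if 0
#include <linux/interrupt.h>

static irqreturn_t my_button_handler(int irq, void *dev_id)
{
	/* runs in hardirq or thread context, depending on the parent chip */
	return IRQ_HANDLED;
}

static int my_request_button_irq(unsigned int irq, void *dev)
{
	int ret;

	ret = request_any_context_irq(irq, my_button_handler,
				      IRQF_TRIGGER_FALLING, "my_button", dev);
	if (ret < 0)
		return ret;

	/* ret is IRQC_IS_HARDIRQ or IRQC_IS_NESTED here */
	return 0;
}
#endif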
1870
1871 void enable_percpu_irq(unsigned int irq, unsigned int type)
1872 {
1873         unsigned int cpu = smp_processor_id();
1874         unsigned long flags;
1875         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1876
1877         if (!desc)
1878                 return;
1879
1880         /*
1881          * If the trigger type is not specified by the caller, then
1882          * use the default for this interrupt.
1883          */
1884         type &= IRQ_TYPE_SENSE_MASK;
1885         if (type == IRQ_TYPE_NONE)
1886                 type = irqd_get_trigger_type(&desc->irq_data);
1887
1888         if (type != IRQ_TYPE_NONE) {
1889                 int ret;
1890
1891                 ret = __irq_set_trigger(desc, type);
1892
1893                 if (ret) {
1894                         WARN(1, "failed to set type for IRQ%d\n", irq);
1895                         goto out;
1896                 }
1897         }
1898
1899         irq_percpu_enable(desc, cpu);
1900 out:
1901         irq_put_desc_unlock(desc, flags);
1902 }
1903 EXPORT_SYMBOL_GPL(enable_percpu_irq);
1904
1905 /**
1906  * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
1907  * @irq:        Linux irq number to check for
1908  *
1909  * Must be called from a non-migratable context. Returns the enable
1910  * state of a per cpu interrupt on the current cpu.
1911  */
1912 bool irq_percpu_is_enabled(unsigned int irq)
1913 {
1914         unsigned int cpu = smp_processor_id();
1915         struct irq_desc *desc;
1916         unsigned long flags;
1917         bool is_enabled;
1918
1919         desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1920         if (!desc)
1921                 return false;
1922
1923         is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
1924         irq_put_desc_unlock(desc, flags);
1925
1926         return is_enabled;
1927 }
1928 EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);
1929
1930 void disable_percpu_irq(unsigned int irq)
1931 {
1932         unsigned int cpu = smp_processor_id();
1933         unsigned long flags;
1934         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1935
1936         if (!desc)
1937                 return;
1938
1939         irq_percpu_disable(desc, cpu);
1940         irq_put_desc_unlock(desc, flags);
1941 }
1942 EXPORT_SYMBOL_GPL(disable_percpu_irq);
1943
1944 /*
1945  * Internal function to unregister a percpu irqaction.
1946  */
1947 static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
1948 {
1949         struct irq_desc *desc = irq_to_desc(irq);
1950         struct irqaction *action;
1951         unsigned long flags;
1952
1953         WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1954
1955         if (!desc)
1956                 return NULL;
1957
1958         raw_spin_lock_irqsave(&desc->lock, flags);
1959
1960         action = desc->action;
1961         if (!action || action->percpu_dev_id != dev_id) {
1962                 WARN(1, "Trying to free already-free IRQ %d\n", irq);
1963                 goto bad;
1964         }
1965
1966         if (!cpumask_empty(desc->percpu_enabled)) {
1967                 WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
1968                      irq, cpumask_first(desc->percpu_enabled));
1969                 goto bad;
1970         }
1971
1972         /* Found it - now remove it from the list of entries: */
1973         desc->action = NULL;
1974
1975         raw_spin_unlock_irqrestore(&desc->lock, flags);
1976
1977         unregister_handler_proc(irq, action);
1978
1979         irq_chip_pm_put(&desc->irq_data);
1980         module_put(desc->owner);
1981         return action;
1982
1983 bad:
1984         raw_spin_unlock_irqrestore(&desc->lock, flags);
1985         return NULL;
1986 }
1987
1988 /**
1989  *      remove_percpu_irq - free a per-cpu interrupt
1990  *      @irq: Interrupt line to free
1991  *      @act: irqaction for the interrupt
1992  *
1993  * Used to remove interrupts statically set up by the early boot process.
1994  */
1995 void remove_percpu_irq(unsigned int irq, struct irqaction *act)
1996 {
1997         struct irq_desc *desc = irq_to_desc(irq);
1998
1999         if (desc && irq_settings_is_per_cpu_devid(desc))
2000             __free_percpu_irq(irq, act->percpu_dev_id);
2001 }
2002
2003 /**
2004  *      free_percpu_irq - free an interrupt allocated with request_percpu_irq
2005  *      @irq: Interrupt line to free
2006  *      @dev_id: Device identity to free
2007  *
2008  *      Remove a percpu interrupt handler. The handler is removed, but
2009  *      the interrupt line is not disabled. This must be done on each
2010  *      CPU before calling this function. The function does not return
2011  *      until any executing interrupts for this IRQ have completed.
2012  *
2013  *      This function must not be called from interrupt context.
2014  */
2015 void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
2016 {
2017         struct irq_desc *desc = irq_to_desc(irq);
2018
2019         if (!desc || !irq_settings_is_per_cpu_devid(desc))
2020                 return;
2021
2022         chip_bus_lock(desc);
2023         kfree(__free_percpu_irq(irq, dev_id));
2024         chip_bus_sync_unlock(desc);
2025 }
2026 EXPORT_SYMBOL_GPL(free_percpu_irq);
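
/*
 * Illustrative sketch (not part of this file): since free_percpu_irq() does
 * not disable the line, each CPU has to do that for itself first;
 * on_each_cpu() is one way to get there. "my_teardown" and friends are
 * hypothetical.
 */
#if 0
#include <linux/interrupt.h>
#include <linux/smp.h>

static void my_disable_local(void *info)
{
	disable_percpu_irq(*(unsigned int *)info);	/* local CPU only */
}

static void my_teardown(unsigned int irq, void __percpu *dev_id)
{
	/* disable the line on every CPU ... */
	on_each_cpu(my_disable_local, &irq, 1);
	/* ... then remove the handler and release the per-cpu cookie */
	free_percpu_irq(irq, dev_id);
}
#endif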
2027
2028 /**
2029  *      setup_percpu_irq - setup a per-cpu interrupt
2030  *      @irq: Interrupt line to setup
2031  *      @act: irqaction for the interrupt
2032  *
2033  * Used to statically set up per-cpu interrupts in the early boot process.
2034  */
2035 int setup_percpu_irq(unsigned int irq, struct irqaction *act)
2036 {
2037         struct irq_desc *desc = irq_to_desc(irq);
2038         int retval;
2039
2040         if (!desc || !irq_settings_is_per_cpu_devid(desc))
2041                 return -EINVAL;
2042
2043         retval = irq_chip_pm_get(&desc->irq_data);
2044         if (retval < 0)
2045                 return retval;
2046
2047         retval = __setup_irq(irq, desc, act);
2048
2049         if (retval)
2050                 irq_chip_pm_put(&desc->irq_data);
2051
2052         return retval;
2053 }
2054
2055 /**
2056  *      __request_percpu_irq - allocate a percpu interrupt line
2057  *      @irq: Interrupt line to allocate
2058  *      @handler: Function to be called when the IRQ occurs.
2059  *      @flags: Interrupt type flags (IRQF_TIMER only)
2060  *      @devname: An ascii name for the claiming device
2061  *      @dev_id: A percpu cookie passed back to the handler function
2062  *
2063  *      This call allocates interrupt resources and enables the
2064  *      interrupt on the local CPU. If the interrupt is supposed to be
2065  *      enabled on other CPUs, it has to be done on each CPU using
2066  *      enable_percpu_irq().
2067  *
2068  *      Dev_id must be globally unique. It is a per-cpu variable, and
2069  *      the handler gets called with the interrupted CPU's instance of
2070  *      that variable.
2071  */
2072 int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
2073                          unsigned long flags, const char *devname,
2074                          void __percpu *dev_id)
2075 {
2076         struct irqaction *action;
2077         struct irq_desc *desc;
2078         int retval;
2079
2080         if (!dev_id)
2081                 return -EINVAL;
2082
2083         desc = irq_to_desc(irq);
2084         if (!desc || !irq_settings_can_request(desc) ||
2085             !irq_settings_is_per_cpu_devid(desc))
2086                 return -EINVAL;
2087
2088         if (flags && flags != IRQF_TIMER)
2089                 return -EINVAL;
2090
2091         action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2092         if (!action)
2093                 return -ENOMEM;
2094
2095         action->handler = handler;
2096         action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
2097         action->name = devname;
2098         action->percpu_dev_id = dev_id;
2099
2100         retval = irq_chip_pm_get(&desc->irq_data);
2101         if (retval < 0) {
2102                 kfree(action);
2103                 return retval;
2104         }
2105
2106         retval = __setup_irq(irq, desc, action);
2107
2108         if (retval) {
2109                 irq_chip_pm_put(&desc->irq_data);
2110                 kfree(action);
2111         }
2112
2113         return retval;
2114 }
2115 EXPORT_SYMBOL_GPL(__request_percpu_irq);
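
/*
 * Illustrative sketch (not part of this file): typical use via the
 * request_percpu_irq() wrapper with an alloc_percpu() cookie; the handler
 * receives the interrupted CPU's instance. Enabling on other CPUs is done
 * per CPU with enable_percpu_irq(). All "my_" names are hypothetical.
 */
#if 0
#include <linux/interrupt.h>
#include <linux/percpu.h>

struct my_percpu_state {
	unsigned long count;
};

static struct my_percpu_state __percpu *my_state;

static irqreturn_t my_percpu_handler(int irq, void *dev_id)
{
	struct my_percpu_state *state = dev_id;	/* this CPU's instance */

	state->count++;
	return IRQ_HANDLED;
}

static int my_init(unsigned int irq)
{
	int ret;

	my_state = alloc_percpu(struct my_percpu_state);
	if (!my_state)
		return -ENOMEM;

	ret = request_percpu_irq(irq, my_percpu_handler, "my_percpu", my_state);
	if (ret) {
		free_percpu(my_state);
		return ret;
	}

	/* other CPUs enable the line themselves, e.g. from a hotplug callback */
	enable_percpu_irq(irq, IRQ_TYPE_NONE);
	return 0;
}
#endif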
2116
2117 /**
2118  *      irq_get_irqchip_state - returns the irqchip state of an interrupt.
2119  *      @irq: Interrupt line that is forwarded to a VM
2120  *      @which: One of IRQCHIP_STATE_* the caller wants to know about
2121  *      @state: a pointer to a boolean where the state is to be stored
2122  *
2123  *      This call snapshots the internal irqchip state of an
2124  *      interrupt, returning into @state the bit corresponding to
2125  *      state @which.
2126  *
2127  *      This function should be called with preemption disabled if the
2128  *      interrupt controller has per-cpu registers.
2129  */
2130 int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2131                           bool *state)
2132 {
2133         struct irq_desc *desc;
2134         struct irq_data *data;
2135         struct irq_chip *chip;
2136         unsigned long flags;
2137         int err = -EINVAL;
2138
2139         desc = irq_get_desc_buslock(irq, &flags, 0);
2140         if (!desc)
2141                 return err;
2142
2143         data = irq_desc_get_irq_data(desc);
2144
2145         do {
2146                 chip = irq_data_get_irq_chip(data);
2147                 if (chip->irq_get_irqchip_state)
2148                         break;
2149 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2150                 data = data->parent_data;
2151 #else
2152                 data = NULL;
2153 #endif
2154         } while (data);
2155
2156         if (data)
2157                 err = chip->irq_get_irqchip_state(data, which, state);
2158
2159         irq_put_desc_busunlock(desc, flags);
2160         return err;
2161 }
2162 EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
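
/*
 * Illustrative sketch (not part of this file): reading back the pending bit
 * of a line forwarded to a guest, the way a VFIO/KVM style user of this API
 * might. "my_virq" is hypothetical; errors are treated as "not pending".
 */
#if 0
#include <linux/interrupt.h>

static bool my_irq_is_pending(unsigned int my_virq)
{
	bool pending = false;

	/* call with preemption disabled if the chip has per-cpu registers */
	if (irq_get_irqchip_state(my_virq, IRQCHIP_STATE_PENDING, &pending))
		return false;

	return pending;
}
#endif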
2163
2164 /**
2165  *      irq_set_irqchip_state - set the state of a forwarded interrupt.
2166  *      @irq: Interrupt line that is forwarded to a VM
2167  *      @which: State to be restored (one of IRQCHIP_STATE_*)
2168  *      @val: Value corresponding to @which
2169  *
2170  *      This call sets the internal irqchip state of an interrupt,
2171  *      depending on the value of @which.
2172  *
2173  *      This function should be called with preemption disabled if the
2174  *      interrupt controller has per-cpu registers.
2175  */
2176 int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2177                           bool val)
2178 {
2179         struct irq_desc *desc;
2180         struct irq_data *data;
2181         struct irq_chip *chip;
2182         unsigned long flags;
2183         int err = -EINVAL;
2184
2185         desc = irq_get_desc_buslock(irq, &flags, 0);
2186         if (!desc)
2187                 return err;
2188
2189         data = irq_desc_get_irq_data(desc);
2190
2191         do {
2192                 chip = irq_data_get_irq_chip(data);
2193                 if (chip->irq_set_irqchip_state)
2194                         break;
2195 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2196                 data = data->parent_data;
2197 #else
2198                 data = NULL;
2199 #endif
2200         } while (data);
2201
2202         if (data)
2203                 err = chip->irq_set_irqchip_state(data, which, val);
2204
2205         irq_put_desc_busunlock(desc, flags);
2206         return err;
2207 }
2208 EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
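
/*
 * Illustrative sketch (not part of this file): restoring a previously saved
 * pending bit before handing a forwarded line back to a guest; the
 * counterpart of the irq_get_irqchip_state() example above. "my_virq" and
 * "was_pending" are hypothetical.
 */
#if 0
#include <linux/interrupt.h>

static int my_irq_restore_pending(unsigned int my_virq, bool was_pending)
{
	/* call with preemption disabled if the chip has per-cpu registers */
	return irq_set_irqchip_state(my_virq, IRQCHIP_STATE_PENDING,
				     was_pending);
}
#endif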