Commit | Line | Data |
---|---|---|
52a65ff5 | 1 | // SPDX-License-Identifier: GPL-2.0 |
1da177e4 | 2 | /* |
a34db9b2 IM |
3 | * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar |
4 | * Copyright (C) 2005-2006 Thomas Gleixner | |
1da177e4 LT |
5 | * |
6 | * This file contains driver APIs to the irq subsystem. | |
7 | */ | |
8 | ||
97fd75b7 AM |
9 | #define pr_fmt(fmt) "genirq: " fmt |
10 | ||
1da177e4 | 11 | #include <linux/irq.h> |
3aa551c9 | 12 | #include <linux/kthread.h> |
1da177e4 LT |
13 | #include <linux/module.h> |
14 | #include <linux/random.h> | |
15 | #include <linux/interrupt.h> | |
4001d8e8 | 16 | #include <linux/irqdomain.h> |
1aeb272c | 17 | #include <linux/slab.h> |
3aa551c9 | 18 | #include <linux/sched.h> |
8bd75c77 | 19 | #include <linux/sched/rt.h> |
0881e7bd | 20 | #include <linux/sched/task.h> |
11ea68f5 | 21 | #include <linux/sched/isolation.h> |
ae7e81c0 | 22 | #include <uapi/linux/sched/types.h> |
4d1d61a6 | 23 | #include <linux/task_work.h> |
1da177e4 LT |
24 | |
25 | #include "internals.h" | |
26 | ||
b6a32bbd | 27 | #if defined(CONFIG_IRQ_FORCED_THREADING) && !defined(CONFIG_PREEMPT_RT) |
91cc470e | 28 | DEFINE_STATIC_KEY_FALSE(force_irqthreads_key); |
8d32a307 TG |
29 | |
30 | static int __init setup_forced_irqthreads(char *arg) | |
31 | { | |
91cc470e | 32 | static_branch_enable(&force_irqthreads_key); |
8d32a307 TG |
33 | return 0; |
34 | } | |
35 | early_param("threadirqs", setup_forced_irqthreads); | |
36 | #endif | |
37 | ||
827bafd5 TG |
38 | static int __irq_get_irqchip_state(struct irq_data *d, enum irqchip_irq_state which, bool *state); |
39 | ||
62e04686 | 40 | static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip) |
1da177e4 | 41 | { |
62e04686 | 42 | struct irq_data *irqd = irq_desc_get_irq_data(desc); |
32f4125e | 43 | bool inprogress; |
1da177e4 | 44 | |
a98ce5c6 | 45 | do { |
a98ce5c6 HX |
46 | /* |
47 | * Wait until we're out of the critical section. This might | |
48 | * give the wrong answer due to the lack of memory barriers. | |
49 | */ | |
32f4125e | 50 | while (irqd_irq_inprogress(&desc->irq_data)) |
a98ce5c6 HX |
51 | cpu_relax(); |
52 | ||
53 | /* Ok, that indicated we're done: double-check carefully. */ | |
17c19535 | 54 | guard(raw_spinlock_irqsave)(&desc->lock); |
32f4125e | 55 | inprogress = irqd_irq_inprogress(&desc->irq_data); |
62e04686 TG |
56 | |
57 | /* | |
58 | * If requested and supported, check at the chip whether it | |
59 | * is in flight at the hardware level, i.e. already pending | |
60 | * in a CPU and waiting for service and acknowledge. | |
61 | */ | |
62 | if (!inprogress && sync_chip) { | |
63 | /* | |
64 | * Ignore the return code. inprogress is only updated | |
65 | * when the chip supports it. | |
66 | */ | |
67 | __irq_get_irqchip_state(irqd, IRQCHIP_STATE_ACTIVE, | |
68 | &inprogress); | |
69 | } | |
a98ce5c6 | 70 | /* Oops, that failed? */ |
32f4125e | 71 | } while (inprogress); |
18258f72 TG |
72 | } |
73 | ||
74 | /** | |
0c169edf TG |
75 | * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs) |
76 | * @irq: interrupt number to wait for | |
18258f72 | 77 | * |
0c169edf TG |
78 | * This function waits for any pending hard IRQ handlers for this interrupt |
79 | * to complete before returning. If you use this function while holding a | |
80 | * resource the IRQ handler may need, you will deadlock. It does not take
81 | * associated threaded handlers into account. | |
18258f72 | 82 | * |
0c169edf TG |
83 | * Do not use this for shutdown scenarios where you must be sure that all |
84 | * parts (hardirq and threaded handler) have completed. | |
18258f72 | 85 | * |
0c169edf | 86 | * Returns: false if a threaded handler is active. |
02cea395 | 87 | * |
0c169edf | 88 | * This function may be called - with care - from IRQ context. |
62e04686 | 89 | * |
0c169edf TG |
90 | * It does not check whether there is an interrupt in flight at the |
91 | * hardware level, but not serviced yet, as this might deadlock when called | |
92 | * with interrupts disabled and the target CPU of the interrupt is the | |
93 | * current CPU. | |
18258f72 | 94 | */ |
02cea395 | 95 | bool synchronize_hardirq(unsigned int irq) |
18258f72 TG |
96 | { |
97 | struct irq_desc *desc = irq_to_desc(irq); | |
3aa551c9 | 98 | |
02cea395 | 99 | if (desc) { |
62e04686 | 100 | __synchronize_hardirq(desc, false); |
02cea395 PZ |
101 | return !atomic_read(&desc->threads_active); |
102 | } | |
103 | ||
104 | return true; | |
18258f72 TG |
105 | } |
106 | EXPORT_SYMBOL(synchronize_hardirq); | |
107 | ||
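A minimal driver-side sketch of what the kernel-doc above describes: quiescing only the hard handler from a non-sleeping path. The names (`struct foo_dev`, `foo_quiesce_hardirq`) are made up for illustration; only `disable_irq_nosync()` and `synchronize_hardirq()` are real APIs.

```c
#include <linux/interrupt.h>
#include <linux/printk.h>

struct foo_dev {			/* hypothetical driver state */
	int irq;
};

static void foo_quiesce_hardirq(struct foo_dev *foo)
{
	/* Stop new hard interrupts; this call does not wait by itself. */
	disable_irq_nosync(foo->irq);

	/*
	 * Wait for a hard handler still running on another CPU. Only the
	 * hardirq part is covered; the return value says whether a
	 * threaded handler is still active, so this is not a full
	 * shutdown barrier - use synchronize_irq()/free_irq() for that.
	 */
	if (!synchronize_hardirq(foo->irq))
		pr_debug("foo: threaded handler still running\n");
}
```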
e2c12739 VW |
108 | static void __synchronize_irq(struct irq_desc *desc) |
109 | { | |
110 | __synchronize_hardirq(desc, true); | |
111 | /* | |
112 | * We made sure that no hardirq handler is running. Now verify that no | |
113 | * threaded handlers are active. | |
114 | */ | |
115 | wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active)); | |
116 | } | |
117 | ||
18258f72 | 118 | /** |
0c169edf TG |
119 | * synchronize_irq - wait for pending IRQ handlers (on other CPUs) |
120 | * @irq: interrupt number to wait for | |
18258f72 | 121 | * |
0c169edf TG |
122 | * This function waits for any pending IRQ handlers for this interrupt to |
123 | * complete before returning. If you use this function while holding a | |
124 | * resource the IRQ handler may need, you will deadlock.
18258f72 | 125 | * |
0c169edf TG |
126 | * Can only be called from preemptible code as it might sleep when |
127 | * an interrupt thread is associated to @irq. | |
62e04686 | 128 | * |
0c169edf TG |
129 | * It optionally makes sure (when the irq chip supports that method) |
130 | * that the interrupt is not pending in any CPU and waiting for | |
131 | * service. | |
18258f72 TG |
132 | */ |
133 | void synchronize_irq(unsigned int irq) | |
134 | { | |
135 | struct irq_desc *desc = irq_to_desc(irq); | |
136 | ||
e2c12739 VW |
137 | if (desc) |
138 | __synchronize_irq(desc); | |
1da177e4 | 139 | } |
1da177e4 LT |
140 | EXPORT_SYMBOL(synchronize_irq); |
141 | ||
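An illustrative teardown path using synchronize_irq() as described above. The device, register offset and buffer fields are assumptions for the sketch; the calls to writel() and synchronize_irq() are real APIs.

```c
#include <linux/interrupt.h>
#include <linux/io.h>

struct bar_dev {			/* hypothetical driver state */
	int irq;
	void __iomem *regs;
	void *dma_buf;
};

static void bar_stop(struct bar_dev *bar)
{
	/* Mask the interrupt at the device level first (device specific). */
	writel(0, bar->regs + 0x10);	/* 0x10: assumed irq-enable register */

	/*
	 * Wait for the hard handler and any threaded handler to finish
	 * before the data they might touch is torn down. This may sleep,
	 * so it must run in preemptible context.
	 */
	synchronize_irq(bar->irq);

	/* Now it is safe to free bar->dma_buf and friends. */
}
```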
3aa551c9 TG |
142 | #ifdef CONFIG_SMP |
143 | cpumask_var_t irq_default_affinity; | |
144 | ||
9c255583 | 145 | static bool __irq_can_set_affinity(struct irq_desc *desc) |
e019c249 JL |
146 | { |
147 | if (!desc || !irqd_can_balance(&desc->irq_data) || | |
148 | !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity) | |
9c255583 TG |
149 | return false; |
150 | return true; | |
e019c249 JL |
151 | } |
152 | ||
771ee3b0 | 153 | /** |
0c169edf TG |
154 | * irq_can_set_affinity - Check if the affinity of a given irq can be set |
155 | * @irq: Interrupt to check | |
771ee3b0 TG |
156 | * |
157 | */ | |
158 | int irq_can_set_affinity(unsigned int irq) | |
159 | { | |
e019c249 | 160 | return __irq_can_set_affinity(irq_to_desc(irq)); |
771ee3b0 TG |
161 | } |
162 | ||
9c255583 TG |
163 | /** |
164 | * irq_can_set_affinity_usr - Check if affinity of a irq can be set from user space | |
165 | * @irq: Interrupt to check | |
166 | * | |
167 | * Like irq_can_set_affinity() above, but additionally checks for the | |
168 | * AFFINITY_MANAGED flag. | |
169 | */ | |
170 | bool irq_can_set_affinity_usr(unsigned int irq) | |
171 | { | |
172 | struct irq_desc *desc = irq_to_desc(irq); | |
173 | ||
174 | return __irq_can_set_affinity(desc) && | |
175 | !irqd_affinity_is_managed(&desc->irq_data); | |
176 | } | |
177 | ||
591d2fb0 | 178 | /** |
0c169edf TG |
179 | * irq_set_thread_affinity - Notify irq threads to adjust affinity |
180 | * @desc: irq descriptor which has affinity changed | |
591d2fb0 | 181 | * |
0c169edf TG |
182 | * Just set IRQTF_AFFINITY and delegate the affinity setting to the |
183 | * interrupt thread itself. We cannot call set_cpus_allowed_ptr() here as
184 | * we hold desc->lock and this code can be called from hard interrupt | |
185 | * context. | |
591d2fb0 | 186 | */ |
827bafd5 | 187 | static void irq_set_thread_affinity(struct irq_desc *desc) |
3aa551c9 | 188 | { |
f944b5a7 | 189 | struct irqaction *action; |
3aa551c9 | 190 | |
80323598 | 191 | for_each_action_of_desc(desc, action) { |
c99303a2 | 192 | if (action->thread) { |
591d2fb0 | 193 | set_bit(IRQTF_AFFINITY, &action->thread_flags); |
c99303a2 CW |
194 | wake_up_process(action->thread); |
195 | } | |
196 | if (action->secondary && action->secondary->thread) { | |
80323598 | 197 | set_bit(IRQTF_AFFINITY, &action->secondary->thread_flags); |
c99303a2 CW |
198 | wake_up_process(action->secondary->thread); |
199 | } | |
80323598 | 200 | } |
3aa551c9 TG |
201 | } |
202 | ||
baedb87d | 203 | #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK |
19e1d4e9 TG |
204 | static void irq_validate_effective_affinity(struct irq_data *data) |
205 | { | |
19e1d4e9 TG |
206 | const struct cpumask *m = irq_data_get_effective_affinity_mask(data); |
207 | struct irq_chip *chip = irq_data_get_irq_chip(data); | |
208 | ||
209 | if (!cpumask_empty(m)) | |
210 | return; | |
211 | pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n", | |
212 | chip->name, data->irq); | |
19e1d4e9 | 213 | } |
baedb87d TG |
214 | #else |
215 | static inline void irq_validate_effective_affinity(struct irq_data *data) { } | |
baedb87d TG |
216 | #endif |
217 | ||
64b6d1d7 MZ |
218 | static DEFINE_PER_CPU(struct cpumask, __tmp_mask); |
219 | ||
818b0f3b JL |
220 | int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, |
221 | bool force) | |
222 | { | |
64b6d1d7 | 223 | struct cpumask *tmp_mask = this_cpu_ptr(&__tmp_mask); |
818b0f3b JL |
224 | struct irq_desc *desc = irq_data_to_desc(data); |
225 | struct irq_chip *chip = irq_data_get_irq_chip(data); | |
33de0aa4 | 226 | const struct cpumask *prog_mask; |
818b0f3b JL |
227 | int ret; |
228 | ||
e43b3b58 TG |
229 | if (!chip || !chip->irq_set_affinity) |
230 | return -EINVAL; | |
231 | ||
11ea68f5 ML |
232 | /* |
233 | * If this is a managed interrupt and housekeeping is enabled on
234 | * it, check whether the requested affinity mask intersects with
235 | * a housekeeping CPU. If so, remove the isolated CPUs from the
236 | * mask and just keep the housekeeping CPU(s). This prevents the
237 | * affinity setter from routing the interrupt to an isolated CPU,
238 | * so that I/O submitted from a housekeeping CPU does not cause
239 | * interrupts on an isolated one.
240 | * | |
241 | * If the masks do not intersect or include online CPU(s) then | |
242 | * keep the requested mask. The isolated target CPUs are only | |
243 | * receiving interrupts when the I/O operation was submitted | |
244 | * directly from them. | |
245 | * | |
246 | * If all housekeeping CPUs in the affinity mask are offline, the | |
247 | * interrupt will be migrated by the CPU hotplug code once a | |
248 | * housekeeping CPU which belongs to the affinity mask comes | |
249 | * online. | |
250 | */ | |
251 | if (irqd_affinity_is_managed(data) && | |
04d4e665 | 252 | housekeeping_enabled(HK_TYPE_MANAGED_IRQ)) { |
33de0aa4 | 253 | const struct cpumask *hk_mask; |
11ea68f5 | 254 | |
04d4e665 | 255 | hk_mask = housekeeping_cpumask(HK_TYPE_MANAGED_IRQ); |
11ea68f5 | 256 | |
64b6d1d7 MZ |
257 | cpumask_and(tmp_mask, mask, hk_mask); |
258 | if (!cpumask_intersects(tmp_mask, cpu_online_mask)) | |
11ea68f5 ML |
259 | prog_mask = mask; |
260 | else | |
64b6d1d7 | 261 | prog_mask = tmp_mask; |
11ea68f5 | 262 | } else { |
33de0aa4 | 263 | prog_mask = mask; |
11ea68f5 | 264 | } |
33de0aa4 | 265 | |
c48c8b82 MZ |
266 | /* |
267 | * Make sure we only provide online CPUs to the irqchip, | |
268 | * unless we are being asked to force the affinity (in which | |
269 | * case we do as we are told). | |
270 | */ | |
64b6d1d7 MZ |
271 | cpumask_and(tmp_mask, prog_mask, cpu_online_mask); |
272 | if (!force && !cpumask_empty(tmp_mask)) | |
273 | ret = chip->irq_set_affinity(data, tmp_mask, force); | |
c48c8b82 MZ |
274 | else if (force) |
275 | ret = chip->irq_set_affinity(data, mask, force); | |
33de0aa4 MZ |
276 | else |
277 | ret = -EINVAL; | |
278 | ||
818b0f3b JL |
279 | switch (ret) { |
280 | case IRQ_SET_MASK_OK: | |
2cb62547 | 281 | case IRQ_SET_MASK_OK_DONE: |
9df872fa | 282 | cpumask_copy(desc->irq_common_data.affinity, mask); |
df561f66 | 283 | fallthrough; |
818b0f3b | 284 | case IRQ_SET_MASK_OK_NOCOPY: |
19e1d4e9 | 285 | irq_validate_effective_affinity(data); |
818b0f3b JL |
286 | irq_set_thread_affinity(desc); |
287 | ret = 0; | |
288 | } | |
289 | ||
290 | return ret; | |
291 | } | |
292 | ||
12f47073 TG |
293 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
294 | static inline int irq_set_affinity_pending(struct irq_data *data, | |
295 | const struct cpumask *dest) | |
296 | { | |
297 | struct irq_desc *desc = irq_data_to_desc(data); | |
298 | ||
299 | irqd_set_move_pending(data); | |
300 | irq_copy_pending(desc, dest); | |
301 | return 0; | |
302 | } | |
303 | #else | |
304 | static inline int irq_set_affinity_pending(struct irq_data *data, | |
305 | const struct cpumask *dest) | |
306 | { | |
307 | return -EBUSY; | |
308 | } | |
309 | #endif | |
310 | ||
311 | static int irq_try_set_affinity(struct irq_data *data, | |
312 | const struct cpumask *dest, bool force) | |
313 | { | |
314 | int ret = irq_do_set_affinity(data, dest, force); | |
315 | ||
316 | /* | |
317 | * In case that the underlying vector management is busy and the | |
318 | * architecture supports the generic pending mechanism then utilize | |
319 | * this to avoid returning an error to user space. | |
320 | */ | |
321 | if (ret == -EBUSY && !force) | |
322 | ret = irq_set_affinity_pending(data, dest); | |
323 | return ret; | |
324 | } | |
325 | ||
baedb87d | 326 | static bool irq_set_affinity_deactivated(struct irq_data *data, |
fd19ce77 | 327 | const struct cpumask *mask) |
baedb87d TG |
328 | { |
329 | struct irq_desc *desc = irq_data_to_desc(data); | |
330 | ||
331 | /* | |
f0c7baca TG |
332 | * Handle irq chips which can handle affinity only in activated |
333 | * state correctly | |
334 | * | |
baedb87d TG |
335 | * If the interrupt is not yet activated, just store the affinity |
336 | * mask and do not call the chip driver at all. On activation the | |
337 | * driver has to make sure anyway that the interrupt is in a | |
a359f757 | 338 | * usable state so startup works. |
baedb87d | 339 | */ |
f0c7baca TG |
340 | if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) || |
341 | irqd_is_activated(data) || !irqd_affinity_on_activate(data)) | |
baedb87d TG |
342 | return false; |
343 | ||
344 | cpumask_copy(desc->irq_common_data.affinity, mask); | |
61030630 | 345 | irq_data_update_effective_affinity(data, mask); |
baedb87d TG |
346 | irqd_set(data, IRQD_AFFINITY_SET); |
347 | return true; | |
348 | } | |
349 | ||
01f8fa4f TG |
350 | int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask, |
351 | bool force) | |
771ee3b0 | 352 | { |
c2d0c555 DD |
353 | struct irq_chip *chip = irq_data_get_irq_chip(data); |
354 | struct irq_desc *desc = irq_data_to_desc(data); | |
1fa46f1f | 355 | int ret = 0; |
771ee3b0 | 356 | |
c2d0c555 | 357 | if (!chip || !chip->irq_set_affinity) |
771ee3b0 TG |
358 | return -EINVAL; |
359 | ||
fd19ce77 | 360 | if (irq_set_affinity_deactivated(data, mask)) |
baedb87d TG |
361 | return 0; |
362 | ||
12f47073 TG |
363 | if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) { |
364 | ret = irq_try_set_affinity(data, mask, force); | |
1fa46f1f | 365 | } else { |
c2d0c555 | 366 | irqd_set_move_pending(data); |
1fa46f1f | 367 | irq_copy_pending(desc, mask); |
57b150cc | 368 | } |
1fa46f1f | 369 | |
cd7eab44 BH |
370 | if (desc->affinity_notify) { |
371 | kref_get(&desc->affinity_notify->kref); | |
df81dfcf EC |
372 | if (!schedule_work(&desc->affinity_notify->work)) { |
373 | /* Work was already scheduled, drop our extra ref */ | |
374 | kref_put(&desc->affinity_notify->kref, | |
375 | desc->affinity_notify->release); | |
376 | } | |
cd7eab44 | 377 | } |
c2d0c555 DD |
378 | irqd_set(data, IRQD_AFFINITY_SET); |
379 | ||
380 | return ret; | |
381 | } | |
382 | ||
1d3aec89 JG |
383 | /** |
384 | * irq_update_affinity_desc - Update affinity management for an interrupt | |
385 | * @irq: The interrupt number to update | |
386 | * @affinity: Pointer to the affinity descriptor | |
387 | * | |
388 | * This interface can be used to configure the affinity management of | |
389 | * interrupts which have been allocated already. | |
390 | * | |
391 | * There are certain limitations on when it may be used - attempts to use it | |
392 | * when the kernel is configured for generic IRQ reservation mode (in
393 | * config GENERIC_IRQ_RESERVATION_MODE) will fail, as it may conflict with | |
394 | * managed/non-managed interrupt accounting. In addition, attempts to use it on | |
395 | * an interrupt which is already started or which has already been configured | |
396 | * as managed will also fail, as these mean invalid init state or double init. | |
397 | */ | |
b0561582 | 398 | int irq_update_affinity_desc(unsigned int irq, struct irq_affinity_desc *affinity) |
1d3aec89 | 399 | { |
1d3aec89 JG |
400 | /* |
401 | * Supporting this with the reservation scheme used by x86 needs | |
402 | * some more thought. Fail it for now. | |
403 | */ | |
404 | if (IS_ENABLED(CONFIG_GENERIC_IRQ_RESERVATION_MODE)) | |
405 | return -EOPNOTSUPP; | |
406 | ||
b0561582 TG |
407 | scoped_irqdesc_get_and_buslock(irq, 0) { |
408 | struct irq_desc *desc = scoped_irqdesc; | |
409 | bool activated; | |
1d3aec89 | 410 | |
b0561582 TG |
411 | /* Requires the interrupt to be shut down */ |
412 | if (irqd_is_started(&desc->irq_data)) | |
413 | return -EBUSY; | |
1d3aec89 | 414 | |
b0561582 TG |
415 | /* Interrupts which are already managed cannot be modified */ |
416 | if (irqd_affinity_is_managed(&desc->irq_data)) | |
417 | return -EBUSY; | |
418 | /* | |
419 | * Deactivate the interrupt. That's required to undo | |
420 | * anything an earlier activation has established. | |
421 | */ | |
422 | activated = irqd_is_activated(&desc->irq_data); | |
423 | if (activated) | |
424 | irq_domain_deactivate_irq(&desc->irq_data); | |
1d3aec89 | 425 | |
b0561582 TG |
426 | if (affinity->is_managed) { |
427 | irqd_set(&desc->irq_data, IRQD_AFFINITY_MANAGED); | |
428 | irqd_set(&desc->irq_data, IRQD_MANAGED_SHUTDOWN); | |
429 | } | |
1d3aec89 | 430 | |
b0561582 | 431 | cpumask_copy(desc->irq_common_data.affinity, &affinity->mask); |
1d3aec89 | 432 | |
b0561582 TG |
433 | /* Restore the activation state */ |
434 | if (activated) | |
435 | irq_domain_activate_irq(&desc->irq_data, false); | |
436 | return 0; | |
437 | } | |
438 | return -EINVAL; | |
1d3aec89 JG |
439 | } |
440 | ||
4d80d6ca TG |
441 | static int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, |
442 | bool force) | |
c2d0c555 DD |
443 | { |
444 | struct irq_desc *desc = irq_to_desc(irq); | |
c2d0c555 DD |
445 | |
446 | if (!desc) | |
447 | return -EINVAL; | |
448 | ||
17c19535 TG |
449 | guard(raw_spinlock_irqsave)(&desc->lock); |
450 | return irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force); | |
771ee3b0 TG |
451 | } |
452 | ||
4d80d6ca TG |
453 | /** |
454 | * irq_set_affinity - Set the irq affinity of a given irq | |
455 | * @irq: Interrupt to set affinity | |
456 | * @cpumask: cpumask | |
457 | * | |
458 | * Fails if cpumask does not contain an online CPU | |
459 | */ | |
460 | int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) | |
461 | { | |
462 | return __irq_set_affinity(irq, cpumask, false); | |
463 | } | |
464 | EXPORT_SYMBOL_GPL(irq_set_affinity); | |
465 | ||
466 | /** | |
467 | * irq_force_affinity - Force the irq affinity of a given irq | |
468 | * @irq: Interrupt to set affinity | |
469 | * @cpumask: cpumask | |
470 | * | |
471 | * Same as irq_set_affinity, but without checking the mask against | |
472 | * online cpus. | |
473 | * | |
474 | * Solely for low level cpu hotplug code, where we need to make per | |
475 | * cpu interrupts affine before the cpu becomes online. | |
476 | */ | |
477 | int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask) | |
478 | { | |
479 | return __irq_set_affinity(irq, cpumask, true); | |
480 | } | |
481 | EXPORT_SYMBOL_GPL(irq_force_affinity); | |
482 | ||
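A short sketch of a caller of irq_set_affinity(); the function name `baz_pin_queue_irq` is hypothetical, and the behaviour note is based on irq_do_set_affinity() above.

```c
#include <linux/interrupt.h>
#include <linux/cpumask.h>

/* Pin a queue interrupt to one CPU picked by the caller. */
static int baz_pin_queue_irq(unsigned int irq, unsigned int cpu)
{
	/*
	 * irq_do_set_affinity() only hands online CPUs to the irqchip, so
	 * this fails with -EINVAL if @cpu is offline (and not forced).
	 */
	return irq_set_affinity(irq, cpumask_of(cpu));
}
```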
7e04e5c6 | 483 | int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m, bool setaffinity) |
e7a297b0 | 484 | { |
7e04e5c6 | 485 | int ret = -EINVAL; |
e7a297b0 | 486 | |
7e04e5c6 TG |
487 | scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_GLOBAL) { |
488 | scoped_irqdesc->affinity_hint = m; | |
489 | ret = 0; | |
490 | } | |
491 | ||
492 | if (!ret && m && setaffinity) | |
4fe7ffb7 | 493 | __irq_set_affinity(irq, m, false); |
7e04e5c6 | 494 | return ret; |
e7a297b0 | 495 | } |
65c7cded | 496 | EXPORT_SYMBOL_GPL(__irq_apply_affinity_hint); |
e7a297b0 | 497 | |
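For context, drivers normally reach __irq_apply_affinity_hint() through the irq_set_affinity_hint()/irq_update_affinity_hint() wrappers in <linux/interrupt.h>. A hedged sketch follows; the `qux_*` names are invented, and the wrapper semantics are as I understand them (the hint variant only records the mask for irqbalance/user space).

```c
#include <linux/interrupt.h>
#include <linux/cpumask.h>

/* Suggest, but do not force, a home CPU set for a per-queue interrupt. */
static int qux_hint_queue_cpus(unsigned int irq, const struct cpumask *mask)
{
	return irq_update_affinity_hint(irq, mask);
}

/* The hint must be cleared before the cpumask storage goes away. */
static void qux_clear_hint(unsigned int irq)
{
	irq_update_affinity_hint(irq, NULL);
}
```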
cd7eab44 BH |
498 | static void irq_affinity_notify(struct work_struct *work) |
499 | { | |
7e04e5c6 | 500 | struct irq_affinity_notify *notify = container_of(work, struct irq_affinity_notify, work); |
cd7eab44 BH |
501 | struct irq_desc *desc = irq_to_desc(notify->irq); |
502 | cpumask_var_t cpumask; | |
cd7eab44 | 503 | |
1fa46f1f | 504 | if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL)) |
cd7eab44 BH |
505 | goto out; |
506 | ||
17c19535 TG |
507 | scoped_guard(raw_spinlock_irqsave, &desc->lock) { |
508 | if (irq_move_pending(&desc->irq_data)) | |
509 | irq_get_pending(cpumask, desc); | |
510 | else | |
511 | cpumask_copy(cpumask, desc->irq_common_data.affinity); | |
512 | } | |
cd7eab44 BH |
513 | |
514 | notify->notify(notify, cpumask); | |
515 | ||
516 | free_cpumask_var(cpumask); | |
517 | out: | |
518 | kref_put(¬ify->kref, notify->release); | |
519 | } | |
520 | ||
521 | /** | |
0c169edf TG |
522 | * irq_set_affinity_notifier - control notification of IRQ affinity changes |
523 | * @irq: Interrupt for which to enable/disable notification | |
524 | * @notify: Context for notification, or %NULL to disable | |
525 | * notification. Function pointers must be initialised; | |
526 | * the other fields will be initialised by this function. | |
527 | * | |
528 | * Must be called in process context. Notification may only be enabled | |
529 | * after the IRQ is allocated and must be disabled before the IRQ is freed | |
530 | * using free_irq(). | |
cd7eab44 | 531 | */ |
0c169edf | 532 | int irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) |
cd7eab44 BH |
533 | { |
534 | struct irq_desc *desc = irq_to_desc(irq); | |
535 | struct irq_affinity_notify *old_notify; | |
cd7eab44 BH |
536 | |
537 | /* The release function is promised process context */ | |
538 | might_sleep(); | |
539 | ||
6678ae19 | 540 | if (!desc || irq_is_nmi(desc)) |
cd7eab44 BH |
541 | return -EINVAL; |
542 | ||
543 | /* Complete initialisation of *notify */ | |
544 | if (notify) { | |
545 | notify->irq = irq; | |
546 | kref_init(¬ify->kref); | |
547 | INIT_WORK(¬ify->work, irq_affinity_notify); | |
548 | } | |
549 | ||
17c19535 TG |
550 | scoped_guard(raw_spinlock_irqsave, &desc->lock) { |
551 | old_notify = desc->affinity_notify; | |
552 | desc->affinity_notify = notify; | |
553 | } | |
cd7eab44 | 554 | |
59c39840 | 555 | if (old_notify) { |
df81dfcf EC |
556 | if (cancel_work_sync(&old_notify->work)) { |
557 | /* Pending work had a ref, put that one too */ | |
558 | kref_put(&old_notify->kref, old_notify->release); | |
559 | } | |
cd7eab44 | 560 | kref_put(&old_notify->kref, old_notify->release); |
59c39840 | 561 | } |
cd7eab44 BH |
562 | |
563 | return 0; | |
564 | } | |
565 | EXPORT_SYMBOL_GPL(irq_set_affinity_notifier); | |
566 | ||
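A sketch of registering an affinity notifier as the kernel-doc above describes. The `quux_*` context and callbacks are hypothetical; the struct irq_affinity_notify fields and kref-based release are taken from the code above.

```c
#include <linux/interrupt.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct quux_ctx {			/* hypothetical per-device context */
	struct irq_affinity_notify notify;
};

static void quux_affinity_changed(struct irq_affinity_notify *notify,
				  const cpumask_t *mask)
{
	struct quux_ctx *ctx = container_of(notify, struct quux_ctx, notify);

	/* Runs from a workqueue; re-steer per-CPU resources here. */
	(void)ctx;
}

static void quux_notify_release(struct kref *ref)
{
	struct irq_affinity_notify *notify =
		container_of(ref, struct irq_affinity_notify, kref);

	kfree(container_of(notify, struct quux_ctx, notify));
}

static int quux_watch_affinity(unsigned int irq)
{
	struct quux_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	int ret;

	if (!ctx)
		return -ENOMEM;

	ctx->notify.notify = quux_affinity_changed;
	ctx->notify.release = quux_notify_release;
	/* irq, kref and work are initialised by irq_set_affinity_notifier(). */
	ret = irq_set_affinity_notifier(irq, &ctx->notify);
	if (ret)
		kfree(ctx);
	return ret;
}
```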
18404756 MK |
567 | #ifndef CONFIG_AUTO_IRQ_AFFINITY |
568 | /* | |
569 | * Generic version of the affinity autoselector. | |
570 | */ | |
43564bd9 | 571 | int irq_setup_affinity(struct irq_desc *desc) |
18404756 | 572 | { |
569bda8d | 573 | struct cpumask *set = irq_default_affinity; |
17c19535 TG |
574 | int node = irq_desc_get_node(desc); |
575 | ||
cba4235e TG |
576 | static DEFINE_RAW_SPINLOCK(mask_lock); |
577 | static struct cpumask mask; | |
569bda8d | 578 | |
b008207c | 579 | /* Excludes PER_CPU and NO_BALANCE interrupts */ |
e019c249 | 580 | if (!__irq_can_set_affinity(desc)) |
18404756 MK |
581 | return 0; |
582 | ||
17c19535 | 583 | guard(raw_spinlock)(&mask_lock); |
f6d87f4b | 584 | /* |
9332ef9d | 585 | * Preserve the managed affinity setting and a userspace affinity |
06ee6d57 | 586 | * setup, but make sure that one of the targets is online. |
f6d87f4b | 587 | */ |
06ee6d57 TG |
588 | if (irqd_affinity_is_managed(&desc->irq_data) || |
589 | irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) { | |
9df872fa | 590 | if (cpumask_intersects(desc->irq_common_data.affinity, |
569bda8d | 591 | cpu_online_mask)) |
9df872fa | 592 | set = desc->irq_common_data.affinity; |
0c6f8a8b | 593 | else |
2bdd1055 | 594 | irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET); |
f6d87f4b | 595 | } |
18404756 | 596 | |
cba4235e | 597 | cpumask_and(&mask, cpu_online_mask, set); |
bddda606 SR |
598 | if (cpumask_empty(&mask)) |
599 | cpumask_copy(&mask, cpu_online_mask); | |
600 | ||
241fc640 PB |
601 | if (node != NUMA_NO_NODE) { |
602 | const struct cpumask *nodemask = cpumask_of_node(node); | |
603 | ||
604 | /* make sure at least one of the cpus in nodemask is online */ | |
cba4235e TG |
605 | if (cpumask_intersects(&mask, nodemask)) |
606 | cpumask_and(&mask, &mask, nodemask); | |
241fc640 | 607 | } |
17c19535 | 608 | return irq_do_set_affinity(&desc->irq_data, &mask, false); |
18404756 | 609 | } |
f6d87f4b | 610 | #else |
a8a98eac | 611 | /* Wrapper for ALPHA specific affinity selector magic */ |
cba4235e | 612 | int irq_setup_affinity(struct irq_desc *desc) |
f6d87f4b | 613 | { |
cba4235e | 614 | return irq_select_affinity(irq_desc_get_irq(desc)); |
f6d87f4b | 615 | } |
cba6437a TG |
616 | #endif /* CONFIG_AUTO_IRQ_AFFINITY */ |
617 | #endif /* CONFIG_SMP */ | |
18404756 | 618 | |
1da177e4 | 619 | |
fcf1ae2f | 620 | /** |
0c169edf TG |
621 | * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt |
622 | * @irq: interrupt number to set affinity | |
623 | * @vcpu_info: vCPU specific data or pointer to a percpu array of vCPU | |
624 | * specific data for percpu_devid interrupts | |
625 | * | |
626 | * This function uses the vCPU specific data to set the vCPU affinity for | |
627 | * an irq. The vCPU specific data is passed from outside, such as KVM. One | |
628 | * example code path is as below: KVM -> IOMMU -> irq_set_vcpu_affinity(). | |
fcf1ae2f FW |
629 | */ |
630 | int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info) | |
631 | { | |
55ac0ad2 TG |
632 | scoped_irqdesc_get_and_lock(irq, 0) { |
633 | struct irq_desc *desc = scoped_irqdesc; | |
634 | struct irq_data *data; | |
635 | struct irq_chip *chip; | |
fcf1ae2f | 636 | |
55ac0ad2 TG |
637 | data = irq_desc_get_irq_data(desc); |
638 | do { | |
639 | chip = irq_data_get_irq_chip(data); | |
640 | if (chip && chip->irq_set_vcpu_affinity) | |
641 | break; | |
fcf1ae2f | 642 | |
55ac0ad2 TG |
643 | data = irqd_get_parent_data(data); |
644 | } while (data); | |
fcf1ae2f | 645 | |
55ac0ad2 TG |
646 | if (!data) |
647 | return -ENOSYS; | |
648 | return chip->irq_set_vcpu_affinity(data, vcpu_info); | |
649 | } | |
650 | return -EINVAL; | |
fcf1ae2f FW |
651 | } |
652 | EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity); | |
653 | ||
79ff1cda | 654 | void __disable_irq(struct irq_desc *desc) |
0a0c5168 | 655 | { |
3aae994f | 656 | if (!desc->depth++) |
87923470 | 657 | irq_disable(desc); |
0a0c5168 RW |
658 | } |
659 | ||
02725e74 TG |
660 | static int __disable_irq_nosync(unsigned int irq) |
661 | { | |
1b744444 TG |
662 | scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_GLOBAL) { |
663 | __disable_irq(scoped_irqdesc); | |
664 | return 0; | |
665 | } | |
666 | return -EINVAL; | |
02725e74 TG |
667 | } |
668 | ||
1da177e4 | 669 | /** |
0c169edf TG |
670 | * disable_irq_nosync - disable an irq without waiting |
671 | * @irq: Interrupt to disable | |
1da177e4 | 672 | * |
0c169edf TG |
673 | * Disable the selected interrupt line. Disables and Enables are |
674 | * nested. | |
675 | * Unlike disable_irq(), this function does not ensure existing | |
676 | * instances of the IRQ handler have completed before returning. | |
1da177e4 | 677 | * |
0c169edf | 678 | * This function may be called from IRQ context. |
1da177e4 LT |
679 | */ |
680 | void disable_irq_nosync(unsigned int irq) | |
681 | { | |
02725e74 | 682 | __disable_irq_nosync(irq); |
1da177e4 | 683 | } |
1da177e4 LT |
684 | EXPORT_SYMBOL(disable_irq_nosync); |
685 | ||
686 | /** | |
0c169edf TG |
687 | * disable_irq - disable an irq and wait for completion |
688 | * @irq: Interrupt to disable | |
689 | * | |
690 | * Disable the selected interrupt line. Enables and Disables are nested. | |
1da177e4 | 691 | * |
0c169edf TG |
692 | * This function waits for any pending IRQ handlers for this interrupt to |
693 | * complete before returning. If you use this function while holding a | |
694 | * resource the IRQ handler may need, you will deadlock.
1da177e4 | 695 | * |
0c169edf TG |
696 | * Can only be called from preemptible code as it might sleep when an |
697 | * interrupt thread is associated to @irq. | |
17549b0f | 698 | * |
1da177e4 LT |
699 | */ |
700 | void disable_irq(unsigned int irq) | |
701 | { | |
17549b0f | 702 | might_sleep(); |
02725e74 | 703 | if (!__disable_irq_nosync(irq)) |
1da177e4 LT |
704 | synchronize_irq(irq); |
705 | } | |
1da177e4 LT |
706 | EXPORT_SYMBOL(disable_irq); |
707 | ||
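The classic use of the nesting disable_irq()/enable_irq() pair described above, sketched with invented names (`struct corge_dev`, `CORGE_CTRL`); only the genirq calls are real.

```c
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/types.h>

#define CORGE_CTRL	0x04		/* assumed control register offset */

struct corge_dev {			/* hypothetical driver state */
	int irq;
	void __iomem *regs;
	u32 new_ctrl;
};

/* Reprogram device registers with the handler guaranteed not to run. */
static void corge_reconfigure(struct corge_dev *cd)
{
	disable_irq(cd->irq);		/* sleeps until handlers complete */

	writel(cd->new_ctrl, cd->regs + CORGE_CTRL);

	enable_irq(cd->irq);		/* matches the disable above */
}
```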
02cea395 | 708 | /** |
0c169edf TG |
709 | * disable_hardirq - disables an irq and waits for hardirq completion |
710 | * @irq: Interrupt to disable | |
02cea395 | 711 | * |
0c169edf | 712 | * Disable the selected interrupt line. Enables and Disables are nested. |
02cea395 | 713 | * |
0c169edf TG |
714 | * This function waits for any pending hard IRQ handlers for this interrupt |
715 | * to complete before returning. If you use this function while holding a | |
716 | * resource the hard IRQ handler may need, you will deadlock.
02cea395 | 717 | * |
0c169edf TG |
718 | * When used to optimistically disable an interrupt from atomic context the |
719 | * return value must be checked. | |
02cea395 | 720 | * |
0c169edf TG |
721 | * Returns: false if a threaded handler is active. |
722 | * | |
723 | * This function may be called - with care - from IRQ context. | |
02cea395 PZ |
724 | */ |
725 | bool disable_hardirq(unsigned int irq) | |
726 | { | |
727 | if (!__disable_irq_nosync(irq)) | |
728 | return synchronize_hardirq(irq); | |
02cea395 PZ |
729 | return false; |
730 | } | |
731 | EXPORT_SYMBOL_GPL(disable_hardirq); | |
732 | ||
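A sketch of the "optimistic disable from atomic context" pattern the kernel-doc mentions, in the style of netpoll-like users. All `grault_*` names are made up; the sketch assumes the IRQ number is valid so the enable_irq() balances the disable.

```c
#include <linux/interrupt.h>

struct grault_dev {			/* hypothetical driver state */
	int irq;
};

static void grault_process_ring(struct grault_dev *gd)
{
	/* device specific work, elided for the sketch */
}

static void grault_poll(struct grault_dev *gd)
{
	/*
	 * disable_hardirq() always disables the line, but only returns
	 * true when no threaded handler is active either - check it
	 * before touching state the handlers share.
	 */
	if (disable_hardirq(gd->irq))
		grault_process_ring(gd);

	enable_irq(gd->irq);
}
```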
b525903c | 733 | /** |
0c169edf TG |
734 | * disable_nmi_nosync - disable an nmi without waiting |
735 | * @irq: Interrupt to disable | |
736 | * | |
737 | * Disable the selected interrupt line. Disables and enables are nested. | |
738 | * | |
739 | * The interrupt to disable must have been requested through request_nmi. | |
740 | * Unlike disable_nmi(), this function does not ensure existing | |
741 | * instances of the IRQ handler have completed before returning. | |
b525903c JT |
742 | */ |
743 | void disable_nmi_nosync(unsigned int irq) | |
744 | { | |
745 | disable_irq_nosync(irq); | |
746 | } | |
747 | ||
79ff1cda | 748 | void __enable_irq(struct irq_desc *desc) |
1adb0850 TG |
749 | { |
750 | switch (desc->depth) { | |
751 | case 0: | |
0a0c5168 | 752 | err_out: |
79ff1cda JL |
753 | WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", |
754 | irq_desc_get_irq(desc)); | |
1adb0850 TG |
755 | break; |
756 | case 1: { | |
c531e836 | 757 | if (desc->istate & IRQS_SUSPENDED) |
0a0c5168 | 758 | goto err_out; |
1adb0850 | 759 | /* Prevent probing on this irq: */ |
1ccb4e61 | 760 | irq_settings_set_noprobe(desc); |
201d7f47 TG |
761 | /* |
762 | * Call irq_startup() not irq_enable() here because the | |
a60dd06a DS |
763 | * interrupt might be marked NOAUTOEN so irq_startup() |
764 | * needs to be invoked when it gets enabled the first time. | |
765 | * This is also required when __enable_irq() is invoked for | |
766 | * a managed and shutdown interrupt from the S3 resume | |
767 | * path. | |
768 | * | |
769 | * If it was already started up, then irq_startup() will | |
770 | * invoke irq_enable() under the hood. | |
201d7f47 | 771 | */ |
c942cee4 | 772 | irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE); |
201d7f47 | 773 | break; |
1adb0850 TG |
774 | } |
775 | default: | |
776 | desc->depth--; | |
777 | } | |
778 | } | |
779 | ||
1da177e4 | 780 | /** |
0c169edf TG |
781 | * enable_irq - enable handling of an irq |
782 | * @irq: Interrupt to enable | |
1da177e4 | 783 | * |
0c169edf TG |
784 | * Undoes the effect of one call to disable_irq(). If this matches the |
785 | * last disable, processing of interrupts on this IRQ line is re-enabled. | |
1da177e4 | 786 | * |
0c169edf TG |
787 | * This function may be called from IRQ context only when |
788 | * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL ! | |
1da177e4 LT |
789 | */ |
790 | void enable_irq(unsigned int irq) | |
791 | { | |
bddd10c5 TG |
792 | scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_GLOBAL) { |
793 | struct irq_desc *desc = scoped_irqdesc; | |
2656c366 | 794 | |
bddd10c5 TG |
795 | if (WARN(!desc->irq_data.chip, "enable_irq before setup/request_irq: irq %u\n", irq)) |
796 | return; | |
797 | __enable_irq(desc); | |
798 | } | |
1da177e4 | 799 | } |
1da177e4 LT |
800 | EXPORT_SYMBOL(enable_irq); |
801 | ||
b525903c | 802 | /** |
0c169edf TG |
803 | * enable_nmi - enable handling of an nmi |
804 | * @irq: Interrupt to enable | |
b525903c | 805 | * |
0c169edf TG |
806 | * The interrupt to enable must have been requested through request_nmi. |
807 | * Undoes the effect of one call to disable_nmi(). If this matches the last | |
808 | * disable, processing of interrupts on this IRQ line is re-enabled. | |
b525903c JT |
809 | */ |
810 | void enable_nmi(unsigned int irq) | |
811 | { | |
812 | enable_irq(irq); | |
813 | } | |
814 | ||
0c5d1eb7 | 815 | static int set_irq_wake_real(unsigned int irq, unsigned int on) |
2db87321 | 816 | { |
08678b08 | 817 | struct irq_desc *desc = irq_to_desc(irq); |
2db87321 UKK |
818 | int ret = -ENXIO; |
819 | ||
60f96b41 SS |
820 | if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE) |
821 | return 0; | |
822 | ||
2f7e99bb TG |
823 | if (desc->irq_data.chip->irq_set_wake) |
824 | ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on); | |
2db87321 UKK |
825 | |
826 | return ret; | |
827 | } | |
828 | ||
ba9a2331 | 829 | /** |
0c169edf TG |
830 | * irq_set_irq_wake - control irq power management wakeup |
831 | * @irq: interrupt to control | |
832 | * @on: enable/disable power management wakeup | |
833 | * | |
834 | * Enable/disable power management wakeup mode, which is disabled by | |
835 | * default. Enables and disables must match, just as they match for | |
836 | * non-wakeup mode support. | |
837 | * | |
838 | * Wakeup mode lets this IRQ wake the system from sleep states like | |
839 | * "suspend to RAM". | |
840 | * | |
841 | * Note: irq enable/disable state is completely orthogonal to the | |
842 | * enable/disable state of irq wake. An irq can be disabled with | |
843 | * disable_irq() and still wake the system as long as the irq has wake | |
844 | * enabled. If this does not hold, then the underlying irq chip and the | |
845 | * related driver need to be investigated. | |
ba9a2331 | 846 | */ |
a0cd9ca2 | 847 | int irq_set_irq_wake(unsigned int irq, unsigned int on) |
ba9a2331 | 848 | { |
58eb5721 | 849 | scoped_irqdesc_get_and_buslock(irq, IRQ_GET_DESC_CHECK_GLOBAL) { |
8589e325 TG |
850 | struct irq_desc *desc = scoped_irqdesc; |
851 | int ret = 0; | |
13863a66 | 852 | |
8589e325 TG |
853 | /* Don't use NMIs as wake up interrupts please */ |
854 | if (irq_is_nmi(desc)) | |
855 | return -EINVAL; | |
b525903c | 856 | |
8589e325 TG |
857 | /* |
858 | * wakeup-capable irqs can be shared between drivers that | |
859 | * don't need to have the same sleep mode behaviors. | |
860 | */ | |
861 | if (on) { | |
862 | if (desc->wake_depth++ == 0) { | |
863 | ret = set_irq_wake_real(irq, on); | |
864 | if (ret) | |
865 | desc->wake_depth = 0; | |
866 | else | |
867 | irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE); | |
868 | } | |
869 | } else { | |
870 | if (desc->wake_depth == 0) { | |
871 | WARN(1, "Unbalanced IRQ %d wake disable\n", irq); | |
872 | } else if (--desc->wake_depth == 0) { | |
873 | ret = set_irq_wake_real(irq, on); | |
874 | if (ret) | |
875 | desc->wake_depth = 1; | |
876 | else | |
877 | irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE); | |
878 | } | |
2db87321 | 879 | } |
8589e325 | 880 | return ret; |
15a647eb | 881 | } |
8589e325 | 882 | return -EINVAL; |
ba9a2331 | 883 | } |
a0cd9ca2 | 884 | EXPORT_SYMBOL(irq_set_irq_wake); |
ba9a2331 | 885 | |
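Drivers usually reach irq_set_irq_wake() through the enable_irq_wake()/disable_irq_wake() wrappers from <linux/interrupt.h>, typically in their suspend/resume callbacks. A hedged sketch with an invented `garply` device follows; note the enable/disable calls must balance, exactly as the depth handling above requires.

```c
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>

struct garply_dev {			/* hypothetical driver state */
	struct device *dev;
	int irq;
};

static int garply_suspend(struct garply_dev *gd)
{
	if (device_may_wakeup(gd->dev))
		enable_irq_wake(gd->irq);	/* wake enables nest */
	return 0;
}

static int garply_resume(struct garply_dev *gd)
{
	if (device_may_wakeup(gd->dev))
		disable_irq_wake(gd->irq);	/* must balance suspend */
	return 0;
}
```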
1da177e4 LT |
886 | /* |
887 | * Internal function that tells the architecture code whether a | |
888 | * particular irq has been exclusively allocated or is available | |
889 | * for driver use. | |
890 | */ | |
a1ceb831 | 891 | bool can_request_irq(unsigned int irq, unsigned long irqflags) |
1da177e4 | 892 | { |
a1ceb831 TG |
893 | scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_GLOBAL) { |
894 | struct irq_desc *desc = scoped_irqdesc; | |
7d94f7ca | 895 | |
a1ceb831 TG |
896 | if (irq_settings_can_request(desc)) { |
897 | if (!desc->action || irqflags & desc->action->flags & IRQF_SHARED) | |
898 | return true; | |
899 | } | |
02725e74 | 900 | } |
a1ceb831 | 901 | return false; |
1da177e4 LT |
902 | } |
903 | ||
a1ff541a | 904 | int __irq_set_trigger(struct irq_desc *desc, unsigned long flags) |
82736f4d | 905 | { |
6b8ff312 | 906 | struct irq_chip *chip = desc->irq_data.chip; |
d4d5e089 | 907 | int ret, unmask = 0; |
82736f4d | 908 | |
b2ba2c30 | 909 | if (!chip || !chip->irq_set_type) { |
82736f4d UKK |
910 | /* |
911 | * IRQF_TRIGGER_* but the PIC does not support multiple | |
912 | * flow-types? | |
913 | */ | |
a1ff541a JL |
914 | pr_debug("No set_type function for IRQ %d (%s)\n", |
915 | irq_desc_get_irq(desc), | |
f5d89470 | 916 | chip ? (chip->name ? : "unknown") : "unknown"); |
82736f4d UKK |
917 | return 0; |
918 | } | |
919 | ||
d4d5e089 | 920 | if (chip->flags & IRQCHIP_SET_TYPE_MASKED) { |
32f4125e | 921 | if (!irqd_irq_masked(&desc->irq_data)) |
d4d5e089 | 922 | mask_irq(desc); |
32f4125e | 923 | if (!irqd_irq_disabled(&desc->irq_data)) |
d4d5e089 TG |
924 | unmask = 1; |
925 | } | |
926 | ||
00b992de AK |
927 | /* Mask all flags except trigger mode */ |
928 | flags &= IRQ_TYPE_SENSE_MASK; | |
b2ba2c30 | 929 | ret = chip->irq_set_type(&desc->irq_data, flags); |
82736f4d | 930 | |
876dbd4c TG |
931 | switch (ret) { |
932 | case IRQ_SET_MASK_OK: | |
2cb62547 | 933 | case IRQ_SET_MASK_OK_DONE: |
876dbd4c TG |
934 | irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK); |
935 | irqd_set(&desc->irq_data, flags); | |
df561f66 | 936 | fallthrough; |
876dbd4c TG |
937 | |
938 | case IRQ_SET_MASK_OK_NOCOPY: | |
939 | flags = irqd_get_trigger_type(&desc->irq_data); | |
940 | irq_settings_set_trigger_mask(desc, flags); | |
941 | irqd_clear(&desc->irq_data, IRQD_LEVEL); | |
942 | irq_settings_clr_level(desc); | |
943 | if (flags & IRQ_TYPE_LEVEL_MASK) { | |
944 | irq_settings_set_level(desc); | |
945 | irqd_set(&desc->irq_data, IRQD_LEVEL); | |
946 | } | |
46732475 | 947 | |
d4d5e089 | 948 | ret = 0; |
8fff39e0 | 949 | break; |
876dbd4c | 950 | default: |
d75f773c | 951 | pr_err("Setting trigger mode %lu for irq %u failed (%pS)\n", |
a1ff541a | 952 | flags, irq_desc_get_irq(desc), chip->irq_set_type); |
0c5d1eb7 | 953 | } |
d4d5e089 TG |
954 | if (unmask) |
955 | unmask_irq(desc); | |
82736f4d UKK |
956 | return ret; |
957 | } | |
958 | ||
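How a requester ends up in __irq_set_trigger(): the IRQF_TRIGGER_* bits passed to request_irq() are applied during __setup_irq(). A small sketch, with the `waldo` names invented for illustration:

```c
#include <linux/interrupt.h>

static irqreturn_t waldo_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

/*
 * If no IRQF_TRIGGER_* bit is given, the trigger type already configured
 * for the line (e.g. from the firmware/DT mapping) is kept.
 */
static int waldo_request(unsigned int irq, void *dev_id)
{
	return request_irq(irq, waldo_handler, IRQF_TRIGGER_RISING,
			   "waldo", dev_id);
}
```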
293a7a0a TG |
959 | #ifdef CONFIG_HARDIRQS_SW_RESEND |
960 | int irq_set_parent(int irq, int parent_irq) | |
961 | { | |
90140d08 TG |
962 | scoped_irqdesc_get_and_lock(irq, 0) { |
963 | scoped_irqdesc->parent_irq = parent_irq; | |
964 | return 0; | |
965 | } | |
966 | return -EINVAL; | |
293a7a0a | 967 | } |
3118dac5 | 968 | EXPORT_SYMBOL_GPL(irq_set_parent); |
293a7a0a TG |
969 | #endif |
970 | ||
b25c340c TG |
971 | /* |
972 | * Default primary interrupt handler for threaded interrupts. Is | |
973 | * assigned as primary handler when request_threaded_irq is called | |
974 | * with handler == NULL. Useful for oneshot interrupts. | |
975 | */ | |
976 | static irqreturn_t irq_default_primary_handler(int irq, void *dev_id) | |
977 | { | |
978 | return IRQ_WAKE_THREAD; | |
979 | } | |
980 | ||
399b5da2 TG |
981 | /* |
982 | * Primary handler for nested threaded interrupts. Should never be | |
983 | * called. | |
984 | */ | |
985 | static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id) | |
986 | { | |
987 | WARN(1, "Primary handler called for nested irq %d\n", irq); | |
988 | return IRQ_NONE; | |
989 | } | |
990 | ||
2a1d3ab8 TG |
991 | static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id) |
992 | { | |
993 | WARN(1, "Secondary action handler called for irq %d\n", irq); | |
994 | return IRQ_NONE; | |
995 | } | |
996 | ||
c99303a2 CW |
997 | #ifdef CONFIG_SMP |
998 | /* | |
999 | * Check whether we need to change the affinity of the interrupt thread. | |
1000 | */ | |
1001 | static void irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) | |
1002 | { | |
1003 | cpumask_var_t mask; | |
1004 | bool valid = false; | |
1005 | ||
1006 | if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags)) | |
1007 | return; | |
1008 | ||
1009 | __set_current_state(TASK_RUNNING); | |
1010 | ||
1011 | /* | |
1012 | * In case we are out of memory, we set IRQTF_AFFINITY again and
1013 | * try again next time.
1014 | */ | |
1015 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) { | |
1016 | set_bit(IRQTF_AFFINITY, &action->thread_flags); | |
1017 | return; | |
1018 | } | |
1019 | ||
17c19535 TG |
1020 | scoped_guard(raw_spinlock_irq, &desc->lock) { |
1021 | /* | |
1022 | * This code is triggered unconditionally. Check the affinity | |
1023 | * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out. | |
1024 | */ | |
1025 | if (cpumask_available(desc->irq_common_data.affinity)) { | |
1026 | const struct cpumask *m; | |
c99303a2 | 1027 | |
17c19535 TG |
1028 | m = irq_data_get_effective_affinity_mask(&desc->irq_data); |
1029 | cpumask_copy(mask, m); | |
1030 | valid = true; | |
1031 | } | |
c99303a2 | 1032 | } |
c99303a2 CW |
1033 | |
1034 | if (valid) | |
1035 | set_cpus_allowed_ptr(current, mask); | |
1036 | free_cpumask_var(mask); | |
1037 | } | |
1038 | #else | |
1039 | static inline void irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { } | |
1040 | #endif | |
1041 | ||
1042 | static int irq_wait_for_interrupt(struct irq_desc *desc, | |
1043 | struct irqaction *action) | |
3aa551c9 | 1044 | { |
519cc865 LW |
1045 | for (;;) { |
1046 | set_current_state(TASK_INTERRUPTIBLE); | |
c99303a2 | 1047 | irq_thread_check_affinity(desc, action); |
550acb19 | 1048 | |
519cc865 LW |
1049 | if (kthread_should_stop()) { |
1050 | /* may need to run one last time */ | |
1051 | if (test_and_clear_bit(IRQTF_RUNTHREAD, | |
1052 | &action->thread_flags)) { | |
1053 | __set_current_state(TASK_RUNNING); | |
1054 | return 0; | |
1055 | } | |
1056 | __set_current_state(TASK_RUNNING); | |
1057 | return -1; | |
1058 | } | |
f48fe81e TG |
1059 | |
1060 | if (test_and_clear_bit(IRQTF_RUNTHREAD, | |
1061 | &action->thread_flags)) { | |
3aa551c9 TG |
1062 | __set_current_state(TASK_RUNNING); |
1063 | return 0; | |
f48fe81e TG |
1064 | } |
1065 | schedule(); | |
3aa551c9 | 1066 | } |
3aa551c9 TG |
1067 | } |
1068 | ||
b25c340c TG |
1069 | /* |
1070 | * Oneshot interrupts keep the irq line masked until the threaded | |
1071 | * handler has finished. Unmask if the interrupt has not been disabled and
1072 | * is marked MASKED. | |
1073 | */ | |
b5faba21 | 1074 | static void irq_finalize_oneshot(struct irq_desc *desc, |
f3f79e38 | 1075 | struct irqaction *action) |
b25c340c | 1076 | { |
2a1d3ab8 TG |
1077 | if (!(desc->istate & IRQS_ONESHOT) || |
1078 | action->handler == irq_forced_secondary_handler) | |
b5faba21 | 1079 | return; |
0b1adaa0 | 1080 | again: |
3876ec9e | 1081 | chip_bus_lock(desc); |
239007b8 | 1082 | raw_spin_lock_irq(&desc->lock); |
0b1adaa0 TG |
1083 | |
1084 | /* | |
1085 | * Implausible though it may be, we need to protect ourselves against
1086 | * the following scenario: | |
1087 | * | |
1088 | * The thread is faster done than the hard interrupt handler | |
1089 | * on the other CPU. If we unmask the irq line then the | |
1090 | * interrupt can come in again and masks the line, leaves due | |
009b4c3b | 1091 | * to IRQS_INPROGRESS and the irq line is masked forever. |
b5faba21 TG |
1092 | * |
1093 | * This also serializes the state of shared oneshot handlers | |
a359f757 | 1094 | * versus "desc->threads_oneshot |= action->thread_mask;" in |
b5faba21 TG |
1095 | * irq_wake_thread(). See the comment there which explains the |
1096 | * serialization. | |
0b1adaa0 | 1097 | */ |
32f4125e | 1098 | if (unlikely(irqd_irq_inprogress(&desc->irq_data))) { |
0b1adaa0 | 1099 | raw_spin_unlock_irq(&desc->lock); |
3876ec9e | 1100 | chip_bus_sync_unlock(desc); |
0b1adaa0 TG |
1101 | cpu_relax(); |
1102 | goto again; | |
1103 | } | |
1104 | ||
b5faba21 TG |
1105 | /* |
1106 | * Now check again, whether the thread should run. Otherwise | |
1107 | * we would clear the threads_oneshot bit of this thread which | |
1108 | * was just set. | |
1109 | */ | |
f3f79e38 | 1110 | if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags)) |
b5faba21 TG |
1111 | goto out_unlock; |
1112 | ||
1113 | desc->threads_oneshot &= ~action->thread_mask; | |
1114 | ||
32f4125e TG |
1115 | if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) && |
1116 | irqd_irq_masked(&desc->irq_data)) | |
328a4978 | 1117 | unmask_threaded_irq(desc); |
32f4125e | 1118 | |
b5faba21 | 1119 | out_unlock: |
239007b8 | 1120 | raw_spin_unlock_irq(&desc->lock); |
3876ec9e | 1121 | chip_bus_sync_unlock(desc); |
b25c340c TG |
1122 | } |
1123 | ||
8d32a307 | 1124 | /* |
6f8b7968 AS |
1125 | * Interrupts explicitly requested as threaded interrupts want to be |
1126 | * preemptible - many of them need to sleep and wait for slow buses to
1127 | * complete. | |
8d32a307 | 1128 | */ |
6f8b7968 | 1129 | static irqreturn_t irq_thread_fn(struct irq_desc *desc, struct irqaction *action) |
8d32a307 | 1130 | { |
6f8b7968 | 1131 | irqreturn_t ret = action->thread_fn(action->irq, action->dev_id); |
3a43e05f | 1132 | |
746a923b LW |
1133 | if (ret == IRQ_HANDLED) |
1134 | atomic_inc(&desc->threads_handled); | |
1135 | ||
f3f79e38 | 1136 | irq_finalize_oneshot(desc, action); |
3a43e05f | 1137 | return ret; |
8d32a307 TG |
1138 | } |
1139 | ||
1140 | /* | |
6f8b7968 AS |
1141 | * Interrupts which are not explicitly requested as threaded |
1142 | * interrupts rely on the implicit bh/preempt disable of the hard irq | |
1143 | * context. So we need to disable bh here to avoid deadlocks and other | |
1144 | * side effects. | |
8d32a307 | 1145 | */ |
6f8b7968 | 1146 | static irqreturn_t irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action) |
8d32a307 | 1147 | { |
3a43e05f SAS |
1148 | irqreturn_t ret; |
1149 | ||
6f8b7968 AS |
1150 | local_bh_disable(); |
1151 | if (!IS_ENABLED(CONFIG_PREEMPT_RT)) | |
1152 | local_irq_disable(); | |
429f49ad | 1153 | ret = irq_thread_fn(desc, action); |
6f8b7968 AS |
1154 | if (!IS_ENABLED(CONFIG_PREEMPT_RT)) |
1155 | local_irq_enable(); | |
1156 | local_bh_enable(); | |
3a43e05f | 1157 | return ret; |
8d32a307 TG |
1158 | } |
1159 | ||
e2c12739 | 1160 | void wake_threads_waitq(struct irq_desc *desc) |
7140ea19 | 1161 | { |
c685689f | 1162 | if (atomic_dec_and_test(&desc->threads_active)) |
7140ea19 IY |
1163 | wake_up(&desc->wait_for_threads); |
1164 | } | |
1165 | ||
67d12145 | 1166 | static void irq_thread_dtor(struct callback_head *unused) |
4d1d61a6 ON |
1167 | { |
1168 | struct task_struct *tsk = current; | |
1169 | struct irq_desc *desc; | |
1170 | struct irqaction *action; | |
1171 | ||
1172 | if (WARN_ON_ONCE(!(current->flags & PF_EXITING))) | |
1173 | return; | |
1174 | ||
1175 | action = kthread_data(tsk); | |
1176 | ||
fb21affa | 1177 | pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n", |
19af395d | 1178 | tsk->comm, tsk->pid, action->irq); |
4d1d61a6 ON |
1179 | |
1180 | ||
1181 | desc = irq_to_desc(action->irq); | |
1182 | /* | |
1183 | * If IRQTF_RUNTHREAD is set, we need to decrement | |
1184 | * desc->threads_active and wake possible waiters. | |
1185 | */ | |
1186 | if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags)) | |
1187 | wake_threads_waitq(desc); | |
1188 | ||
1189 | /* Prevent a stale desc->threads_oneshot */ | |
1190 | irq_finalize_oneshot(desc, action); | |
1191 | } | |
1192 | ||
2a1d3ab8 TG |
1193 | static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action) |
1194 | { | |
1195 | struct irqaction *secondary = action->secondary; | |
1196 | ||
1197 | if (WARN_ON_ONCE(!secondary)) | |
1198 | return; | |
1199 | ||
17c19535 | 1200 | guard(raw_spinlock_irq)(&desc->lock); |
2a1d3ab8 | 1201 | __irq_wake_thread(desc, secondary); |
2a1d3ab8 TG |
1202 | } |
1203 | ||
8707898e TP |
1204 | /* |
1205 | * Internal function to notify that an interrupt thread is ready.
1206 | */ | |
1207 | static void irq_thread_set_ready(struct irq_desc *desc, | |
1208 | struct irqaction *action) | |
1209 | { | |
1210 | set_bit(IRQTF_READY, &action->thread_flags); | |
1211 | wake_up(&desc->wait_for_threads); | |
1212 | } | |
1213 | ||
1214 | /* | |
1215 | * Internal function to wake up an interrupt thread and wait until it is
1216 | * ready. | |
1217 | */ | |
1218 | static void wake_up_and_wait_for_irq_thread_ready(struct irq_desc *desc, | |
1219 | struct irqaction *action) | |
1220 | { | |
1221 | if (!action || !action->thread) | |
1222 | return; | |
1223 | ||
1224 | wake_up_process(action->thread); | |
1225 | wait_event(desc->wait_for_threads, | |
1226 | test_bit(IRQTF_READY, &action->thread_flags)); | |
1227 | } | |
1228 | ||
3aa551c9 TG |
1229 | /* |
1230 | * Interrupt handler thread | |
1231 | */ | |
1232 | static int irq_thread(void *data) | |
1233 | { | |
67d12145 | 1234 | struct callback_head on_exit_work; |
3aa551c9 TG |
1235 | struct irqaction *action = data; |
1236 | struct irq_desc *desc = irq_to_desc(action->irq); | |
3a43e05f SAS |
1237 | irqreturn_t (*handler_fn)(struct irq_desc *desc, |
1238 | struct irqaction *action); | |
3aa551c9 | 1239 | |
8707898e TP |
1240 | irq_thread_set_ready(desc, action); |
1241 | ||
e739f98b TG |
1242 | sched_set_fifo(current); |
1243 | ||
91cc470e TL |
1244 | if (force_irqthreads() && test_bit(IRQTF_FORCED_THREAD, |
1245 | &action->thread_flags)) | |
8d32a307 TG |
1246 | handler_fn = irq_forced_thread_fn; |
1247 | else | |
1248 | handler_fn = irq_thread_fn; | |
1249 | ||
41f9d29f | 1250 | init_task_work(&on_exit_work, irq_thread_dtor); |
91989c70 | 1251 | task_work_add(current, &on_exit_work, TWA_NONE); |
3aa551c9 | 1252 | |
c99303a2 | 1253 | while (!irq_wait_for_interrupt(desc, action)) { |
7140ea19 | 1254 | irqreturn_t action_ret; |
3aa551c9 | 1255 | |
7140ea19 | 1256 | action_ret = handler_fn(desc, action); |
2a1d3ab8 TG |
1257 | if (action_ret == IRQ_WAKE_THREAD) |
1258 | irq_wake_secondary(desc, action); | |
3aa551c9 | 1259 | |
7140ea19 | 1260 | wake_threads_waitq(desc); |
3aa551c9 TG |
1261 | } |
1262 | ||
7140ea19 IY |
1263 | /* |
1264 | * This is the regular exit path. __free_irq() is stopping the | |
1265 | * thread via kthread_stop() after calling | |
519cc865 | 1266 | * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the |
836557bd | 1267 | * oneshot mask bit can be set. |
3aa551c9 | 1268 | */ |
68cbd415 | 1269 | task_work_cancel_func(current, irq_thread_dtor); |
3aa551c9 TG |
1270 | return 0; |
1271 | } | |
1272 | ||
a92444c6 | 1273 | /** |
0c169edf TG |
1274 | * irq_wake_thread - wake the irq thread for the action identified by dev_id |
1275 | * @irq: Interrupt line | |
1276 | * @dev_id: Device identity for which the thread should be woken | |
a92444c6 TG |
1277 | */ |
1278 | void irq_wake_thread(unsigned int irq, void *dev_id) | |
1279 | { | |
1280 | struct irq_desc *desc = irq_to_desc(irq); | |
1281 | struct irqaction *action; | |
a92444c6 TG |
1282 | |
1283 | if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) | |
1284 | return; | |
1285 | ||
17c19535 | 1286 | guard(raw_spinlock_irqsave)(&desc->lock); |
f944b5a7 | 1287 | for_each_action_of_desc(desc, action) { |
a92444c6 TG |
1288 | if (action->dev_id == dev_id) { |
1289 | if (action->thread) | |
1290 | __irq_wake_thread(desc, action); | |
1291 | break; | |
1292 | } | |
1293 | } | |
a92444c6 TG |
1294 | } |
1295 | EXPORT_SYMBOL_GPL(irq_wake_thread); | |
1296 | ||
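A sketch of the threaded-interrupt split that irq_thread()/irq_wake_thread() serve: a primary handler that defers to a thread with IRQ_WAKE_THREAD, plus an explicit kick via irq_wake_thread() from other driver code. The `fred_*` names are invented; dev_id must match the one passed at request time for the wake to find the right action.

```c
#include <linux/interrupt.h>

/* Primary handler: quick check in hard irq context, defer the real work. */
static irqreturn_t fred_hardirq(int irq, void *dev_id)
{
	/* device specific "is it ours?" check, elided */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t fred_thread(int irq, void *dev_id)
{
	/* may sleep: talk to the device over a slow bus, etc. */
	return IRQ_HANDLED;
}

static int fred_request(unsigned int irq, void *dev_id)
{
	return request_threaded_irq(irq, fred_hardirq, fred_thread, 0,
				    "fred", dev_id);
}

/* Kick the same thread from non-interrupt driver code if needed. */
static void fred_kick(unsigned int irq, void *dev_id)
{
	irq_wake_thread(irq, dev_id);
}
```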
2a1d3ab8 | 1297 | static int irq_setup_forced_threading(struct irqaction *new) |
8d32a307 | 1298 | { |
91cc470e | 1299 | if (!force_irqthreads()) |
2a1d3ab8 | 1300 | return 0; |
8d32a307 | 1301 | if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT)) |
2a1d3ab8 | 1302 | return 0; |
8d32a307 | 1303 | |
d1f0301b TG |
1304 | /* |
1305 | * No further action required for interrupts which are requested as | |
1306 | * threaded interrupts already | |
1307 | */ | |
1308 | if (new->handler == irq_default_primary_handler) | |
1309 | return 0; | |
1310 | ||
8d32a307 TG |
1311 | new->flags |= IRQF_ONESHOT; |
1312 | ||
2a1d3ab8 TG |
1313 | /* |
1314 | * Handle the case where we have a real primary handler and a | |
1315 | * thread handler. We force thread them as well by creating a | |
1316 | * secondary action. | |
1317 | */ | |
d1f0301b | 1318 | if (new->handler && new->thread_fn) { |
2a1d3ab8 TG |
1319 | /* Allocate the secondary action */ |
1320 | new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL); | |
1321 | if (!new->secondary) | |
1322 | return -ENOMEM; | |
1323 | new->secondary->handler = irq_forced_secondary_handler; | |
1324 | new->secondary->thread_fn = new->thread_fn; | |
1325 | new->secondary->dev_id = new->dev_id; | |
1326 | new->secondary->irq = new->irq; | |
1327 | new->secondary->name = new->name; | |
8d32a307 | 1328 | } |
2a1d3ab8 TG |
1329 | /* Deal with the primary handler */ |
1330 | set_bit(IRQTF_FORCED_THREAD, &new->thread_flags); | |
1331 | new->thread_fn = new->handler; | |
1332 | new->handler = irq_default_primary_handler; | |
1333 | return 0; | |
8d32a307 TG |
1334 | } |
1335 | ||
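What irq_setup_forced_threading() means for a driver: with the "threadirqs" command line option (or on PREEMPT_RT), a plain request_irq() handler is transparently moved into a kernel thread. Handlers that really must stay in hard interrupt context opt out, as in this hedged sketch (the `plugh` names are invented):

```c
#include <linux/interrupt.h>

static irqreturn_t plugh_handler(int irq, void *dev_id)
{
	/* must run in hard irq context, e.g. timekeeping-critical work */
	return IRQ_HANDLED;
}

static int plugh_request(unsigned int irq, void *dev_id)
{
	/* IRQF_NO_THREAD prevents the forced-threading conversion above. */
	return request_irq(irq, plugh_handler, IRQF_NO_THREAD,
			   "plugh", dev_id);
}
```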
c1bacbae TG |
1336 | static int irq_request_resources(struct irq_desc *desc) |
1337 | { | |
1338 | struct irq_data *d = &desc->irq_data; | |
1339 | struct irq_chip *c = d->chip; | |
1340 | ||
1341 | return c->irq_request_resources ? c->irq_request_resources(d) : 0; | |
1342 | } | |
1343 | ||
1344 | static void irq_release_resources(struct irq_desc *desc) | |
1345 | { | |
1346 | struct irq_data *d = &desc->irq_data; | |
1347 | struct irq_chip *c = d->chip; | |
1348 | ||
1349 | if (c->irq_release_resources) | |
1350 | c->irq_release_resources(d); | |
1351 | } | |
1352 | ||
b525903c JT |
1353 | static bool irq_supports_nmi(struct irq_desc *desc) |
1354 | { | |
1355 | struct irq_data *d = irq_desc_get_irq_data(desc); | |
1356 | ||
1357 | #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY | |
1358 | /* Only IRQs directly managed by the root irqchip can be set as NMI */ | |
1359 | if (d->parent_data) | |
1360 | return false; | |
1361 | #endif | |
1362 | /* Don't support NMIs for chips behind a slow bus */ | |
1363 | if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock) | |
1364 | return false; | |
1365 | ||
1366 | return d->chip->flags & IRQCHIP_SUPPORTS_NMI; | |
1367 | } | |
1368 | ||
1369 | static int irq_nmi_setup(struct irq_desc *desc) | |
1370 | { | |
1371 | struct irq_data *d = irq_desc_get_irq_data(desc); | |
1372 | struct irq_chip *c = d->chip; | |
1373 | ||
1374 | return c->irq_nmi_setup ? c->irq_nmi_setup(d) : -EINVAL; | |
1375 | } | |
1376 | ||
1377 | static void irq_nmi_teardown(struct irq_desc *desc) | |
1378 | { | |
1379 | struct irq_data *d = irq_desc_get_irq_data(desc); | |
1380 | struct irq_chip *c = d->chip; | |
1381 | ||
1382 | if (c->irq_nmi_teardown) | |
1383 | c->irq_nmi_teardown(d); | |
1384 | } | |
1385 | ||
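These helpers back the request_nmi()/enable_nmi() API. A heavily hedged sketch of a user follows; the `xyzzy` names are invented, and the exact requirements (NMI-capable root irqchip, no sharing, auto-enable disabled via IRQF_NO_AUTOEN, explicit enable_nmi()) depend on the platform and irqchip.

```c
#include <linux/interrupt.h>

static irqreturn_t xyzzy_nmi_handler(int irq, void *dev_id)
{
	/* NMI context: no sleeping, no regular spinlocks. */
	return IRQ_HANDLED;
}

/* Only works when irq_supports_nmi() is true for this line. */
static int xyzzy_setup_nmi(unsigned int irq, void *dev_id)
{
	int ret = request_nmi(irq, xyzzy_nmi_handler, IRQF_NO_AUTOEN,
			      "xyzzy", dev_id);

	if (!ret)
		enable_nmi(irq);
	return ret;
}
```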
2a1d3ab8 TG |
1386 | static int |
1387 | setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary) | |
1388 | { | |
1389 | struct task_struct *t; | |
2a1d3ab8 TG |
1390 | |
1391 | if (!secondary) { | |
1392 | t = kthread_create(irq_thread, new, "irq/%d-%s", irq, | |
1393 | new->name); | |
1394 | } else { | |
1395 | t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq, | |
1396 | new->name); | |
2a1d3ab8 TG |
1397 | } |
1398 | ||
1399 | if (IS_ERR(t)) | |
1400 | return PTR_ERR(t); | |
1401 | ||
2a1d3ab8 TG |
1402 | /* |
1403 | * We keep the reference to the task struct even if | |
1404 | * the thread dies to avoid that the interrupt code | |
1405 | * references an already freed task_struct. | |
1406 | */ | |
7b3c92b8 | 1407 | new->thread = get_task_struct(t); |
2a1d3ab8 TG |
1408 | /* |
1409 | * Tell the thread to set its affinity. This is | |
1410 | * important for shared interrupt handlers as we do | |
1411 | * not invoke setup_affinity() for the secondary | |
1412 | * handlers as everything is already set up. Even for | |
1413 | * interrupts marked with IRQF_NOBALANCING this is | |
1414 | * correct as we want the thread to move to the cpu(s) | |
1415 | * on which the requesting code placed the interrupt. | |
1416 | */ | |
1417 | set_bit(IRQTF_AFFINITY, &new->thread_flags); | |
1418 | return 0; | |
1419 | } | |
1420 | ||
1da177e4 LT |
1421 | /* |
1422 | * Internal function to register an irqaction - typically used to | |
1423 | * allocate special interrupts that are part of the architecture. | |
19d39a38 TG |
1424 | * |
1425 | * Locking rules: | |
1426 | * | |
1427 | * desc->request_mutex Provides serialization against a concurrent free_irq() | |
1428 | * chip_bus_lock Provides serialization for slow bus operations | |
1429 | * desc->lock Provides serialization against hard interrupts | |
1430 | * | |
1431 | * chip_bus_lock and desc->lock are sufficient for all other management and | |
1432 | * interrupt related functions. desc->request_mutex solely serializes | |
1433 | * request/free_irq(). | |
1da177e4 | 1434 | */ |
d3c60047 | 1435 | static int |
327ec569 | 1436 | __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) |
1da177e4 | 1437 | { |
f17c7545 | 1438 | struct irqaction *old, **old_ptr; |
b5faba21 | 1439 | unsigned long flags, thread_mask = 0; |
3b8249e7 | 1440 | int ret, nested, shared = 0; |
1da177e4 | 1441 | |
7d94f7ca | 1442 | if (!desc) |
c2b5a251 MW |
1443 | return -EINVAL; |
1444 | ||
6b8ff312 | 1445 | if (desc->irq_data.chip == &no_irq_chip) |
1da177e4 | 1446 | return -ENOSYS; |
b6873807 SAS |
1447 | if (!try_module_get(desc->owner)) |
1448 | return -ENODEV; | |
1da177e4 | 1449 | |
2a1d3ab8 TG |
1450 | new->irq = irq; |
1451 | ||
4b357dae JH |
1452 | /* |
1453 | * If the trigger type is not specified by the caller, | |
1454 | * then use the default for this interrupt. | |
1455 | */ | |
1456 | if (!(new->flags & IRQF_TRIGGER_MASK)) | |
1457 | new->flags |= irqd_get_trigger_type(&desc->irq_data); | |
1458 | ||
3aa551c9 | 1459 | /* |
399b5da2 TG |
1460 | * Check whether the interrupt nests into another interrupt |
1461 | * thread. | |
1462 | */ | |
1ccb4e61 | 1463 | nested = irq_settings_is_nested_thread(desc); |
399b5da2 | 1464 | if (nested) { |
b6873807 SAS |
1465 | if (!new->thread_fn) { |
1466 | ret = -EINVAL; | |
1467 | goto out_mput; | |
1468 | } | |
399b5da2 TG |
1469 | /* |
1470 | * Replace the primary handler which was provided from | |
1471 | * the driver for non nested interrupt handling by the | |
1472 | * dummy function which warns when called. | |
1473 | */ | |
1474 | new->handler = irq_nested_primary_handler; | |
8d32a307 | 1475 | } else { |
2a1d3ab8 TG |
1476 | if (irq_settings_can_thread(desc)) { |
1477 | ret = irq_setup_forced_threading(new); | |
1478 | if (ret) | |
1479 | goto out_mput; | |
1480 | } | |
399b5da2 TG |
1481 | } |
1482 | ||
3aa551c9 | 1483 | /* |
399b5da2 TG |
1484 | * Create a handler thread when a thread function is supplied |
1485 | * and the interrupt does not nest into another interrupt | |
1486 | * thread. | |
3aa551c9 | 1487 | */ |
399b5da2 | 1488 | if (new->thread_fn && !nested) { |
2a1d3ab8 TG |
1489 | ret = setup_irq_thread(new, irq, false); |
1490 | if (ret) | |
b6873807 | 1491 | goto out_mput; |
2a1d3ab8 TG |
1492 | if (new->secondary) { |
1493 | ret = setup_irq_thread(new->secondary, irq, true); | |
1494 | if (ret) | |
1495 | goto out_thread; | |
b6873807 | 1496 | } |
3aa551c9 TG |
1497 | } |
1498 | ||
dc9b229a TG |
1499 | /* |
1500 | * Drivers are often written to work w/o knowledge about the | |
1501 | * underlying irq chip implementation, so a request for a | |
1502 | * threaded irq without a primary hard irq context handler | |
1503 | * requires the ONESHOT flag to be set. Some irq chips like | |
1504 | * MSI based interrupts are per se one shot safe. Check the | |
1505 | * chip flags, so we can avoid the unmask dance at the end of | |
1506 | * the threaded handler for those. | |
1507 | */ | |
1508 | if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE) | |
1509 | new->flags &= ~IRQF_ONESHOT; | |
1510 | ||
19d39a38 TG |
1511 | /* |
1512 | * Protects against a concurrent __free_irq() call which might wait | |
519cc865 | 1513 | * for synchronize_hardirq() to complete without holding the optional |
836557bd LW |
1514 | * chip bus lock and desc->lock. Also protects against handing out |
1515 | * a recycled oneshot thread_mask bit while it's still in use by | |
1516 | * its previous owner. | |
19d39a38 | 1517 | */ |
9114014c | 1518 | mutex_lock(&desc->request_mutex); |
19d39a38 TG |
1519 | |
1520 | /* | |
1521 | * Acquire bus lock as the irq_request_resources() callback below | |
1522 | * might rely on the serialization or the magic power management | |
1523 | * functions which are abusing the irq_bus_lock() callback. | |
1524 | */ | |
1525 | chip_bus_lock(desc); | |
1526 | ||
1527 | /* First installed action requests resources. */ | |
46e48e25 TG |
1528 | if (!desc->action) { |
1529 | ret = irq_request_resources(desc); | |
1530 | if (ret) { | |
1531 | pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n", | |
1532 | new->name, irq, desc->irq_data.chip->name); | |
19d39a38 | 1533 | goto out_bus_unlock; |
46e48e25 TG |
1534 | } |
1535 | } | |
9114014c | 1536 | |
1da177e4 LT |
1537 | /* |
1538 | * The following block of code has to be executed atomically | |
19d39a38 TG |
1539 | * protected against a concurrent interrupt and any of the other |
1540 | * management calls which are not serialized via | |
1541 | * desc->request_mutex or the optional bus lock. | |
1da177e4 | 1542 | */ |
239007b8 | 1543 | raw_spin_lock_irqsave(&desc->lock, flags); |
f17c7545 IM |
1544 | old_ptr = &desc->action; |
1545 | old = *old_ptr; | |
06fcb0c6 | 1546 | if (old) { |
e76de9f8 TG |
1547 | /* |
1548 | * Can't share interrupts unless both agree to and are | |
1549 | * the same type (level, edge, polarity). So both flag | |
3cca53b0 | 1550 | * fields must have IRQF_SHARED set and the bits which |
9d591edd TG |
1551 | * set the trigger type must match. Also all must |
1552 | * agree on ONESHOT. | |
b525903c | 1553 | * Interrupt lines used for NMIs cannot be shared. |
e76de9f8 | 1554 | */ |
4f8413a3 MZ |
1555 | unsigned int oldtype; |
1556 | ||
6678ae19 | 1557 | if (irq_is_nmi(desc)) { |
b525903c JT |
1558 | pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n", |
1559 | new->name, irq, desc->irq_data.chip->name); | |
1560 | ret = -EINVAL; | |
1561 | goto out_unlock; | |
1562 | } | |
1563 | ||
4f8413a3 MZ |
1564 | /* |
1565 | * If nobody did set the configuration before, inherit | |
1566 | * the one provided by the requester. | |
1567 | */ | |
1568 | if (irqd_trigger_type_was_set(&desc->irq_data)) { | |
1569 | oldtype = irqd_get_trigger_type(&desc->irq_data); | |
1570 | } else { | |
1571 | oldtype = new->flags & IRQF_TRIGGER_MASK; | |
1572 | irqd_set_trigger_type(&desc->irq_data, oldtype); | |
1573 | } | |
382bd4de | 1574 | |
3cca53b0 | 1575 | if (!((old->flags & new->flags) & IRQF_SHARED) || |
c2ddeb29 RW |
1576 | (oldtype != (new->flags & IRQF_TRIGGER_MASK))) |
1577 | goto mismatch; | |
1578 | ||
1579 | if ((old->flags & IRQF_ONESHOT) && | |
1580 | (new->flags & IRQF_COND_ONESHOT)) | |
1581 | new->flags |= IRQF_ONESHOT; | |
1582 | else if ((old->flags ^ new->flags) & IRQF_ONESHOT) | |
f5163427 DS |
1583 | goto mismatch; |
1584 | ||
f5163427 | 1585 | /* All handlers must agree on per-cpuness */ |
3cca53b0 TG |
1586 | if ((old->flags & IRQF_PERCPU) != |
1587 | (new->flags & IRQF_PERCPU)) | |
f5163427 | 1588 | goto mismatch; |
1da177e4 LT |
1589 | |
1590 | /* add new interrupt at end of irq queue */ | |
1591 | do { | |
52abb700 TG |
1592 | /* |
1593 | * Or all existing action->thread_mask bits, | |
1594 | * so we can find the next zero bit for this | |
1595 | * new action. | |
1596 | */ | |
b5faba21 | 1597 | thread_mask |= old->thread_mask; |
f17c7545 IM |
1598 | old_ptr = &old->next; |
1599 | old = *old_ptr; | |
1da177e4 LT |
1600 | } while (old); |
1601 | shared = 1; | |
1602 | } | |
1603 | ||
b5faba21 | 1604 | /* |
52abb700 TG |
1605 | * Setup the thread mask for this irqaction for ONESHOT. For |
1606 | * !ONESHOT irqs the thread mask is 0 so we can avoid a | |
1607 | * conditional in irq_wake_thread(). | |
b5faba21 | 1608 | */ |
52abb700 TG |
1609 | if (new->flags & IRQF_ONESHOT) { |
1610 | /* | |
1611 | * Unlikely to have 32 or 64 irqs sharing one line, | |
1612 | * but who knows. | |
1613 | */ | |
1614 | if (thread_mask == ~0UL) { | |
1615 | ret = -EBUSY; | |
cba4235e | 1616 | goto out_unlock; |
52abb700 TG |
1617 | } |
1618 | /* | |
1619 | * The thread_mask for the action is or'ed to | |
1620 | * desc->threads_active to indicate that the | |
1621 | * IRQF_ONESHOT thread handler has been woken, but not | |
1622 | * yet finished. The bit is cleared when a thread | |
1623 | * completes. When all threads of a shared interrupt | |
1624 | * line have completed desc->threads_active becomes | |
1625 | * zero and the interrupt line is unmasked. See | |
1626 | * handle.c:irq_wake_thread() for further information. | |
1627 | * | |
1628 | * If no thread is woken by primary (hard irq context) | |
1629 | * interrupt handlers, then desc->threads_active is | |
1630 | * also checked for zero to unmask the irq line in the | |
1631 | * affected hard irq flow handlers | |
1632 | * (handle_[fasteoi|level]_irq). | |
1633 | * | |
1634 | * The new action gets the first zero bit of | |
1635 | * thread_mask assigned. See the loop above which or's | |
1636 | * all existing action->thread_mask bits. | |
1637 | */ | |
ffc661c9 | 1638 | new->thread_mask = 1UL << ffz(thread_mask); |
1c6c6952 | 1639 | |
dc9b229a TG |
1640 | } else if (new->handler == irq_default_primary_handler && |
1641 | !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) { | |
1c6c6952 TG |
1642 | /* |
1643 | * The interrupt was requested with handler = NULL, so | |
1644 | * we use the default primary handler for it. But it | |
1645 | * does not have the oneshot flag set. In combination | |
1646 | * with level interrupts this is deadly, because the | |
1647 | * default primary handler just wakes the thread, then | |
1648 | * the irq line is reenabled, but the device still | |
1649 | * has the level irq asserted. Rinse and repeat.... | |
1650 | * | |
1651 | * While this works for edge type interrupts, we play | |
1652 | * it safe and reject unconditionally because we can't | |
1653 | * say for sure which type this interrupt really | |
1654 | * has. The type flags are unreliable as the | |
1655 | * underlying chip implementation can override them. | |
1656 | */ | |
025af39b LC |
1657 | pr_err("Threaded irq requested with handler=NULL and !ONESHOT for %s (irq %d)\n", |
1658 | new->name, irq); | |
1c6c6952 | 1659 | ret = -EINVAL; |
cba4235e | 1660 | goto out_unlock; |
b5faba21 | 1661 | } |
b5faba21 | 1662 | |
1da177e4 | 1663 | if (!shared) { |
e76de9f8 | 1664 | /* Setup the type (level, edge, polarity) if configured: */ |
3cca53b0 | 1665 | if (new->flags & IRQF_TRIGGER_MASK) { |
a1ff541a JL |
1666 | ret = __irq_set_trigger(desc, |
1667 | new->flags & IRQF_TRIGGER_MASK); | |
82736f4d | 1668 | |
19d39a38 | 1669 | if (ret) |
cba4235e | 1670 | goto out_unlock; |
091738a2 | 1671 | } |
6a6de9ef | 1672 | |
c942cee4 TG |
1673 | /* |
1674 | * Activate the interrupt. That activation must happen | |
1675 | * independently of IRQ_NOAUTOEN. request_irq() can fail | |
1676 | * and the callers are supposed to handle | |
1677 | * that. enable_irq() of an interrupt requested with | |
1678 | * IRQ_NOAUTOEN is not supposed to fail. The activation | |
1679 | * keeps it in shutdown mode, it merely associates | |
1680 | * resources if necessary and if that's not possible it | |
1681 | * fails. Interrupts which are in managed shutdown mode | |
1682 | * will simply ignore that activation request. | |
1683 | */ | |
1684 | ret = irq_activate(desc); | |
1685 | if (ret) | |
1686 | goto out_unlock; | |
1687 | ||
009b4c3b | 1688 | desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \ |
32f4125e TG |
1689 | IRQS_ONESHOT | IRQS_WAITING); |
1690 | irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); | |
94d39e1f | 1691 | |
a005677b TG |
1692 | if (new->flags & IRQF_PERCPU) { |
1693 | irqd_set(&desc->irq_data, IRQD_PER_CPU); | |
1694 | irq_settings_set_per_cpu(desc); | |
c2b1063e TG |
1695 | if (new->flags & IRQF_NO_DEBUG) |
1696 | irq_settings_set_no_debug(desc); | |
a005677b | 1697 | } |
6a58fb3b | 1698 | |
c2b1063e TG |
1699 | if (noirqdebug) |
1700 | irq_settings_set_no_debug(desc); | |
1701 | ||
b25c340c | 1702 | if (new->flags & IRQF_ONESHOT) |
3d67baec | 1703 | desc->istate |= IRQS_ONESHOT; |
b25c340c | 1704 | |
2e051552 TG |
1705 | /* Exclude IRQ from balancing if requested */ |
1706 | if (new->flags & IRQF_NOBALANCING) { | |
1707 | irq_settings_set_no_balancing(desc); | |
1708 | irqd_set(&desc->irq_data, IRQD_NO_BALANCING); | |
1709 | } | |
1710 | ||
cbe16f35 BS |
1711 | if (!(new->flags & IRQF_NO_AUTOEN) && |
1712 | irq_settings_can_autoenable(desc)) { | |
4cde9c6b | 1713 | irq_startup(desc, IRQ_RESEND, IRQ_START_COND); |
04c848d3 TG |
1714 | } else { |
1715 | /* | |
1716 | * Shared interrupts do not go well with disabling | |
1717 | * auto enable. The sharing interrupt might request | |
1718 | * it while it's still disabled and then wait for | |
1719 | * interrupts forever. | |
1720 | */ | |
1721 | WARN_ON_ONCE(new->flags & IRQF_SHARED); | |
e76de9f8 TG |
1722 | /* Undo nested disables: */ |
1723 | desc->depth = 1; | |
04c848d3 | 1724 | } |
18404756 | 1725 | |
876dbd4c TG |
1726 | } else if (new->flags & IRQF_TRIGGER_MASK) { |
1727 | unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK; | |
7ee7e87d | 1728 | unsigned int omsk = irqd_get_trigger_type(&desc->irq_data); |
876dbd4c TG |
1729 | |
1730 | if (nmsk != omsk) | |
1731 | /* hope the handler works with current trigger mode */ | |
a395d6a7 | 1732 | pr_warn("irq %d uses trigger mode %u; requested %u\n", |
7ee7e87d | 1733 | irq, omsk, nmsk); |
1da177e4 | 1734 | } |
82736f4d | 1735 | |
f17c7545 | 1736 | *old_ptr = new; |
82736f4d | 1737 | |
cab303be TG |
1738 | irq_pm_install_action(desc, new); |
1739 | ||
8528b0f1 LT |
1740 | /* Reset broken irq detection when installing new handler */ |
1741 | desc->irq_count = 0; | |
1742 | desc->irqs_unhandled = 0; | |
1adb0850 TG |
1743 | |
1744 | /* | |
1745 | * Check whether we disabled the irq via the spurious handler | |
1746 | * before. Reenable it and give it another chance. | |
1747 | */ | |
7acdd53e TG |
1748 | if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) { |
1749 | desc->istate &= ~IRQS_SPURIOUS_DISABLED; | |
79ff1cda | 1750 | __enable_irq(desc); |
1adb0850 TG |
1751 | } |
1752 | ||
239007b8 | 1753 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
3a90795e | 1754 | chip_bus_sync_unlock(desc); |
9114014c | 1755 | mutex_unlock(&desc->request_mutex); |
1da177e4 | 1756 | |
b2d3d61a DL |
1757 | irq_setup_timings(desc, new); |
1758 | ||
8707898e TP |
1759 | wake_up_and_wait_for_irq_thread_ready(desc, new); |
1760 | wake_up_and_wait_for_irq_thread_ready(desc, new->secondary); | |
69ab8494 | 1761 | |
2c6927a3 | 1762 | register_irq_proc(irq, desc); |
1da177e4 LT |
1763 | new->dir = NULL; |
1764 | register_handler_proc(irq, new); | |
1da177e4 | 1765 | return 0; |
f5163427 DS |
1766 | |
1767 | mismatch: | |
3cca53b0 | 1768 | if (!(new->flags & IRQF_PROBE_SHARED)) { |
97fd75b7 | 1769 | pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n", |
f5d89470 TG |
1770 | irq, new->flags, new->name, old->flags, old->name); |
1771 | #ifdef CONFIG_DEBUG_SHIRQ | |
13e87ec6 | 1772 | dump_stack(); |
3f050447 | 1773 | #endif |
f5d89470 | 1774 | } |
3aa551c9 TG |
1775 | ret = -EBUSY; |
1776 | ||
cba4235e | 1777 | out_unlock: |
1c389795 | 1778 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
3b8249e7 | 1779 | |
46e48e25 TG |
1780 | if (!desc->action) |
1781 | irq_release_resources(desc); | |
19d39a38 TG |
1782 | out_bus_unlock: |
1783 | chip_bus_sync_unlock(desc); | |
9114014c TG |
1784 | mutex_unlock(&desc->request_mutex); |
1785 | ||
3aa551c9 | 1786 | out_thread: |
3aa551c9 TG |
1787 | if (new->thread) { |
1788 | struct task_struct *t = new->thread; | |
1789 | ||
1790 | new->thread = NULL; | |
6309727e | 1791 | kthread_stop_put(t); |
3aa551c9 | 1792 | } |
2a1d3ab8 TG |
1793 | if (new->secondary && new->secondary->thread) { |
1794 | struct task_struct *t = new->secondary->thread; | |
1795 | ||
1796 | new->secondary->thread = NULL; | |
6309727e | 1797 | kthread_stop_put(t); |
2a1d3ab8 | 1798 | } |
b6873807 SAS |
1799 | out_mput: |
1800 | module_put(desc->owner); | |
3aa551c9 | 1801 | return ret; |
1da177e4 LT |
1802 | } |
1803 | ||
31d9d9b6 | 1804 | /* |
cbf94f06 MD |
1805 | * Internal function to unregister an irqaction - used to free |
1806 | * regular and special interrupts that are part of the architecture. | |
1da177e4 | 1807 | */ |
83ac4ca9 | 1808 | static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id) |
1da177e4 | 1809 | { |
83ac4ca9 | 1810 | unsigned irq = desc->irq_data.irq; |
f17c7545 | 1811 | struct irqaction *action, **action_ptr; |
1da177e4 LT |
1812 | unsigned long flags; |
1813 | ||
ae88a23b | 1814 | WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq); |
7d94f7ca | 1815 | |
9114014c | 1816 | mutex_lock(&desc->request_mutex); |
abc7e40c | 1817 | chip_bus_lock(desc); |
239007b8 | 1818 | raw_spin_lock_irqsave(&desc->lock, flags); |
ae88a23b IM |
1819 | |
1820 | /* | |
1821 | * There can be multiple actions per IRQ descriptor, find the right | |
1822 | * one based on the dev_id: | |
1823 | */ | |
f17c7545 | 1824 | action_ptr = &desc->action; |
1da177e4 | 1825 | for (;;) { |
f17c7545 | 1826 | action = *action_ptr; |
1da177e4 | 1827 | |
ae88a23b IM |
1828 | if (!action) { |
1829 | WARN(1, "Trying to free already-free IRQ %d\n", irq); | |
239007b8 | 1830 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
abc7e40c | 1831 | chip_bus_sync_unlock(desc); |
19d39a38 | 1832 | mutex_unlock(&desc->request_mutex); |
f21cfb25 | 1833 | return NULL; |
ae88a23b | 1834 | } |
1da177e4 | 1835 | |
8316e381 IM |
1836 | if (action->dev_id == dev_id) |
1837 | break; | |
f17c7545 | 1838 | action_ptr = &action->next; |
ae88a23b | 1839 | } |
dbce706e | 1840 | |
ae88a23b | 1841 | /* Found it - now remove it from the list of entries: */ |
f17c7545 | 1842 | *action_ptr = action->next; |
ae88a23b | 1843 | |
cab303be TG |
1844 | irq_pm_remove_action(desc, action); |
1845 | ||
ae88a23b | 1846 | /* If this was the last handler, shut down the IRQ line: */ |
c1bacbae | 1847 | if (!desc->action) { |
e9849777 | 1848 | irq_settings_clr_disable_unlazy(desc); |
4001d8e8 | 1849 | /* Only shutdown. Deactivate after synchronize_hardirq() */ |
46999238 | 1850 | irq_shutdown(desc); |
c1bacbae | 1851 | } |
3aa551c9 | 1852 | |
e7a297b0 PWJ |
1853 | #ifdef CONFIG_SMP |
1854 | /* make sure affinity_hint is cleaned up */ | |
1855 | if (WARN_ON_ONCE(desc->affinity_hint)) | |
1856 | desc->affinity_hint = NULL; | |
1857 | #endif | |
1858 | ||
239007b8 | 1859 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
19d39a38 TG |
1860 | /* |
1861 | * Drop bus_lock here so the changes which were done in the chip | |
1862 | * callbacks above are synced out to the irq chips which hang | |
519cc865 | 1863 | * behind a slow bus (I2C, SPI) before calling synchronize_hardirq(). |
19d39a38 TG |
1864 | * |
1865 | * Aside of that the bus_lock can also be taken from the threaded | |
1866 | * handler in irq_finalize_oneshot() which results in a deadlock | |
519cc865 | 1867 | * because kthread_stop() would wait forever for the thread to |
19d39a38 TG |
1868 | * complete, which is blocked on the bus lock. |
1869 | * | |
1870 | * The still held desc->request_mutex() protects against a | |
1871 | * concurrent request_irq() of this irq so the release of resources | |
1872 | * and timing data is properly serialized. | |
1873 | */ | |
abc7e40c | 1874 | chip_bus_sync_unlock(desc); |
ae88a23b IM |
1875 | |
1876 | unregister_handler_proc(irq, action); | |
1877 | ||
62e04686 TG |
1878 | /* |
1879 | * Make sure it's not being used on another CPU and if the chip | |
1880 | * supports it also make sure that there is no (not yet serviced) | |
1881 | * interrupt in flight at the hardware level. | |
1882 | */ | |
e2c12739 | 1883 | __synchronize_irq(desc); |
1da177e4 | 1884 | |
70edcd77 | 1885 | #ifdef CONFIG_DEBUG_SHIRQ |
ae88a23b IM |
1886 | /* |
1887 | * It's a shared IRQ -- the driver ought to be prepared for an IRQ | |
1888 | * event to happen even while it's being freed, so let's make sure that | |
1889 | * is so by doing an extra call to the handler .... | |
1890 | * | |
1891 | * ( We do this after actually deregistering it, to make sure that a | |
0a13ec0b | 1892 | * 'real' IRQ doesn't run in parallel with our fake. ) |
ae88a23b IM |
1893 | */ |
1894 | if (action->flags & IRQF_SHARED) { | |
1895 | local_irq_save(flags); | |
1896 | action->handler(irq, dev_id); | |
1897 | local_irq_restore(flags); | |
1da177e4 | 1898 | } |
ae88a23b | 1899 | #endif |
2d860ad7 | 1900 | |
519cc865 LW |
1901 | /* |
1902 | * The action has already been removed above, but the thread writes | |
1903 | * its oneshot mask bit when it completes. The request_mutex is | |
1904 | * held across this, which prevents __setup_irq() from handing out | |
1905 | * the same bit to a newly requested action. | |
1906 | */ | |
2d860ad7 | 1907 | if (action->thread) { |
6309727e AG |
1908 | kthread_stop_put(action->thread); |
1909 | if (action->secondary && action->secondary->thread) | |
1910 | kthread_stop_put(action->secondary->thread); | |
2d860ad7 LT |
1911 | } |
1912 | ||
19d39a38 | 1913 | /* Last action releases resources */ |
2343877f | 1914 | if (!desc->action) { |
19d39a38 | 1915 | /* |
a359f757 | 1916 | * Reacquire bus lock as irq_release_resources() might |
19d39a38 TG |
1917 | * require it to deallocate resources over the slow bus. |
1918 | */ | |
1919 | chip_bus_lock(desc); | |
4001d8e8 TG |
1920 | /* |
1921 | * There is no interrupt on the fly anymore. Deactivate it | |
1922 | * completely. | |
1923 | */ | |
17c19535 TG |
1924 | scoped_guard(raw_spinlock_irqsave, &desc->lock) |
1925 | irq_domain_deactivate_irq(&desc->irq_data); | |
4001d8e8 | 1926 | |
46e48e25 | 1927 | irq_release_resources(desc); |
19d39a38 | 1928 | chip_bus_sync_unlock(desc); |
2343877f TG |
1929 | irq_remove_timings(desc); |
1930 | } | |
46e48e25 | 1931 | |
9114014c TG |
1932 | mutex_unlock(&desc->request_mutex); |
1933 | ||
be45beb2 | 1934 | irq_chip_pm_put(&desc->irq_data); |
b6873807 | 1935 | module_put(desc->owner); |
2a1d3ab8 | 1936 | kfree(action->secondary); |
f21cfb25 MD |
1937 | return action; |
1938 | } | |
1939 | ||
1940 | /** | |
0c169edf TG |
1941 | * free_irq - free an interrupt allocated with request_irq |
1942 | * @irq: Interrupt line to free | |
1943 | * @dev_id: Device identity to free | |
f21cfb25 | 1944 | * |
0c169edf TG |
1945 | * Remove an interrupt handler. The handler is removed and if the interrupt |
1946 | * line is no longer in use by any driver it is disabled. On a shared IRQ | |
1947 | * the caller must ensure the interrupt is disabled on the card it drives | |
1948 | * before calling this function. The function does not return until any | |
1949 | * executing interrupts for this IRQ have completed. | |
f21cfb25 | 1950 | * |
0c169edf | 1951 | * This function must not be called from interrupt context. |
25ce4be7 | 1952 | * |
0c169edf | 1953 | * Returns the devname argument passed to request_irq. |
f21cfb25 | 1954 | */ |
25ce4be7 | 1955 | const void *free_irq(unsigned int irq, void *dev_id) |
f21cfb25 | 1956 | { |
70aedd24 | 1957 | struct irq_desc *desc = irq_to_desc(irq); |
25ce4be7 CH |
1958 | struct irqaction *action; |
1959 | const char *devname; | |
70aedd24 | 1960 | |
31d9d9b6 | 1961 | if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) |
25ce4be7 | 1962 | return NULL; |
70aedd24 | 1963 | |
cd7eab44 BH |
1964 | #ifdef CONFIG_SMP |
1965 | if (WARN_ON(desc->affinity_notify)) | |
1966 | desc->affinity_notify = NULL; | |
1967 | #endif | |
1968 | ||
83ac4ca9 | 1969 | action = __free_irq(desc, dev_id); |
2827a418 AM |
1970 | |
1971 | if (!action) | |
1972 | return NULL; | |
1973 | ||
25ce4be7 CH |
1974 | devname = action->name; |
1975 | kfree(action); | |
1976 | return devname; | |
1da177e4 | 1977 | } |
1da177e4 LT |
1978 | EXPORT_SYMBOL(free_irq); |
1979 | ||
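/*
 * Illustrative sketch (not part of this file): the usual pairing of
 * request_irq()/free_irq() in a driver, assuming <linux/interrupt.h>.
 * request_irq() is the plain wrapper around request_threaded_irq() with no
 * thread function. Device structure and names are hypothetical.
 */
struct foo_device;

static irqreturn_t foo_irq_handler(int irq, void *dev_id)
{
	/* dev_id is the cookie passed to request_irq(), i.e. the foo_device */
	return IRQ_HANDLED;
}

static int foo_attach_irq(struct foo_device *foo, unsigned int irq)
{
	/* A shared irq must pass a unique, non-NULL dev_id */
	return request_irq(irq, foo_irq_handler, IRQF_SHARED, "foo", foo);
}

static void foo_detach_irq(struct foo_device *foo, unsigned int irq)
{
	/* Must not be called from irq context; waits for running handlers */
	free_irq(irq, foo);
}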
b525903c JT |
1980 | /* This function must be called with desc->lock held */ |
1981 | static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc) | |
1982 | { | |
1983 | const char *devname = NULL; | |
1984 | ||
1985 | desc->istate &= ~IRQS_NMI; | |
1986 | ||
1987 | if (!WARN_ON(desc->action == NULL)) { | |
1988 | irq_pm_remove_action(desc, desc->action); | |
1989 | devname = desc->action->name; | |
1990 | unregister_handler_proc(irq, desc->action); | |
1991 | ||
1992 | kfree(desc->action); | |
1993 | desc->action = NULL; | |
1994 | } | |
1995 | ||
1996 | irq_settings_clr_disable_unlazy(desc); | |
4001d8e8 | 1997 | irq_shutdown_and_deactivate(desc); |
b525903c JT |
1998 | |
1999 | irq_release_resources(desc); | |
2000 | ||
2001 | irq_chip_pm_put(&desc->irq_data); | |
2002 | module_put(desc->owner); | |
2003 | ||
2004 | return devname; | |
2005 | } | |
2006 | ||
2007 | const void *free_nmi(unsigned int irq, void *dev_id) | |
2008 | { | |
2009 | struct irq_desc *desc = irq_to_desc(irq); | |
b525903c | 2010 | |
6678ae19 | 2011 | if (!desc || WARN_ON(!irq_is_nmi(desc))) |
b525903c JT |
2012 | return NULL; |
2013 | ||
2014 | if (WARN_ON(irq_settings_is_per_cpu_devid(desc))) | |
2015 | return NULL; | |
2016 | ||
2017 | /* NMI still enabled */ | |
2018 | if (WARN_ON(desc->depth == 0)) | |
2019 | disable_nmi_nosync(irq); | |
2020 | ||
17c19535 | 2021 | guard(raw_spinlock_irqsave)(&desc->lock); |
b525903c | 2022 | irq_nmi_teardown(desc); |
17c19535 | 2023 | return __cleanup_nmi(irq, desc); |
b525903c JT |
2024 | } |
2025 | ||
1da177e4 | 2026 | /** |
0c169edf TG |
2027 | * request_threaded_irq - allocate an interrupt line |
2028 | * @irq: Interrupt line to allocate | |
2029 | * @handler: Function to be called when the IRQ occurs. | |
2030 | * Primary handler for threaded interrupts. | |
2031 | * If handler is NULL and thread_fn != NULL | |
2032 | * the default primary handler is installed. | |
2033 | * @thread_fn: Function called from the irq handler thread | |
2034 | * If NULL, no irq thread is created | |
2035 | * @irqflags: Interrupt type flags | |
2036 | * @devname: An ascii name for the claiming device | |
2037 | * @dev_id: A cookie passed back to the handler function | |
2038 | * | |
2039 | * This call allocates interrupt resources and enables the interrupt line | |
2040 | * and IRQ handling. From the point this call is made your handler function | |
2041 | * may be invoked. Since your handler function must clear any interrupt the | |
2042 | * board raises, you must take care both to initialise your hardware and to | |
2043 | * set up the interrupt handler in the right order. | |
2044 | * | |
2045 | * If you want to set up a threaded irq handler for your device then you | |
2046 | * need to supply @handler and @thread_fn. @handler is still called in hard | |
2047 | * interrupt context and has to check whether the interrupt originates from | |
2048 | * the device. If yes it needs to disable the interrupt on the device and | |
2049 | * return IRQ_WAKE_THREAD which will wake up the handler thread and run | |
2050 | * @thread_fn. This split handler design is necessary to support shared | |
2051 | * interrupts. | |
2052 | * | |
2053 | * @dev_id must be globally unique. Normally the address of the device data | |
2054 | * structure is used as the cookie. Since the handler receives this value | |
2055 | * it makes sense to use it. | |
2056 | * | |
2057 | * If your interrupt is shared you must pass a non NULL dev_id as this is | |
2058 | * required when freeing the interrupt. | |
2059 | * | |
2060 | * Flags: | |
1da177e4 | 2061 | * |
3cca53b0 | 2062 | * IRQF_SHARED Interrupt is shared |
0c5d1eb7 | 2063 | * IRQF_TRIGGER_* Specify active edge(s) or level |
04c2721d | 2064 | * IRQF_ONESHOT Run thread_fn with interrupt line masked |
1da177e4 | 2065 | */ |
3aa551c9 TG |
2066 | int request_threaded_irq(unsigned int irq, irq_handler_t handler, |
2067 | irq_handler_t thread_fn, unsigned long irqflags, | |
2068 | const char *devname, void *dev_id) | |
1da177e4 | 2069 | { |
06fcb0c6 | 2070 | struct irqaction *action; |
08678b08 | 2071 | struct irq_desc *desc; |
d3c60047 | 2072 | int retval; |
1da177e4 | 2073 | |
e237a551 CF |
2074 | if (irq == IRQ_NOTCONNECTED) |
2075 | return -ENOTCONN; | |
2076 | ||
1da177e4 LT |
2077 | /* |
2078 | * Sanity-check: shared interrupts must pass in a real dev-ID, | |
2079 | * otherwise we'll have trouble later trying to figure out | |
2080 | * which interrupt is which (messes up the interrupt freeing | |
2081 | * logic etc). | |
17f48034 | 2082 | * |
cbe16f35 BS |
2083 | * Also shared interrupts do not go well with disabling auto enable. |
2084 | * The sharing interrupt might request it while it's still disabled | |
2085 | * and then wait for interrupts forever. | |
2086 | * | |
17f48034 RW |
2087 | * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and |
2088 | * it cannot be set along with IRQF_NO_SUSPEND. | |
1da177e4 | 2089 | */ |
17f48034 | 2090 | if (((irqflags & IRQF_SHARED) && !dev_id) || |
cbe16f35 | 2091 | ((irqflags & IRQF_SHARED) && (irqflags & IRQF_NO_AUTOEN)) || |
17f48034 RW |
2092 | (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) || |
2093 | ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND))) | |
1da177e4 | 2094 | return -EINVAL; |
7d94f7ca | 2095 | |
cb5bc832 | 2096 | desc = irq_to_desc(irq); |
7d94f7ca | 2097 | if (!desc) |
1da177e4 | 2098 | return -EINVAL; |
7d94f7ca | 2099 | |
31d9d9b6 MZ |
2100 | if (!irq_settings_can_request(desc) || |
2101 | WARN_ON(irq_settings_is_per_cpu_devid(desc))) | |
6550c775 | 2102 | return -EINVAL; |
b25c340c TG |
2103 | |
2104 | if (!handler) { | |
2105 | if (!thread_fn) | |
2106 | return -EINVAL; | |
2107 | handler = irq_default_primary_handler; | |
2108 | } | |
1da177e4 | 2109 | |
45535732 | 2110 | action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); |
1da177e4 LT |
2111 | if (!action) |
2112 | return -ENOMEM; | |
2113 | ||
2114 | action->handler = handler; | |
3aa551c9 | 2115 | action->thread_fn = thread_fn; |
1da177e4 | 2116 | action->flags = irqflags; |
1da177e4 | 2117 | action->name = devname; |
1da177e4 LT |
2118 | action->dev_id = dev_id; |
2119 | ||
be45beb2 | 2120 | retval = irq_chip_pm_get(&desc->irq_data); |
4396f46c SL |
2121 | if (retval < 0) { |
2122 | kfree(action); | |
be45beb2 | 2123 | return retval; |
4396f46c | 2124 | } |
be45beb2 | 2125 | |
d3c60047 | 2126 | retval = __setup_irq(irq, desc, action); |
70aedd24 | 2127 | |
2a1d3ab8 | 2128 | if (retval) { |
be45beb2 | 2129 | irq_chip_pm_put(&desc->irq_data); |
2a1d3ab8 | 2130 | kfree(action->secondary); |
377bf1e4 | 2131 | kfree(action); |
2a1d3ab8 | 2132 | } |
377bf1e4 | 2133 | |
6d83f94d | 2134 | #ifdef CONFIG_DEBUG_SHIRQ_FIXME |
6ce51c43 | 2135 | if (!retval && (irqflags & IRQF_SHARED)) { |
a304e1b8 DW |
2136 | /* |
2137 | * It's a shared IRQ -- the driver ought to be prepared for it | |
2138 | * to happen immediately, so let's make sure.... | |
377bf1e4 AV |
2139 | * We disable the irq to make sure that a 'real' IRQ doesn't |
2140 | * run in parallel with our fake. | |
a304e1b8 | 2141 | */ |
59845b1f | 2142 | unsigned long flags; |
a304e1b8 | 2143 | |
377bf1e4 | 2144 | disable_irq(irq); |
59845b1f | 2145 | local_irq_save(flags); |
377bf1e4 | 2146 | |
59845b1f | 2147 | handler(irq, dev_id); |
377bf1e4 | 2148 | |
59845b1f | 2149 | local_irq_restore(flags); |
377bf1e4 | 2150 | enable_irq(irq); |
a304e1b8 DW |
2151 | } |
2152 | #endif | |
1da177e4 LT |
2153 | return retval; |
2154 | } | |
3aa551c9 | 2155 | EXPORT_SYMBOL(request_threaded_irq); |
ae731f8d MZ |
2156 | |
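/*
 * Illustrative sketch (not part of this file) of the split handler design
 * described above, assuming <linux/interrupt.h> and <linux/io.h>. The hard
 * handler only checks and quiesces the device and defers the slow work to
 * the thread. Device structure, register offsets and names are
 * hypothetical.
 */
struct bar_device {
	void __iomem *regs;
};

static irqreturn_t bar_hard_handler(int irq, void *dev_id)
{
	struct bar_device *bar = dev_id;

	/* Hard interrupt context: is this our device? (the line is shared) */
	if (!(readl(bar->regs + 0x00) & 0x1))
		return IRQ_NONE;

	/* Silence the device, then wake the handler thread */
	writel(0x1, bar->regs + 0x04);
	return IRQ_WAKE_THREAD;
}

static irqreturn_t bar_thread_handler(int irq, void *dev_id)
{
	/* Runs in the "irq/<nr>-bar" kernel thread created above, may sleep */
	return IRQ_HANDLED;
}

/*
 * err = request_threaded_irq(irq, bar_hard_handler, bar_thread_handler,
 *			      IRQF_SHARED, "bar", bar);
 */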
2157 | /** | |
0c169edf TG |
2158 | * request_any_context_irq - allocate an interrupt line |
2159 | * @irq: Interrupt line to allocate | |
2160 | * @handler: Function to be called when the IRQ occurs. | |
2161 | * Threaded handler for threaded interrupts. | |
2162 | * @flags: Interrupt type flags | |
2163 | * @name: An ascii name for the claiming device | |
2164 | * @dev_id: A cookie passed back to the handler function | |
2165 | * | |
2166 | * This call allocates interrupt resources and enables the interrupt line | |
2167 | * and IRQ handling. It selects either a hardirq or threaded handling | |
2168 | * method depending on the context. | |
2169 | * | |
2170 | * Returns: On failure, it returns a negative value. On success, it returns either | |
2171 | * IRQC_IS_HARDIRQ or IRQC_IS_NESTED. | |
ae731f8d MZ |
2172 | */ |
2173 | int request_any_context_irq(unsigned int irq, irq_handler_t handler, | |
2174 | unsigned long flags, const char *name, void *dev_id) | |
2175 | { | |
e237a551 | 2176 | struct irq_desc *desc; |
ae731f8d MZ |
2177 | int ret; |
2178 | ||
e237a551 CF |
2179 | if (irq == IRQ_NOTCONNECTED) |
2180 | return -ENOTCONN; | |
2181 | ||
2182 | desc = irq_to_desc(irq); | |
ae731f8d MZ |
2183 | if (!desc) |
2184 | return -EINVAL; | |
2185 | ||
1ccb4e61 | 2186 | if (irq_settings_is_nested_thread(desc)) { |
ae731f8d MZ |
2187 | ret = request_threaded_irq(irq, NULL, handler, |
2188 | flags, name, dev_id); | |
2189 | return !ret ? IRQC_IS_NESTED : ret; | |
2190 | } | |
2191 | ||
2192 | ret = request_irq(irq, handler, flags, name, dev_id); | |
2193 | return !ret ? IRQC_IS_HARDIRQ : ret; | |
2194 | } | |
2195 | EXPORT_SYMBOL_GPL(request_any_context_irq); | |
31d9d9b6 | 2196 | |
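/*
 * Illustrative sketch (not part of this file): request_any_context_irq()
 * for a driver that does not care whether its handler ends up in hard irq
 * context or in a nested thread (e.g. a line behind a slow-bus irqchip).
 * Names are hypothetical.
 */
static irqreturn_t qux_event_handler(int irq, void *dev_id)
{
	/* May run in hardirq or in thread context, so it must suit both */
	return IRQ_HANDLED;
}

static int qux_setup_irq(unsigned int irq, void *dev)
{
	int ret = request_any_context_irq(irq, qux_event_handler,
					  IRQF_TRIGGER_RISING, "qux", dev);

	if (ret < 0)
		return ret;
	/* ret is IRQC_IS_HARDIRQ or IRQC_IS_NESTED; both mean success */
	return 0;
}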
b525903c | 2197 | /** |
0c169edf TG |
2198 | * request_nmi - allocate an interrupt line for NMI delivery |
2199 | * @irq: Interrupt line to allocate | |
2200 | * @handler: Function to be called when the IRQ occurs. | |
2201 | * Threaded handler for threaded interrupts. | |
2202 | * @irqflags: Interrupt type flags | |
2203 | * @name: An ascii name for the claiming device | |
2204 | * @dev_id: A cookie passed back to the handler function | |
2205 | * | |
2206 | * This call allocates interrupt resources and enables the interrupt line | |
2207 | * and IRQ handling. It sets up the IRQ line to be handled as an NMI. | |
2208 | * | |
2209 | * An interrupt line delivering NMIs cannot be shared and IRQ handling | |
2210 | * cannot be threaded. | |
2211 | * | |
2212 | * Interrupt lines requested for NMI delivery must produce per-CPU | |
2213 | * interrupts and have automatic enabling disabled. | |
2214 | * | |
2215 | * @dev_id must be globally unique. Normally the address of the device data | |
2216 | * structure is used as the cookie. Since the handler receives this value | |
2217 | * it makes sense to use it. | |
2218 | * | |
2219 | * If the interrupt line cannot be used to deliver NMIs, the function will fail | |
2220 | * and return a negative value. | |
b525903c JT |
2221 | */ |
2222 | int request_nmi(unsigned int irq, irq_handler_t handler, | |
2223 | unsigned long irqflags, const char *name, void *dev_id) | |
2224 | { | |
2225 | struct irqaction *action; | |
2226 | struct irq_desc *desc; | |
b525903c JT |
2227 | int retval; |
2228 | ||
2229 | if (irq == IRQ_NOTCONNECTED) | |
2230 | return -ENOTCONN; | |
2231 | ||
2232 | /* NMIs cannot be shared or used for polling */ | |
2233 | if (irqflags & (IRQF_SHARED | IRQF_COND_SUSPEND | IRQF_IRQPOLL)) | |
2234 | return -EINVAL; | |
2235 | ||
2236 | if (!(irqflags & IRQF_PERCPU)) | |
2237 | return -EINVAL; | |
2238 | ||
2239 | if (!handler) | |
2240 | return -EINVAL; | |
2241 | ||
2242 | desc = irq_to_desc(irq); | |
2243 | ||
cbe16f35 BS |
2244 | if (!desc || (irq_settings_can_autoenable(desc) && |
2245 | !(irqflags & IRQF_NO_AUTOEN)) || | |
b525903c JT |
2246 | !irq_settings_can_request(desc) || |
2247 | WARN_ON(irq_settings_is_per_cpu_devid(desc)) || | |
2248 | !irq_supports_nmi(desc)) | |
2249 | return -EINVAL; | |
2250 | ||
2251 | action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); | |
2252 | if (!action) | |
2253 | return -ENOMEM; | |
2254 | ||
2255 | action->handler = handler; | |
2256 | action->flags = irqflags | IRQF_NO_THREAD | IRQF_NOBALANCING; | |
2257 | action->name = name; | |
2258 | action->dev_id = dev_id; | |
2259 | ||
2260 | retval = irq_chip_pm_get(&desc->irq_data); | |
2261 | if (retval < 0) | |
2262 | goto err_out; | |
2263 | ||
2264 | retval = __setup_irq(irq, desc, action); | |
2265 | if (retval) | |
2266 | goto err_irq_setup; | |
2267 | ||
17c19535 TG |
2268 | scoped_guard(raw_spinlock_irqsave, &desc->lock) { |
2269 | /* Setup NMI state */ | |
2270 | desc->istate |= IRQS_NMI; | |
2271 | retval = irq_nmi_setup(desc); | |
2272 | if (retval) { | |
2273 | __cleanup_nmi(irq, desc); | |
2274 | return -EINVAL; | |
2275 | } | |
2276 | return 0; | |
b525903c JT |
2277 | } |
2278 | ||
b525903c JT |
2279 | err_irq_setup: |
2280 | irq_chip_pm_put(&desc->irq_data); | |
2281 | err_out: | |
2282 | kfree(action); | |
2283 | ||
2284 | return retval; | |
2285 | } | |
2286 | ||
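/*
 * Illustrative sketch (not part of this file): requesting a line for NMI
 * delivery. Matching the checks above, the request is not shared, carries
 * IRQF_PERCPU and keeps the line disabled via IRQF_NO_AUTOEN; enable_nmi()
 * is assumed to be the enable counterpart. Names and the handler are
 * hypothetical; an NMI handler must not block or take regular locks.
 */
static irqreturn_t hardlockup_nmi_handler(int irq, void *dev_id)
{
	/* NMI context: keep this minimal */
	return IRQ_HANDLED;
}

static int hardlockup_setup_nmi(unsigned int irq, void *dev)
{
	int ret = request_nmi(irq, hardlockup_nmi_handler,
			      IRQF_PERCPU | IRQF_NO_AUTOEN, "hardlockup", dev);

	if (ret)
		return ret;

	enable_nmi(irq);
	return 0;
}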
1e7c5fd2 | 2287 | void enable_percpu_irq(unsigned int irq, unsigned int type) |
31d9d9b6 | 2288 | { |
508bd94c TG |
2289 | scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_PERCPU) { |
2290 | struct irq_desc *desc = scoped_irqdesc; | |
1e7c5fd2 | 2291 | |
508bd94c TG |
2292 | /* |
2293 | * If the trigger type is not specified by the caller, then | |
2294 | * use the default for this interrupt. | |
2295 | */ | |
2296 | type &= IRQ_TYPE_SENSE_MASK; | |
2297 | if (type == IRQ_TYPE_NONE) | |
2298 | type = irqd_get_trigger_type(&desc->irq_data); | |
2299 | ||
2300 | if (type != IRQ_TYPE_NONE) { | |
2301 | if (__irq_set_trigger(desc, type)) { | |
2302 | WARN(1, "failed to set type for IRQ%d\n", irq); | |
2303 | return; | |
2304 | } | |
1e7c5fd2 | 2305 | } |
508bd94c | 2306 | irq_percpu_enable(desc, smp_processor_id()); |
1e7c5fd2 | 2307 | } |
31d9d9b6 | 2308 | } |
36a5df85 | 2309 | EXPORT_SYMBOL_GPL(enable_percpu_irq); |
31d9d9b6 | 2310 | |
4b078c3f JT |
2311 | void enable_percpu_nmi(unsigned int irq, unsigned int type) |
2312 | { | |
2313 | enable_percpu_irq(irq, type); | |
2314 | } | |
2315 | ||
f0cb3220 TP |
2316 | /** |
2317 | * irq_percpu_is_enabled - Check whether the per cpu irq is enabled | |
2318 | * @irq: Linux irq number to check for | |
2319 | * | |
2320 | * Must be called from a non-migratable context. Returns the enable | |
2321 | * state of a per-CPU interrupt on the current CPU. | |
2322 | */ | |
2323 | bool irq_percpu_is_enabled(unsigned int irq) | |
2324 | { | |
b171f712 TG |
2325 | scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_PERCPU) |
2326 | return cpumask_test_cpu(smp_processor_id(), scoped_irqdesc->percpu_enabled); | |
2327 | return false; | |
f0cb3220 TP |
2328 | } |
2329 | EXPORT_SYMBOL_GPL(irq_percpu_is_enabled); | |
2330 | ||
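/*
 * Illustrative sketch (not part of this file): querying the per-CPU enable
 * state from a non-migratable context, as required above. The helper name
 * is hypothetical.
 */
static bool local_timer_irq_enabled(unsigned int irq)
{
	bool enabled;

	preempt_disable();	/* keep "the current cpu" stable */
	enabled = irq_percpu_is_enabled(irq);
	preempt_enable();

	return enabled;
}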
31d9d9b6 MZ |
2331 | void disable_percpu_irq(unsigned int irq) |
2332 | { | |
8e3f672b TG |
2333 | scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_PERCPU) |
2334 | irq_percpu_disable(scoped_irqdesc, smp_processor_id()); | |
31d9d9b6 | 2335 | } |
36a5df85 | 2336 | EXPORT_SYMBOL_GPL(disable_percpu_irq); |
31d9d9b6 | 2337 | |
4b078c3f JT |
2338 | void disable_percpu_nmi(unsigned int irq) |
2339 | { | |
2340 | disable_percpu_irq(irq); | |
2341 | } | |
2342 | ||
31d9d9b6 MZ |
2343 | /* |
2344 | * Internal function to unregister a percpu irqaction. | |
2345 | */ | |
2346 | static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id) | |
2347 | { | |
2348 | struct irq_desc *desc = irq_to_desc(irq); | |
2349 | struct irqaction *action; | |
31d9d9b6 MZ |
2350 | |
2351 | WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq); | |
2352 | ||
2353 | if (!desc) | |
2354 | return NULL; | |
2355 | ||
17c19535 TG |
2356 | scoped_guard(raw_spinlock_irqsave, &desc->lock) { |
2357 | action = desc->action; | |
2358 | if (!action || action->percpu_dev_id != dev_id) { | |
2359 | WARN(1, "Trying to free already-free IRQ %d\n", irq); | |
2360 | return NULL; | |
2361 | } | |
31d9d9b6 | 2362 | |
17c19535 TG |
2363 | if (!cpumask_empty(desc->percpu_enabled)) { |
2364 | WARN(1, "percpu IRQ %d still enabled on CPU%d!\n", | |
2365 | irq, cpumask_first(desc->percpu_enabled)); | |
2366 | return NULL; | |
2367 | } | |
31d9d9b6 | 2368 | |
17c19535 TG |
2369 | /* Found it - now remove it from the list of entries: */ |
2370 | desc->action = NULL; | |
2371 | desc->istate &= ~IRQS_NMI; | |
31d9d9b6 MZ |
2372 | } |
2373 | ||
31d9d9b6 | 2374 | unregister_handler_proc(irq, action); |
be45beb2 | 2375 | irq_chip_pm_put(&desc->irq_data); |
31d9d9b6 MZ |
2376 | module_put(desc->owner); |
2377 | return action; | |
31d9d9b6 MZ |
2378 | } |
2379 | ||
31d9d9b6 | 2380 | /** |
0c169edf TG |
2381 | * free_percpu_irq - free an interrupt allocated with request_percpu_irq |
2382 | * @irq: Interrupt line to free | |
2383 | * @dev_id: Device identity to free | |
31d9d9b6 | 2384 | * |
0c169edf TG |
2385 | * Remove a percpu interrupt handler. The handler is removed, but the |
2386 | * interrupt line is not disabled. This must be done on each CPU before | |
2387 | * calling this function. The function does not return until any executing | |
2388 | * interrupts for this IRQ have completed. | |
31d9d9b6 | 2389 | * |
0c169edf | 2390 | * This function must not be called from interrupt context. |
31d9d9b6 MZ |
2391 | */ |
2392 | void free_percpu_irq(unsigned int irq, void __percpu *dev_id) | |
2393 | { | |
2394 | struct irq_desc *desc = irq_to_desc(irq); | |
2395 | ||
2396 | if (!desc || !irq_settings_is_per_cpu_devid(desc)) | |
2397 | return; | |
2398 | ||
2399 | chip_bus_lock(desc); | |
2400 | kfree(__free_percpu_irq(irq, dev_id)); | |
2401 | chip_bus_sync_unlock(desc); | |
2402 | } | |
aec2e2ad | 2403 | EXPORT_SYMBOL_GPL(free_percpu_irq); |
31d9d9b6 | 2404 | |
4b078c3f JT |
2405 | void free_percpu_nmi(unsigned int irq, void __percpu *dev_id) |
2406 | { | |
2407 | struct irq_desc *desc = irq_to_desc(irq); | |
2408 | ||
2409 | if (!desc || !irq_settings_is_per_cpu_devid(desc)) | |
2410 | return; | |
2411 | ||
6678ae19 | 2412 | if (WARN_ON(!irq_is_nmi(desc))) |
4b078c3f JT |
2413 | return; |
2414 | ||
2415 | kfree(__free_percpu_irq(irq, dev_id)); | |
2416 | } | |
2417 | ||
31d9d9b6 | 2418 | /** |
0c169edf TG |
2419 | * setup_percpu_irq - setup a per-cpu interrupt |
2420 | * @irq: Interrupt line to setup | |
2421 | * @act: irqaction for the interrupt | |
31d9d9b6 MZ |
2422 | * |
2423 | * Used to statically set up per-cpu interrupts in the early boot process. | |
2424 | */ | |
2425 | int setup_percpu_irq(unsigned int irq, struct irqaction *act) | |
2426 | { | |
2427 | struct irq_desc *desc = irq_to_desc(irq); | |
2428 | int retval; | |
2429 | ||
2430 | if (!desc || !irq_settings_is_per_cpu_devid(desc)) | |
2431 | return -EINVAL; | |
be45beb2 JH |
2432 | |
2433 | retval = irq_chip_pm_get(&desc->irq_data); | |
2434 | if (retval < 0) | |
2435 | return retval; | |
2436 | ||
31d9d9b6 | 2437 | retval = __setup_irq(irq, desc, act); |
31d9d9b6 | 2438 | |
be45beb2 JH |
2439 | if (retval) |
2440 | irq_chip_pm_put(&desc->irq_data); | |
2441 | ||
31d9d9b6 MZ |
2442 | return retval; |
2443 | } | |
2444 | ||
2445 | /** | |
0c169edf TG |
2446 | * __request_percpu_irq - allocate a percpu interrupt line |
2447 | * @irq: Interrupt line to allocate | |
2448 | * @handler: Function to be called when the IRQ occurs. | |
2449 | * @flags: Interrupt type flags (IRQF_TIMER only) | |
2450 | * @devname: An ascii name for the claiming device | |
2451 | * @dev_id: A percpu cookie passed back to the handler function | |
2452 | * | |
2453 | * This call allocates interrupt resources and enables the interrupt on the | |
2454 | * local CPU. If the interrupt is supposed to be enabled on other CPUs, it | |
2455 | * has to be done on each CPU using enable_percpu_irq(). | |
2456 | * | |
2457 | * @dev_id must be globally unique. It is a per-cpu variable, and | |
2458 | * the handler gets called with the interrupted CPU's instance of | |
2459 | * that variable. | |
31d9d9b6 | 2460 | */ |
c80081b9 DL |
2461 | int __request_percpu_irq(unsigned int irq, irq_handler_t handler, |
2462 | unsigned long flags, const char *devname, | |
2463 | void __percpu *dev_id) | |
31d9d9b6 MZ |
2464 | { |
2465 | struct irqaction *action; | |
2466 | struct irq_desc *desc; | |
2467 | int retval; | |
2468 | ||
2469 | if (!dev_id) | |
2470 | return -EINVAL; | |
2471 | ||
2472 | desc = irq_to_desc(irq); | |
2473 | if (!desc || !irq_settings_can_request(desc) || | |
2474 | !irq_settings_is_per_cpu_devid(desc)) | |
2475 | return -EINVAL; | |
2476 | ||
c80081b9 DL |
2477 | if (flags && flags != IRQF_TIMER) |
2478 | return -EINVAL; | |
2479 | ||
31d9d9b6 MZ |
2480 | action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); |
2481 | if (!action) | |
2482 | return -ENOMEM; | |
2483 | ||
2484 | action->handler = handler; | |
c80081b9 | 2485 | action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND; |
31d9d9b6 MZ |
2486 | action->name = devname; |
2487 | action->percpu_dev_id = dev_id; | |
2488 | ||
be45beb2 | 2489 | retval = irq_chip_pm_get(&desc->irq_data); |
4396f46c SL |
2490 | if (retval < 0) { |
2491 | kfree(action); | |
be45beb2 | 2492 | return retval; |
4396f46c | 2493 | } |
be45beb2 | 2494 | |
31d9d9b6 | 2495 | retval = __setup_irq(irq, desc, action); |
31d9d9b6 | 2496 | |
be45beb2 JH |
2497 | if (retval) { |
2498 | irq_chip_pm_put(&desc->irq_data); | |
31d9d9b6 | 2499 | kfree(action); |
be45beb2 | 2500 | } |
31d9d9b6 MZ |
2501 | |
2502 | return retval; | |
2503 | } | |
c80081b9 | 2504 | EXPORT_SYMBOL_GPL(__request_percpu_irq); |
1b7047ed | 2505 | |
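/*
 * Illustrative sketch (not part of this file): a per-CPU interrupt such as
 * a local timer, assuming <linux/interrupt.h> and <linux/percpu.h>.
 * request_percpu_irq() is assumed to be the flag-less wrapper around the
 * function above. The dev_id is a per-cpu variable and the handler gets
 * the interrupted CPU's instance, as documented. Names are hypothetical.
 */
static DEFINE_PER_CPU(unsigned long, ltimer_ticks);

static irqreturn_t ltimer_handler(int irq, void *dev_id)
{
	unsigned long *ticks = dev_id;	/* this CPU's instance */

	(*ticks)++;
	return IRQ_HANDLED;
}

/*
 * Once:    err = request_percpu_irq(irq, ltimer_handler, "ltimer", &ltimer_ticks);
 * Per CPU: enable_percpu_irq(irq, IRQ_TYPE_NONE);
 * Teardown, after disable_percpu_irq() on each CPU:
 *          free_percpu_irq(irq, &ltimer_ticks);
 */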
4b078c3f | 2506 | /** |
0c169edf TG |
2507 | * request_percpu_nmi - allocate a percpu interrupt line for NMI delivery |
2508 | * @irq: Interrupt line to allocate | |
2509 | * @handler: Function to be called when the IRQ occurs. | |
2510 | * @name: An ascii name for the claiming device | |
2511 | * @dev_id: A percpu cookie passed back to the handler function | |
4b078c3f | 2512 | * |
0c169edf TG |
2513 | * This call allocates interrupt resources for a per CPU NMI. Per CPU NMIs |
2514 | * have to be set up on each CPU by calling prepare_percpu_nmi() before | |
2515 | * being enabled on the same CPU by using enable_percpu_nmi(). | |
4b078c3f | 2516 | * |
0c169edf TG |
2517 | * @dev_id must be globally unique. It is a per-cpu variable, and the |
2518 | * handler gets called with the interrupted CPU's instance of that | |
2519 | * variable. | |
4b078c3f | 2520 | * |
0c169edf TG |
2521 | * Interrupt lines requested for NMI delivery should have automatic | |
2522 | * enabling disabled. | |
4b078c3f | 2523 | * |
0c169edf TG |
2524 | * If the interrupt line cannot be used to deliver NMIs, the function | |
2525 | * will fail, returning a negative value. | |
4b078c3f JT |
2526 | */ |
2527 | int request_percpu_nmi(unsigned int irq, irq_handler_t handler, | |
2528 | const char *name, void __percpu *dev_id) | |
2529 | { | |
2530 | struct irqaction *action; | |
2531 | struct irq_desc *desc; | |
4b078c3f JT |
2532 | int retval; |
2533 | ||
2534 | if (!handler) | |
2535 | return -EINVAL; | |
2536 | ||
2537 | desc = irq_to_desc(irq); | |
2538 | ||
2539 | if (!desc || !irq_settings_can_request(desc) || | |
2540 | !irq_settings_is_per_cpu_devid(desc) || | |
2541 | irq_settings_can_autoenable(desc) || | |
2542 | !irq_supports_nmi(desc)) | |
2543 | return -EINVAL; | |
2544 | ||
2545 | /* The line cannot already be NMI */ | |
6678ae19 | 2546 | if (irq_is_nmi(desc)) |
4b078c3f JT |
2547 | return -EINVAL; |
2548 | ||
2549 | action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); | |
2550 | if (!action) | |
2551 | return -ENOMEM; | |
2552 | ||
2553 | action->handler = handler; | |
2554 | action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND | IRQF_NO_THREAD | |
2555 | | IRQF_NOBALANCING; | |
2556 | action->name = name; | |
2557 | action->percpu_dev_id = dev_id; | |
2558 | ||
2559 | retval = irq_chip_pm_get(&desc->irq_data); | |
2560 | if (retval < 0) | |
2561 | goto err_out; | |
2562 | ||
2563 | retval = __setup_irq(irq, desc, action); | |
2564 | if (retval) | |
2565 | goto err_irq_setup; | |
2566 | ||
97f4b999 TG |
2567 | scoped_guard(raw_spinlock_irqsave, &desc->lock) |
2568 | desc->istate |= IRQS_NMI; | |
4b078c3f JT |
2569 | return 0; |
2570 | ||
2571 | err_irq_setup: | |
2572 | irq_chip_pm_put(&desc->irq_data); | |
2573 | err_out: | |
2574 | kfree(action); | |
2575 | ||
2576 | return retval; | |
2577 | } | |
2578 | ||
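/*
 * Illustrative sketch (not part of this file) of the per-CPU NMI life
 * cycle built from the functions above: request once, then prepare and
 * enable on each CPU from a non-preemptible context (e.g. a CPU hotplug
 * "starting" callback). Names are hypothetical.
 */
static DEFINE_PER_CPU(unsigned long, pmu_nmi_count);

static irqreturn_t pmu_nmi_handler(int irq, void *dev_id)
{
	unsigned long *count = dev_id;	/* this CPU's instance */

	(*count)++;
	return IRQ_HANDLED;
}

/*
 * Once:
 *	err = request_percpu_nmi(irq, pmu_nmi_handler, "pmu-nmi", &pmu_nmi_count);
 * On each CPU:
 *	if (!prepare_percpu_nmi(irq))
 *		enable_percpu_nmi(irq, IRQ_TYPE_NONE);
 * Teardown on each CPU, then once:
 *	disable_percpu_nmi(irq);
 *	teardown_percpu_nmi(irq);
 *	free_percpu_nmi(irq, &pmu_nmi_count);
 */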
2579 | /** | |
0c169edf TG |
2580 | * prepare_percpu_nmi - performs CPU local setup for NMI delivery |
2581 | * @irq: Interrupt line to prepare for NMI delivery | |
4b078c3f | 2582 | * |
0c169edf TG |
2583 | * This call prepares an interrupt line to deliver NMI on the current CPU, |
2584 | * before that interrupt line gets enabled with enable_percpu_nmi(). | |
4b078c3f | 2585 | * |
0c169edf TG |
2586 | * As a CPU local operation, this should be called from non-preemptible |
2587 | * context. | |
4b078c3f | 2588 | * |
0c169edf TG |
2589 | * If the interrupt line cannot be used to deliver NMIs, the function will fail, | |
2590 | * returning a negative value. | |
4b078c3f JT |
2591 | */ |
2592 | int prepare_percpu_nmi(unsigned int irq) | |
2593 | { | |
65dd1f7c | 2594 | int ret = -EINVAL; |
4b078c3f JT |
2595 | |
2596 | WARN_ON(preemptible()); | |
2597 | ||
65dd1f7c TG |
2598 | scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_PERCPU) { |
2599 | if (WARN(!irq_is_nmi(scoped_irqdesc), | |
2600 | "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n", irq)) | |
2601 | return -EINVAL; | |
4b078c3f | 2602 | |
65dd1f7c TG |
2603 | ret = irq_nmi_setup(scoped_irqdesc); |
2604 | if (ret) | |
2605 | pr_err("Failed to setup NMI delivery: irq %u\n", irq); | |
4b078c3f | 2606 | } |
4b078c3f JT |
2607 | return ret; |
2608 | } | |
2609 | ||
2610 | /** | |
0c169edf TG |
2611 | * teardown_percpu_nmi - undoes NMI setup of IRQ line |
2612 | * @irq: Interrupt line from which CPU local NMI configuration should be removed | |
4b078c3f | 2613 | * |
0c169edf | 2614 | * This call undoes the setup done by prepare_percpu_nmi(). |
4b078c3f | 2615 | * |
0c169edf TG |
2616 | * IRQ line should not be enabled for the current CPU. |
2617 | * As a CPU local operation, this should be called from non-preemptible | |
2618 | * context. | |
4b078c3f JT |
2619 | */ |
2620 | void teardown_percpu_nmi(unsigned int irq) | |
2621 | { | |
4b078c3f JT |
2622 | WARN_ON(preemptible()); |
2623 | ||
5fec6d5c TG |
2624 | scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_PERCPU) { |
2625 | if (WARN_ON(!irq_is_nmi(scoped_irqdesc))) | |
2626 | return; | |
2627 | irq_nmi_teardown(scoped_irqdesc); | |
2628 | } | |
4b078c3f JT |
2629 | } |
2630 | ||
827bafd5 | 2631 | static int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which, bool *state) |
62e04686 TG |
2632 | { |
2633 | struct irq_chip *chip; | |
2634 | int err = -EINVAL; | |
2635 | ||
2636 | do { | |
2637 | chip = irq_data_get_irq_chip(data); | |
1d0326f3 MV |
2638 | if (WARN_ON_ONCE(!chip)) |
2639 | return -ENODEV; | |
62e04686 TG |
2640 | if (chip->irq_get_irqchip_state) |
2641 | break; | |
2642 | #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY | |
2643 | data = data->parent_data; | |
2644 | #else | |
2645 | data = NULL; | |
2646 | #endif | |
2647 | } while (data); | |
2648 | ||
2649 | if (data) | |
2650 | err = chip->irq_get_irqchip_state(data, which, state); | |
2651 | return err; | |
2652 | } | |
2653 | ||
1b7047ed | 2654 | /** |
0c169edf TG |
2655 | * irq_get_irqchip_state - returns the irqchip state of an interrupt. | |
2656 | * @irq: Interrupt line that is forwarded to a VM | |
2657 | * @which: One of IRQCHIP_STATE_* the caller wants to know about | |
2658 | * @state: a pointer to a boolean where the state is to be stored | |
1b7047ed | 2659 | * |
0c169edf TG |
2660 | * This call snapshots the internal irqchip state of an interrupt, |
2661 | * returning into @state the bit corresponding to state @which. | |
1b7047ed | 2662 | * |
0c169edf TG |
2663 | * This function should be called with preemption disabled if the interrupt |
2664 | * controller has per-cpu registers. | |
1b7047ed | 2665 | */ |
782249a9 | 2666 | int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which, bool *state) |
1b7047ed | 2667 | { |
782249a9 TG |
2668 | scoped_irqdesc_get_and_buslock(irq, 0) { |
2669 | struct irq_data *data = irq_desc_get_irq_data(scoped_irqdesc); | |
1b7047ed | 2670 | |
782249a9 TG |
2671 | return __irq_get_irqchip_state(data, which, state); |
2672 | } | |
2673 | return -EINVAL; | |
1b7047ed | 2674 | } |
1ee4fb3e | 2675 | EXPORT_SYMBOL_GPL(irq_get_irqchip_state); |
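/*
 * Illustrative sketch (not part of this file): snapshotting whether an
 * interrupt forwarded to a guest is still pending at the irqchip.
 * IRQCHIP_STATE_PENDING is one of the enum irqchip_irq_state values from
 * <linux/irq.h>; the helper name is hypothetical.
 */
static bool fwd_irq_is_pending(unsigned int host_irq)
{
	bool pending = false;

	if (irq_get_irqchip_state(host_irq, IRQCHIP_STATE_PENDING, &pending))
		return false;	/* chip does not expose the state */

	return pending;
}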
1b7047ed MZ |
2676 | |
2677 | /** | |
0c169edf TG |
2678 | * irq_set_irqchip_state - set the state of a forwarded interrupt. |
2679 | * @irq: Interrupt line that is forwarded to a VM | |
2680 | * @which: State to be restored (one of IRQCHIP_STATE_*) | |
2681 | * @val: Value corresponding to @which | |
1b7047ed | 2682 | * |
0c169edf TG |
2683 | * This call sets the internal irqchip state of an interrupt, depending on |
2684 | * the value of @which. | |
1b7047ed | 2685 | * |
0c169edf TG |
2686 | * This function should be called with migration disabled if the interrupt |
2687 | * controller has per-cpu registers. | |
1b7047ed | 2688 | */ |
0c169edf | 2689 | int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, bool val) |
1b7047ed | 2690 | { |
193879e2 TG |
2691 | scoped_irqdesc_get_and_buslock(irq, 0) { |
2692 | struct irq_data *data = irq_desc_get_irq_data(scoped_irqdesc); | |
2693 | struct irq_chip *chip; | |
1b7047ed | 2694 | |
193879e2 TG |
2695 | do { |
2696 | chip = irq_data_get_irq_chip(data); | |
1b7047ed | 2697 | |
193879e2 TG |
2698 | if (WARN_ON_ONCE(!chip)) |
2699 | return -ENODEV; | |
1b7047ed | 2700 | |
193879e2 TG |
2701 | if (chip->irq_set_irqchip_state) |
2702 | break; | |
1b7047ed | 2703 | |
193879e2 TG |
2704 | data = irqd_get_parent_data(data); |
2705 | } while (data); | |
1b7047ed | 2706 | |
193879e2 TG |
2707 | if (data) |
2708 | return chip->irq_set_irqchip_state(data, which, val); | |
2709 | } | |
2710 | return -EINVAL; | |
1b7047ed | 2711 | } |
1ee4fb3e | 2712 | EXPORT_SYMBOL_GPL(irq_set_irqchip_state); |
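/*
 * Illustrative sketch (not part of this file): the counterpart of the
 * snapshot above, restoring the pending bit of a forwarded interrupt when
 * previously saved state is loaded back. The helper name is hypothetical.
 */
static int fwd_irq_restore_pending(unsigned int host_irq, bool was_pending)
{
	return irq_set_irqchip_state(host_irq, IRQCHIP_STATE_PENDING,
				     was_pending);
}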
a313357e TG |
2713 | |
2714 | /** | |
2715 | * irq_has_action - Check whether an interrupt is requested | |
2716 | * @irq: The linux irq number | |
2717 | * | |
2718 | * Returns: A snapshot of the current state | |
2719 | */ | |
2720 | bool irq_has_action(unsigned int irq) | |
2721 | { | |
2722 | bool res; | |
2723 | ||
2724 | rcu_read_lock(); | |
2725 | res = irq_desc_has_action(irq_to_desc(irq)); | |
2726 | rcu_read_unlock(); | |
2727 | return res; | |
2728 | } | |
2729 | EXPORT_SYMBOL_GPL(irq_has_action); | |
fdd02963 TG |
2730 | |
2731 | /** | |
2732 | * irq_check_status_bit - Check whether bits in the irq descriptor status are set | |
2733 | * @irq: The linux irq number | |
2734 | * @bitmask: The bitmask to evaluate | |
2735 | * | |
2736 | * Returns: True if one of the bits in @bitmask is set | |
2737 | */ | |
2738 | bool irq_check_status_bit(unsigned int irq, unsigned int bitmask) | |
2739 | { | |
2740 | struct irq_desc *desc; | |
2741 | bool res = false; | |
2742 | ||
2743 | rcu_read_lock(); | |
2744 | desc = irq_to_desc(irq); | |
2745 | if (desc) | |
2746 | res = !!(desc->status_use_accessors & bitmask); | |
2747 | rcu_read_unlock(); | |
2748 | return res; | |
2749 | } | |
ce09ccc5 | 2750 | EXPORT_SYMBOL_GPL(irq_check_status_bit); |
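/*
 * Illustrative sketch (not part of this file): both helpers above only
 * take an RCU protected snapshot, so the answer may already be stale when
 * it is used; they are suited for diagnostics, not for synchronization.
 * IRQ_PER_CPU is assumed to be one of the IRQ_* status bits from
 * <linux/irq.h>; the helper name is hypothetical.
 */
static void report_irq_state(unsigned int irq)
{
	if (!irq_has_action(irq))
		pr_info("irq %u: no action installed\n", irq);
	else if (irq_check_status_bit(irq, IRQ_PER_CPU))
		pr_info("irq %u: per-CPU interrupt\n", irq);
	else
		pr_info("irq %u: regular interrupt\n", irq);
}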