Commit | Line | Data |
---|---|---|
52a65ff5 | 1 | // SPDX-License-Identifier: GPL-2.0 |
dd87eb3a | 2 | /* |
dd87eb3a TG |
3 | * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar |
4 | * Copyright (C) 2005-2006, Thomas Gleixner, Russell King | |
5 | * | |
99bfce5d TG |
6 | * This file contains the core interrupt handling code, for irq-chip based |
7 | * architectures. Detailed information is available in | |
8 | * Documentation/core-api/genericirq.rst | |
dd87eb3a TG |
9 | */ |
10 | ||
11 | #include <linux/irq.h> | |
7fe3730d | 12 | #include <linux/msi.h> |
dd87eb3a TG |
13 | #include <linux/module.h> |
14 | #include <linux/interrupt.h> | |
15 | #include <linux/kernel_stat.h> | |
f8264e34 | 16 | #include <linux/irqdomain.h> |
dd87eb3a | 17 | |
f069686e SR |
18 | #include <trace/events/irq.h> |
19 | ||
dd87eb3a TG |
20 | #include "internals.h" |
21 | ||
e509bd7d MW |
/*
 * Default action handler for chained interrupts. A chained IRQ is fully
 * consumed by its chained flow handler, so this being invoked indicates a
 * wiring bug: warn once and report that the interrupt was not handled.
 */
static irqreturn_t bad_chained_irq(int irq, void *dev_id)
{
	WARN_ONCE(1, "Chained irq %d should not call an action\n", irq);
	return IRQ_NONE;
}
27 | ||
/*
 * Chained handlers should never call an action on their IRQ. This default
 * action will emit a warning if such a thing happens.
 */
struct irqaction chained_action = {
	.handler	= bad_chained_irq,
};
35 | ||
/**
 *	irq_set_chip - set the irq chip for an irq
 *	@irq:	irq number
 *	@chip:	pointer to irq chip description structure
 *
 *	Returns 0 on success, -EINVAL if no descriptor exists for @irq.
 */
int irq_set_chip(unsigned int irq, const struct irq_chip *chip)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	/*
	 * Fall back to no_irq_chip when no chip is supplied. The cast
	 * drops const because irq_data.chip is not const qualified.
	 */
	desc->irq_data.chip = (struct irq_chip *)(chip ?: &no_irq_chip);
	irq_put_desc_unlock(desc, flags);
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
	 * allocated_irqs.
	 */
	irq_mark_irq(irq);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip);
dd87eb3a TG |
59 | |
/**
 *	irq_set_irq_type - set the irq trigger type for an irq
 *	@irq:	irq number
 *	@type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 *
 *	Takes the descriptor bus lock because trigger configuration may
 *	require sleeping bus transactions on slow irq chips.
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	ret = __irq_set_trigger(desc, type);
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);
dd87eb3a TG |
79 | |
/**
 *	irq_set_handler_data - set irq handler data for an irq
 *	@irq:	Interrupt number
 *	@data:	Pointer to interrupt specific data
 *
 *	Set the hardware irq controller data for an irq. Returns 0 on
 *	success, -EINVAL if no descriptor exists for @irq.
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_common_data.handler_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);
dd87eb3a | 99 | |
/**
 *	irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
 *	@irq_base:	Interrupt number base
 *	@irq_offset:	Interrupt number offset
 *	@entry:		Pointer to MSI descriptor data
 *
 *	Set the MSI descriptor entry for an irq at offset. Only the first
 *	(offset 0) interrupt records itself in @entry->irq.
 */
int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
			 struct msi_desc *entry)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->irq_common_data.msi_desc = entry;
	if (entry && !irq_offset)
		entry->irq = irq_base;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
122 | ||
51906e77 AG |
123 | /** |
124 | * irq_set_msi_desc - set MSI descriptor data for an irq | |
125 | * @irq: Interrupt number | |
126 | * @entry: Pointer to MSI descriptor data | |
127 | * | |
128 | * Set the MSI descriptor entry for an irq | |
129 | */ | |
130 | int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry) | |
131 | { | |
132 | return irq_set_msi_desc_off(irq, 0, entry); | |
133 | } | |
134 | ||
/**
 *	irq_set_chip_data - set irq chip data for an irq
 *	@irq:	Interrupt number
 *	@data:	Pointer to chip specific data
 *
 *	Set the hardware irq chip data for an irq. Returns 0 on success,
 *	-EINVAL if no descriptor exists for @irq.
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.chip_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);
dd87eb3a | 154 | |
f303a6dd TG |
155 | struct irq_data *irq_get_irq_data(unsigned int irq) |
156 | { | |
157 | struct irq_desc *desc = irq_to_desc(irq); | |
158 | ||
159 | return desc ? &desc->irq_data : NULL; | |
160 | } | |
161 | EXPORT_SYMBOL_GPL(irq_get_irq_data); | |
162 | ||
c1594b77 TG |
/* Clear the software "disabled" state bit in the descriptor's irq_data. */
static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}
167 | ||
6e40262e TG |
/* Clear the software "masked" state bit in the descriptor's irq_data. */
static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}
172 | ||
201d7f47 TG |
/* Clear the "started" state bit in the descriptor's irq_data. */
static void irq_state_clr_started(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_STARTED);
}
177 | ||
/* Set the "started" state bit in the descriptor's irq_data. */
static void irq_state_set_started(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_STARTED);
}
182 | ||
761ea388 TG |
/* Verdicts returned by __irq_startup_managed() to irq_startup(). */
enum {
	IRQ_STARTUP_NORMAL,	/* not a managed interrupt, start normally */
	IRQ_STARTUP_MANAGED,	/* managed, affinity applied before startup */
	IRQ_STARTUP_ABORT,	/* do not start up (managed shutdown) */
};
188 | ||
#ifdef CONFIG_SMP
/*
 * Decide how a (potentially affinity-managed) interrupt should be started.
 * Returns one of the IRQ_STARTUP_* verdicts; see irq_startup().
 */
static int
__irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff,
		      bool force)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);

	if (!irqd_affinity_is_managed(d))
		return IRQ_STARTUP_NORMAL;

	irqd_clr_managed_shutdown(d);

	if (cpumask_any_and(aff, cpu_online_mask) >= nr_cpu_ids) {
		/*
		 * Catch code which fiddles with enable_irq() on a managed
		 * and potentially shutdown IRQ. Chained interrupt
		 * installment or irq auto probing should not happen on
		 * managed irqs either.
		 */
		if (WARN_ON_ONCE(force))
			return IRQ_STARTUP_ABORT;
		/*
		 * The interrupt was requested, but there is no online CPU
		 * in its affinity mask. Put it into managed shutdown
		 * state and let the cpu hotplug mechanism start it up once
		 * a CPU in the mask becomes available.
		 */
		return IRQ_STARTUP_ABORT;
	}
	/*
	 * Managed interrupts have reserved resources, so this should not
	 * happen.
	 */
	if (WARN_ON(irq_domain_activate_irq(d, false)))
		return IRQ_STARTUP_ABORT;
	return IRQ_STARTUP_MANAGED;
}
#else
/* On UP there is no managed affinity; always start up normally. */
static __always_inline int
__irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff,
		      bool force)
{
	return IRQ_STARTUP_NORMAL;
}
#endif
234 | ||
708d174b TG |
/*
 * Common startup helper: invoke the chip's irq_startup() callback when it
 * exists (which implies enabling/unmasking), otherwise fall back to
 * irq_enable(), and mark the interrupt started either way.
 */
static int __irq_startup(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	int ret = 0;

	/* Warn if this interrupt is not activated but try nevertheless */
	WARN_ON_ONCE(!irqd_is_activated(d));

	if (d->chip->irq_startup) {
		ret = d->chip->irq_startup(d);
		irq_state_clr_disabled(desc);
		irq_state_clr_masked(desc);
	} else {
		irq_enable(desc);
	}
	irq_state_set_started(desc);
	return ret;
}
253 | ||
/*
 * Start up an interrupt. For managed interrupts the affinity is applied
 * before startup, or the startup is aborted entirely when no CPU in the
 * affinity mask is online (managed shutdown). @resend requests a replay
 * of a pending interrupt, @force overrides the managed-shutdown check.
 */
int irq_startup(struct irq_desc *desc, bool resend, bool force)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	const struct cpumask *aff = irq_data_get_affinity_mask(d);
	int ret = 0;

	desc->depth = 0;

	if (irqd_is_started(d)) {
		irq_enable(desc);
	} else {
		switch (__irq_startup_managed(desc, aff, force)) {
		case IRQ_STARTUP_NORMAL:
			/*
			 * Some chips need the affinity set up before the
			 * startup callback runs; honour that flag here.
			 */
			if (d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP)
				irq_setup_affinity(desc);
			ret = __irq_startup(desc);
			if (!(d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP))
				irq_setup_affinity(desc);
			break;
		case IRQ_STARTUP_MANAGED:
			irq_do_set_affinity(d, aff, false);
			ret = __irq_startup(desc);
			break;
		case IRQ_STARTUP_ABORT:
			irqd_set_managed_shutdown(d);
			return 0;
		}
	}
	if (resend)
		check_irq_resend(desc, false);

	return ret;
}
287 | ||
c942cee4 TG |
/*
 * Activate the interrupt in the irq domain hierarchy. Managed interrupts
 * were already activated at reservation time, so skip them here.
 */
int irq_activate(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);

	if (!irqd_affinity_is_managed(d))
		return irq_domain_activate_irq(d, false);
	return 0;
}
296 | ||
/*
 * Activate and then start up an interrupt. A failed activation is a bug
 * (hence the WARN_ON) and leaves the interrupt unstarted.
 */
int irq_activate_and_startup(struct irq_desc *desc, bool resend)
{
	if (WARN_ON(irq_activate(desc)))
		return 0;
	return irq_startup(desc, resend, IRQ_START_FORCE);
}
303 | ||
201d7f47 TG |
static void __irq_disable(struct irq_desc *desc, bool mask);

/*
 * Shut down a started interrupt: prefer the chip's irq_shutdown()
 * callback, otherwise disable and mask it, and clear the started state.
 * A never-started interrupt is left untouched.
 */
void irq_shutdown(struct irq_desc *desc)
{
	if (irqd_is_started(&desc->irq_data)) {
		desc->depth = 1;
		if (desc->irq_data.chip->irq_shutdown) {
			desc->irq_data.chip->irq_shutdown(&desc->irq_data);
			irq_state_set_disabled(desc);
			irq_state_set_masked(desc);
		} else {
			__irq_disable(desc, true);
		}
		irq_state_clr_started(desc);
	}
}
320 | ||
321 | ||
/* Shut the interrupt down and deactivate it in the irq domain. */
void irq_shutdown_and_deactivate(struct irq_desc *desc)
{
	irq_shutdown(desc);
	/*
	 * This must be called even if the interrupt was never started up,
	 * because the activation can happen before the interrupt is
	 * available for request/startup. It has its own state tracking so
	 * it's safe to call it unconditionally.
	 */
	irq_domain_deactivate_irq(&desc->irq_data);
}
333 | ||
87923470 TG |
/*
 * Enable an interrupt. If it is merely masked (not disabled), unmasking
 * suffices; otherwise clear the disabled state and use the chip's
 * irq_enable() callback when available, falling back to unmask_irq().
 */
void irq_enable(struct irq_desc *desc)
{
	if (!irqd_irq_disabled(&desc->irq_data)) {
		unmask_irq(desc);
	} else {
		irq_state_clr_disabled(desc);
		if (desc->irq_data.chip->irq_enable) {
			desc->irq_data.chip->irq_enable(&desc->irq_data);
			irq_state_clr_masked(desc);
		} else {
			unmask_irq(desc);
		}
	}
}
348 | ||
201d7f47 TG |
/*
 * Disable an interrupt, optionally masking it at the hardware level.
 * If it is already disabled only the (optional) masking is applied.
 * Without a chip irq_disable() callback the lazy approach applies:
 * only mark disabled and mask when @mask asks for it.
 */
static void __irq_disable(struct irq_desc *desc, bool mask)
{
	if (irqd_irq_disabled(&desc->irq_data)) {
		if (mask)
			mask_irq(desc);
	} else {
		irq_state_set_disabled(desc);
		if (desc->irq_data.chip->irq_disable) {
			desc->irq_data.chip->irq_disable(&desc->irq_data);
			irq_state_set_masked(desc);
		} else if (mask) {
			mask_irq(desc);
		}
	}
}
364 | ||
/**
 * irq_disable - Mark interrupt disabled
 * @desc:	irq descriptor which should be disabled
 *
 * If the chip does not implement the irq_disable callback, we
 * use a lazy disable approach. That means we mark the interrupt
 * disabled, but leave the hardware unmasked. That's an
 * optimization because we avoid the hardware access for the
 * common case where no interrupt happens after we marked it
 * disabled. If an interrupt happens, then the interrupt flow
 * handler masks the line at the hardware level and marks it
 * pending.
 *
 * If the interrupt chip does not implement the irq_disable callback,
 * a driver can disable the lazy approach for a particular irq line by
 * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can
 * be used for devices which cannot disable the interrupt at the
 * device level under certain circumstances and have to use
 * disable_irq[_nosync] instead.
 */
void irq_disable(struct irq_desc *desc)
{
	__irq_disable(desc, irq_settings_disable_unlazy(desc));
}
389 | ||
31d9d9b6 MZ |
/*
 * Enable a per-CPU interrupt on @cpu via the chip's enable (or unmask)
 * callback and record the CPU in the descriptor's percpu_enabled mask.
 */
void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	cpumask_set_cpu(cpu, desc->percpu_enabled);
}
398 | ||
/*
 * Disable a per-CPU interrupt on @cpu via the chip's disable (or mask)
 * callback and clear the CPU from the descriptor's percpu_enabled mask.
 */
void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	cpumask_clear_cpu(cpu, desc->percpu_enabled);
}
407 | ||
/*
 * Mask and acknowledge an interrupt, preferring the chip's combined
 * irq_mask_ack() callback, otherwise mask then (optionally) ack.
 */
static inline void mask_ack_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask_ack) {
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
		irq_state_set_masked(desc);
	} else {
		mask_irq(desc);
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
}
419 | ||
/*
 * Mask an interrupt at the chip level and track the masked state.
 * No-op when it is already masked or the chip has no irq_mask().
 */
void mask_irq(struct irq_desc *desc)
{
	if (irqd_irq_masked(&desc->irq_data))
		return;

	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}
430 | ||
/*
 * Unmask an interrupt at the chip level and track the unmasked state.
 * No-op when it is not masked or the chip has no irq_unmask().
 */
void unmask_irq(struct irq_desc *desc)
{
	if (!irqd_irq_masked(&desc->irq_data))
		return;

	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}
441 | ||
328a4978 TG |
/*
 * Unmask after a threaded handler has completed. Chips flagged with
 * IRQCHIP_EOI_THREADED deferred their EOI to thread completion, so
 * issue it here before unmasking.
 */
void unmask_threaded_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	if (chip->flags & IRQCHIP_EOI_THREADED)
		chip->irq_eoi(&desc->irq_data);

	unmask_irq(desc);
}
451 | ||
399b5da2 TG |
/*
 *	handle_nested_irq - Handle a nested irq from a irq thread
 *	@irq:	the interrupt number
 *
 *	Handle interrupts which are nested into a threaded interrupt
 *	handler. The handler function is called inside the calling
 *	threads context. May sleep, hence the might_sleep() check.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
		/* Remember the event for a replay on enable/request. */
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	/* Drop the lock while the (sleepable) thread functions run. */
	raw_spin_unlock_irq(&desc->lock);

	action_ret = IRQ_NONE;
	for_each_action_of_desc(desc, action)
		action_ret |= action->thread_fn(action->irq, action->dev_id);

	if (!irq_settings_no_debug(desc))
		note_interrupt(desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);
496 | ||
fe200ae4 TG |
/*
 * If a spurious-interrupt poll is in progress on another CPU, wait for
 * it to finish. Returns false when no poll is in progress.
 */
static bool irq_check_poll(struct irq_desc *desc)
{
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
		return false;
	return irq_wait_for_poll(desc);
}
503 | ||
c7bd3ec0 TG |
/*
 * Decide whether a flow handler may proceed with handling the interrupt.
 * Returns false when the interrupt must not run now (armed wakeup event
 * or concurrent handling/poll on another CPU).
 */
static bool irq_may_run(struct irq_desc *desc)
{
	unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;

	/*
	 * If the interrupt is not in progress and is not an armed
	 * wakeup interrupt, proceed.
	 */
	if (!irqd_has_set(&desc->irq_data, mask))
		return true;

	/*
	 * If the interrupt is an armed wakeup source, mark it pending
	 * and suspended, disable it and notify the pm core about the
	 * event.
	 */
	if (irq_pm_check_wakeup(desc))
		return false;

	/*
	 * Handle a potential concurrent poll on a different core.
	 */
	return irq_check_poll(desc);
}
528 | ||
dd87eb3a TG |
/**
 *	handle_simple_irq - Simple and software-decoded IRQs.
 *	@desc:	the interrupt description structure for this irq
 *
 *	Simple interrupts are either sent from a demultiplexing interrupt
 *	handler or come from hardware, where no interrupt hardware control
 *	is necessary.
 *
 *	Note: The caller is expected to handle the ack, clear, mask and
 *	unmask issues if necessary.
 */
void handle_simple_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		/* No handler or disabled: remember it for a later replay. */
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);
dd87eb3a | 561 | |
edd14cfe KB |
/**
 *	handle_untracked_irq - Simple and software-decoded IRQs.
 *	@desc:	the interrupt description structure for this irq
 *
 *	Untracked interrupts are sent from a demultiplexing interrupt
 *	handler when the demultiplexer does not know which device in its
 *	multiplexed irq domain generated the interrupt. IRQ's handled
 *	through here are not subjected to stats tracking, randomness, or
 *	spurious interrupt detection.
 *
 *	Note: Like handle_simple_irq, the caller is expected to handle
 *	the ack, clear, mask and unmask issues if necessary.
 */
void handle_untracked_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	desc->istate &= ~IRQS_PENDING;
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	/* Drop the lock while the handlers run. */
	raw_spin_unlock(&desc->lock);

	__handle_irq_event_percpu(desc);

	raw_spin_lock(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_untracked_irq);
602 | ||
ac563761 TG |
/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	/*
	 * We need to unmask in the following cases:
	 * - Standard level irq (IRQF_ONESHOT is not set)
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
		unmask_irq(desc);
}
dd87eb3a TG |
/**
 *	handle_level_irq - Level type irq handler
 *	@desc:	the interrupt description structure for this irq
 *
 *	Level type interrupts are active as long as the hardware line has
 *	the active level. This may require to mask the interrupt and unmask
 *	it after the associated handler has acknowledged the device, so the
 *	interrupt line is back to inactive.
 */
void handle_level_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	/* Mask first so the level-active line cannot retrigger. */
	mask_ack_irq(desc);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action available
	 * keep it masked and get out of here
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	handle_irq_event(desc);

	cond_unmask_irq(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);
dd87eb3a | 658 | |
328a4978 TG |
659 | static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip) |
660 | { | |
661 | if (!(desc->istate & IRQS_ONESHOT)) { | |
662 | chip->irq_eoi(&desc->irq_data); | |
663 | return; | |
664 | } | |
665 | /* | |
666 | * We need to unmask in the following cases: | |
667 | * - Oneshot irq which did not wake the thread (caused by a | |
668 | * spurious interrupt or a primary handler handling it | |
669 | * completely). | |
670 | */ | |
671 | if (!irqd_irq_disabled(&desc->irq_data) && | |
672 | irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) { | |
673 | chip->irq_eoi(&desc->irq_data); | |
674 | unmask_irq(desc); | |
675 | } else if (!(chip->flags & IRQCHIP_EOI_THREADED)) { | |
676 | chip->irq_eoi(&desc->irq_data); | |
677 | } | |
678 | } | |
679 | ||
/**
 *	handle_fasteoi_irq - irq handler for transparent controllers
 *	@desc:	the interrupt description structure for this irq
 *
 *	Only a single callback will be issued to the chip: an ->eoi()
 *	call when the interrupt has been serviced. This enables support
 *	for modern forms of interrupt handlers, which handle the flow
 *	details in hardware, transparently.
 */
void handle_fasteoi_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action available
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	kstat_incr_irqs_this_cpu(desc);
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	/* Still issue the EOI unless the chip only wants it when handled. */
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_irq);
dd87eb3a | 726 | |
2dcf1fbc JT |
/**
 *	handle_fasteoi_nmi - irq handler for NMI interrupt lines
 *	@desc:	the interrupt description structure for this irq
 *
 *	A simple NMI-safe handler, considering the restrictions
 *	from request_nmi.
 *
 *	Only a single callback will be issued to the chip: an ->eoi()
 *	call when the interrupt has been serviced. This enables support
 *	for modern forms of interrupt handlers, which handle the flow
 *	details in hardware, transparently.
 */
void handle_fasteoi_nmi(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	unsigned int irq = irq_desc_get_irq(desc);
	irqreturn_t res;

	__kstat_incr_irqs_this_cpu(desc);

	trace_irq_handler_entry(irq, action);
	/*
	 * NMIs cannot be shared, there is only one action.
	 */
	res = action->handler(irq, action->dev_id);
	trace_irq_handler_exit(irq, action, res);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_nmi);
759 | ||
dd87eb3a TG |
/**
 *	handle_edge_irq - edge type IRQ handler
 *	@desc:	the interrupt description structure for this irq
 *
 *	Interrupt occurs on the falling and/or rising edge of a hardware
 *	signal. The occurrence is latched into the irq controller hardware
 *	and must be acked in order to be reenabled. After the ack another
 *	interrupt can happen on the same source even before the first one
 *	is handled by the associated event handler. If this happens it
 *	might be necessary to disable (mask) the interrupt depending on the
 *	controller hardware. This requires to reenable the interrupt inside
 *	of the loop which handles the interrupts which have arrived while
 *	the handler was running. If all pending interrupts are handled, the
 *	loop is left.
 */
void handle_edge_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	/*
	 * If it's disabled or no action available then mask it and get
	 * out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Reenable it, if it was not disabled in meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL(handle_edge_irq);
dd87eb3a | 828 | |
0521c8fb TG |
#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 * handle_edge_eoi_irq - edge eoi type IRQ handler
 * @desc: the interrupt description structure for this irq
 *
 * Similar as the above handle_edge_irq, but using eoi and w/o the
 * mask/unmask logic. The eoi is issued on every exit path.
 */
void handle_edge_eoi_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/* In progress elsewhere: mark pending for a later resend. */
	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	/*
	 * If it's disabled or no action available then mark it pending
	 * and get out of here; the eoi is still issued below.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	kstat_incr_irqs_this_cpu(desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif
875 | ||
dd87eb3a | 876 | /** |
24b26d42 | 877 | * handle_percpu_irq - Per CPU local irq handler |
dd87eb3a | 878 | * @desc: the interrupt description structure for this irq |
dd87eb3a TG |
879 | * |
880 | * Per CPU interrupts on SMP machines without locking requirements | |
881 | */ | |
bd0b9ac4 | 882 | void handle_percpu_irq(struct irq_desc *desc) |
dd87eb3a | 883 | { |
35e857cb | 884 | struct irq_chip *chip = irq_desc_get_chip(desc); |
dd87eb3a | 885 | |
1136b072 TG |
886 | /* |
887 | * PER CPU interrupts are not serialized. Do not touch | |
888 | * desc->tot_count. | |
889 | */ | |
890 | __kstat_incr_irqs_this_cpu(desc); | |
dd87eb3a | 891 | |
849f061c TG |
892 | if (chip->irq_ack) |
893 | chip->irq_ack(&desc->irq_data); | |
dd87eb3a | 894 | |
71f64340 | 895 | handle_irq_event_percpu(desc); |
dd87eb3a | 896 | |
849f061c TG |
897 | if (chip->irq_eoi) |
898 | chip->irq_eoi(&desc->irq_data); | |
dd87eb3a TG |
899 | } |
900 | ||
31d9d9b6 MZ |
901 | /** |
902 | * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids | |
31d9d9b6 MZ |
903 | * @desc: the interrupt description structure for this irq |
904 | * | |
905 | * Per CPU interrupts on SMP machines without locking requirements. Same as | |
906 | * handle_percpu_irq() above but with the following extras: | |
907 | * | |
908 | * action->percpu_dev_id is a pointer to percpu variables which | |
909 | * contain the real device id for the cpu on which this handler is | |
910 | * called | |
911 | */ | |
bd0b9ac4 | 912 | void handle_percpu_devid_irq(struct irq_desc *desc) |
31d9d9b6 MZ |
913 | { |
914 | struct irq_chip *chip = irq_desc_get_chip(desc); | |
915 | struct irqaction *action = desc->action; | |
bd0b9ac4 | 916 | unsigned int irq = irq_desc_get_irq(desc); |
31d9d9b6 MZ |
917 | irqreturn_t res; |
918 | ||
1136b072 TG |
919 | /* |
920 | * PER CPU interrupts are not serialized. Do not touch | |
921 | * desc->tot_count. | |
922 | */ | |
923 | __kstat_incr_irqs_this_cpu(desc); | |
31d9d9b6 MZ |
924 | |
925 | if (chip->irq_ack) | |
926 | chip->irq_ack(&desc->irq_data); | |
927 | ||
fc590c22 TG |
928 | if (likely(action)) { |
929 | trace_irq_handler_entry(irq, action); | |
930 | res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id)); | |
931 | trace_irq_handler_exit(irq, action, res); | |
932 | } else { | |
933 | unsigned int cpu = smp_processor_id(); | |
934 | bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled); | |
935 | ||
936 | if (enabled) | |
937 | irq_percpu_disable(desc, cpu); | |
938 | ||
939 | pr_err_once("Spurious%s percpu IRQ%u on CPU%u\n", | |
940 | enabled ? " and unmasked" : "", irq, cpu); | |
941 | } | |
31d9d9b6 MZ |
942 | |
943 | if (chip->irq_eoi) | |
944 | chip->irq_eoi(&desc->irq_data); | |
945 | } | |
946 | ||
2dcf1fbc JT |
947 | /** |
948 | * handle_percpu_devid_fasteoi_nmi - Per CPU local NMI handler with per cpu | |
949 | * dev ids | |
950 | * @desc: the interrupt description structure for this irq | |
951 | * | |
952 | * Similar to handle_fasteoi_nmi, but handling the dev_id cookie | |
953 | * as a percpu pointer. | |
954 | */ | |
955 | void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc) | |
956 | { | |
957 | struct irq_chip *chip = irq_desc_get_chip(desc); | |
958 | struct irqaction *action = desc->action; | |
959 | unsigned int irq = irq_desc_get_irq(desc); | |
960 | irqreturn_t res; | |
961 | ||
c09cb129 ST |
962 | __kstat_incr_irqs_this_cpu(desc); |
963 | ||
2dcf1fbc JT |
964 | trace_irq_handler_entry(irq, action); |
965 | res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id)); | |
966 | trace_irq_handler_exit(irq, action, res); | |
967 | ||
968 | if (chip->irq_eoi) | |
969 | chip->irq_eoi(&desc->irq_data); | |
970 | } | |
971 | ||
/*
 * Install (or uninstall, when @handle is NULL) the flow handler for @desc.
 * @is_chained marks a demultiplexing handler which is started immediately
 * and excluded from request_irq()/probing. Caller holds desc->lock (bus
 * locked via irq_get_desc_buslock()).
 */
static void
__irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
		     int is_chained, const char *name)
{
	if (!handle) {
		/* NULL handler means uninstall */
		handle = handle_bad_irq;
	} else {
		struct irq_data *irq_data = &desc->irq_data;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		/*
		 * With hierarchical domains we might run into a
		 * situation where the outermost chip is not yet set
		 * up, but the inner chips are there. Instead of
		 * bailing we install the handler, but obviously we
		 * cannot enable/startup the interrupt at this point.
		 */
		while (irq_data) {
			if (irq_data->chip != &no_irq_chip)
				break;
			/*
			 * Bail out if the outer chip is not set up
			 * and the interrupt supposed to be started
			 * right away.
			 */
			if (WARN_ON(is_chained))
				return;
			/* Try the parent */
			irq_data = irq_data->parent_data;
		}
#endif
		/* No usable chip anywhere in the hierarchy: refuse. */
		if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip))
			return;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		/* Silence the line before tearing the handler down. */
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		if (is_chained) {
			desc->action = NULL;
			/* Drop the PM reference taken at install time. */
			WARN_ON(irq_chip_pm_put(irq_desc_get_irq_data(desc)));
		}
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		unsigned int type = irqd_get_trigger_type(&desc->irq_data);

		/*
		 * We're about to start this interrupt immediately,
		 * hence the need to set the trigger configuration.
		 * But the .set_type callback may have overridden the
		 * flow handler, ignoring that we're dealing with a
		 * chained interrupt. Reset it immediately because we
		 * do know better.
		 */
		if (type != IRQ_TYPE_NONE) {
			__irq_set_trigger(desc, type);
			desc->handle_irq = handle;
		}

		/* Chained interrupts are driver-internal: hide them. */
		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		desc->action = &chained_action;
		WARN_ON(irq_chip_pm_get(irq_desc_get_irq_data(desc)));
		irq_activate_and_startup(desc, IRQ_RESEND);
	}
}
1044 | ||
1045 | void | |
1046 | __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, | |
1047 | const char *name) | |
1048 | { | |
1049 | unsigned long flags; | |
1050 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0); | |
1051 | ||
1052 | if (!desc) | |
1053 | return; | |
1054 | ||
1055 | __irq_do_set_handler(desc, handle, is_chained, name); | |
02725e74 | 1056 | irq_put_desc_busunlock(desc, flags); |
dd87eb3a | 1057 | } |
3836ca08 | 1058 | EXPORT_SYMBOL_GPL(__irq_set_handler); |
dd87eb3a | 1059 | |
3b0f95be RK |
1060 | void |
1061 | irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle, | |
1062 | void *data) | |
1063 | { | |
1064 | unsigned long flags; | |
1065 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0); | |
1066 | ||
1067 | if (!desc) | |
1068 | return; | |
1069 | ||
af7080e0 | 1070 | desc->irq_common_data.handler_data = data; |
2c4569ca | 1071 | __irq_do_set_handler(desc, handle, 1, NULL); |
3b0f95be RK |
1072 | |
1073 | irq_put_desc_busunlock(desc, flags); | |
1074 | } | |
1075 | EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data); | |
1076 | ||
dd87eb3a | 1077 | void |
393e1280 | 1078 | irq_set_chip_and_handler_name(unsigned int irq, const struct irq_chip *chip, |
a460e745 | 1079 | irq_flow_handler_t handle, const char *name) |
dd87eb3a | 1080 | { |
35e857cb | 1081 | irq_set_chip(irq, chip); |
3836ca08 | 1082 | __irq_set_handler(irq, handle, 0, name); |
dd87eb3a | 1083 | } |
b3ae66f2 | 1084 | EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name); |
46f4f8f6 | 1085 | |
/*
 * irq_modify_status - clear/set irq settings flags and mirror them into
 * the irqd state bits.
 * @irq: irq number
 * @clr: _IRQ_* flag bits to clear
 * @set: _IRQ_* flag bits to set (applied after @clr)
 */
void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags, trigger, tmp;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return;

	/*
	 * Warn when a driver sets the no autoenable flag on an already
	 * active interrupt.
	 */
	WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN));

	irq_settings_clr_and_set(desc, clr, set);

	/* Preserve the current trigger unless the settings override it. */
	trigger = irqd_get_trigger_type(&desc->irq_data);

	/* Rebuild the derived irqd bits from the updated settings. */
	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	tmp = irq_settings_get_trigger_mask(desc);
	if (tmp != IRQ_TYPE_NONE)
		trigger = tmp;

	irqd_set(&desc->irq_data, trigger);

	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);
0fdb4b25 | 1124 | |
#ifdef CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE
/**
 * irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_online()
 * for each.
 */
void irq_cpu_online(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		/*
		 * Invoke the callback unless the chip restricts it to
		 * enabled interrupts and this one is disabled.
		 */
		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_online &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_online(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}
1155 | ||
/**
 * irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_offline()
 * for each.
 */
void irq_cpu_offline(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		/*
		 * Invoke the callback unless the chip restricts it to
		 * enabled interrupts and this one is disabled.
		 */
		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}
#endif
85f08c17 JL |
1186 | |
1187 | #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY | |
7703b08c DD |
1188 | |
#ifdef CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS
/**
 * handle_fasteoi_ack_irq - irq handler for edge hierarchy
 *			    stacked on transparent controllers
 *
 * @desc: the interrupt description structure for this irq
 *
 * Like handle_fasteoi_irq(), but for use with hierarchy where
 * the irq_chip also needs to have its ->irq_ack() function
 * called.
 */
void handle_fasteoi_ack_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);

	/* In progress on another CPU or disabled for threading: defer. */
	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action available
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	kstat_incr_irqs_this_cpu(desc);
	/* Oneshot interrupts stay masked until the thread finishes. */
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	/* Issue the eoi on the untaken path unless the chip opts out. */
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_ack_irq);
1240 | ||
/**
 * handle_fasteoi_mask_irq - irq handler for level hierarchy
 *			     stacked on transparent controllers
 *
 * @desc: the interrupt description structure for this irq
 *
 * Like handle_fasteoi_irq(), but for use with hierarchy where
 * the irq_chip also needs to have its ->irq_mask_ack() function
 * called.
 */
void handle_fasteoi_mask_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);
	/* Level irq: mask+ack up front; unmask happens via cond_unmask. */
	mask_ack_irq(desc);

	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action available
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	kstat_incr_irqs_this_cpu(desc);
	/* Oneshot interrupts stay masked until the thread finishes. */
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	/* Issue the eoi on the untaken path unless the chip opts out. */
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_mask_irq);

#endif /* CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS */
1291 | ||
4a169a95 MS |
1292 | /** |
1293 | * irq_chip_set_parent_state - set the state of a parent interrupt. | |
1294 | * | |
1295 | * @data: Pointer to interrupt specific data | |
1296 | * @which: State to be restored (one of IRQCHIP_STATE_*) | |
1297 | * @val: Value corresponding to @which | |
1298 | * | |
1299 | * Conditional success, if the underlying irqchip does not implement it. | |
1300 | */ | |
1301 | int irq_chip_set_parent_state(struct irq_data *data, | |
1302 | enum irqchip_irq_state which, | |
1303 | bool val) | |
1304 | { | |
1305 | data = data->parent_data; | |
1306 | ||
1307 | if (!data || !data->chip->irq_set_irqchip_state) | |
1308 | return 0; | |
1309 | ||
1310 | return data->chip->irq_set_irqchip_state(data, which, val); | |
1311 | } | |
1312 | EXPORT_SYMBOL_GPL(irq_chip_set_parent_state); | |
1313 | ||
1314 | /** | |
1315 | * irq_chip_get_parent_state - get the state of a parent interrupt. | |
1316 | * | |
1317 | * @data: Pointer to interrupt specific data | |
1318 | * @which: one of IRQCHIP_STATE_* the caller wants to know | |
1319 | * @state: a pointer to a boolean where the state is to be stored | |
1320 | * | |
1321 | * Conditional success, if the underlying irqchip does not implement it. | |
1322 | */ | |
1323 | int irq_chip_get_parent_state(struct irq_data *data, | |
1324 | enum irqchip_irq_state which, | |
1325 | bool *state) | |
1326 | { | |
1327 | data = data->parent_data; | |
1328 | ||
1329 | if (!data || !data->chip->irq_get_irqchip_state) | |
1330 | return 0; | |
1331 | ||
1332 | return data->chip->irq_get_irqchip_state(data, which, state); | |
1333 | } | |
1334 | EXPORT_SYMBOL_GPL(irq_chip_get_parent_state); | |
1335 | ||
3cfeffc2 SA |
1336 | /** |
1337 | * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if | |
1338 | * NULL) | |
1339 | * @data: Pointer to interrupt specific data | |
1340 | */ | |
1341 | void irq_chip_enable_parent(struct irq_data *data) | |
1342 | { | |
1343 | data = data->parent_data; | |
1344 | if (data->chip->irq_enable) | |
1345 | data->chip->irq_enable(data); | |
1346 | else | |
1347 | data->chip->irq_unmask(data); | |
1348 | } | |
65efd9a4 | 1349 | EXPORT_SYMBOL_GPL(irq_chip_enable_parent); |
3cfeffc2 SA |
1350 | |
1351 | /** | |
1352 | * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if | |
1353 | * NULL) | |
1354 | * @data: Pointer to interrupt specific data | |
1355 | */ | |
1356 | void irq_chip_disable_parent(struct irq_data *data) | |
1357 | { | |
1358 | data = data->parent_data; | |
1359 | if (data->chip->irq_disable) | |
1360 | data->chip->irq_disable(data); | |
1361 | else | |
1362 | data->chip->irq_mask(data); | |
1363 | } | |
65efd9a4 | 1364 | EXPORT_SYMBOL_GPL(irq_chip_disable_parent); |
3cfeffc2 | 1365 | |
85f08c17 JL |
1366 | /** |
1367 | * irq_chip_ack_parent - Acknowledge the parent interrupt | |
1368 | * @data: Pointer to interrupt specific data | |
1369 | */ | |
1370 | void irq_chip_ack_parent(struct irq_data *data) | |
1371 | { | |
1372 | data = data->parent_data; | |
1373 | data->chip->irq_ack(data); | |
1374 | } | |
a4289dc2 | 1375 | EXPORT_SYMBOL_GPL(irq_chip_ack_parent); |
85f08c17 | 1376 | |
56e8abab YC |
1377 | /** |
1378 | * irq_chip_mask_parent - Mask the parent interrupt | |
1379 | * @data: Pointer to interrupt specific data | |
1380 | */ | |
1381 | void irq_chip_mask_parent(struct irq_data *data) | |
1382 | { | |
1383 | data = data->parent_data; | |
1384 | data->chip->irq_mask(data); | |
1385 | } | |
52b2a05f | 1386 | EXPORT_SYMBOL_GPL(irq_chip_mask_parent); |
56e8abab | 1387 | |
5aa5bd56 LW |
1388 | /** |
1389 | * irq_chip_mask_ack_parent - Mask and acknowledge the parent interrupt | |
1390 | * @data: Pointer to interrupt specific data | |
1391 | */ | |
1392 | void irq_chip_mask_ack_parent(struct irq_data *data) | |
1393 | { | |
1394 | data = data->parent_data; | |
1395 | data->chip->irq_mask_ack(data); | |
1396 | } | |
1397 | EXPORT_SYMBOL_GPL(irq_chip_mask_ack_parent); | |
1398 | ||
56e8abab YC |
1399 | /** |
1400 | * irq_chip_unmask_parent - Unmask the parent interrupt | |
1401 | * @data: Pointer to interrupt specific data | |
1402 | */ | |
1403 | void irq_chip_unmask_parent(struct irq_data *data) | |
1404 | { | |
1405 | data = data->parent_data; | |
1406 | data->chip->irq_unmask(data); | |
1407 | } | |
52b2a05f | 1408 | EXPORT_SYMBOL_GPL(irq_chip_unmask_parent); |
56e8abab YC |
1409 | |
1410 | /** | |
1411 | * irq_chip_eoi_parent - Invoke EOI on the parent interrupt | |
1412 | * @data: Pointer to interrupt specific data | |
1413 | */ | |
1414 | void irq_chip_eoi_parent(struct irq_data *data) | |
1415 | { | |
1416 | data = data->parent_data; | |
1417 | data->chip->irq_eoi(data); | |
1418 | } | |
52b2a05f | 1419 | EXPORT_SYMBOL_GPL(irq_chip_eoi_parent); |
56e8abab YC |
1420 | |
1421 | /** | |
1422 | * irq_chip_set_affinity_parent - Set affinity on the parent interrupt | |
1423 | * @data: Pointer to interrupt specific data | |
1424 | * @dest: The affinity mask to set | |
1425 | * @force: Flag to enforce setting (disable online checks) | |
1426 | * | |
5c982c58 | 1427 | * Conditional, as the underlying parent chip might not implement it. |
56e8abab YC |
1428 | */ |
1429 | int irq_chip_set_affinity_parent(struct irq_data *data, | |
1430 | const struct cpumask *dest, bool force) | |
1431 | { | |
1432 | data = data->parent_data; | |
1433 | if (data->chip->irq_set_affinity) | |
1434 | return data->chip->irq_set_affinity(data, dest, force); | |
b7560de1 GS |
1435 | |
1436 | return -ENOSYS; | |
1437 | } | |
65efd9a4 | 1438 | EXPORT_SYMBOL_GPL(irq_chip_set_affinity_parent); |
b7560de1 GS |
1439 | |
1440 | /** | |
1441 | * irq_chip_set_type_parent - Set IRQ type on the parent interrupt | |
1442 | * @data: Pointer to interrupt specific data | |
1443 | * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h | |
1444 | * | |
1445 | * Conditional, as the underlying parent chip might not implement it. | |
1446 | */ | |
1447 | int irq_chip_set_type_parent(struct irq_data *data, unsigned int type) | |
1448 | { | |
1449 | data = data->parent_data; | |
1450 | ||
1451 | if (data->chip->irq_set_type) | |
1452 | return data->chip->irq_set_type(data, type); | |
56e8abab YC |
1453 | |
1454 | return -ENOSYS; | |
1455 | } | |
52b2a05f | 1456 | EXPORT_SYMBOL_GPL(irq_chip_set_type_parent); |
56e8abab | 1457 | |
85f08c17 JL |
1458 | /** |
1459 | * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware | |
1460 | * @data: Pointer to interrupt specific data | |
1461 | * | |
1462 | * Iterate through the domain hierarchy of the interrupt and check | |
1463 | * whether a hw retrigger function exists. If yes, invoke it. | |
1464 | */ | |
1465 | int irq_chip_retrigger_hierarchy(struct irq_data *data) | |
1466 | { | |
1467 | for (data = data->parent_data; data; data = data->parent_data) | |
1468 | if (data->chip && data->chip->irq_retrigger) | |
1469 | return data->chip->irq_retrigger(data); | |
1470 | ||
6d4affea | 1471 | return 0; |
85f08c17 | 1472 | } |
8d16f5b9 | 1473 | EXPORT_SYMBOL_GPL(irq_chip_retrigger_hierarchy); |
08b55e2a | 1474 | |
0a4377de JL |
1475 | /** |
1476 | * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt | |
1477 | * @data: Pointer to interrupt specific data | |
8505a81b | 1478 | * @vcpu_info: The vcpu affinity information |
0a4377de JL |
1479 | */ |
1480 | int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info) | |
1481 | { | |
1482 | data = data->parent_data; | |
1483 | if (data->chip->irq_set_vcpu_affinity) | |
1484 | return data->chip->irq_set_vcpu_affinity(data, vcpu_info); | |
1485 | ||
1486 | return -ENOSYS; | |
1487 | } | |
8d16f5b9 | 1488 | EXPORT_SYMBOL_GPL(irq_chip_set_vcpu_affinity_parent); |
08b55e2a MZ |
1489 | /** |
1490 | * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt | |
1491 | * @data: Pointer to interrupt specific data | |
1492 | * @on: Whether to set or reset the wake-up capability of this irq | |
1493 | * | |
1494 | * Conditional, as the underlying parent chip might not implement it. | |
1495 | */ | |
1496 | int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on) | |
1497 | { | |
1498 | data = data->parent_data; | |
325aa195 SB |
1499 | |
1500 | if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE) | |
1501 | return 0; | |
1502 | ||
08b55e2a MZ |
1503 | if (data->chip->irq_set_wake) |
1504 | return data->chip->irq_set_wake(data, on); | |
1505 | ||
1506 | return -ENOSYS; | |
1507 | } | |
38f7ae9b | 1508 | EXPORT_SYMBOL_GPL(irq_chip_set_wake_parent); |
2bd1298a LV |
1509 | |
1510 | /** | |
1511 | * irq_chip_request_resources_parent - Request resources on the parent interrupt | |
1512 | * @data: Pointer to interrupt specific data | |
1513 | */ | |
1514 | int irq_chip_request_resources_parent(struct irq_data *data) | |
1515 | { | |
1516 | data = data->parent_data; | |
1517 | ||
1518 | if (data->chip->irq_request_resources) | |
1519 | return data->chip->irq_request_resources(data); | |
1520 | ||
95001b75 AB |
1521 | /* no error on missing optional irq_chip::irq_request_resources */ |
1522 | return 0; | |
2bd1298a LV |
1523 | } |
1524 | EXPORT_SYMBOL_GPL(irq_chip_request_resources_parent); | |
1525 | ||
1526 | /** | |
1527 | * irq_chip_release_resources_parent - Release resources on the parent interrupt | |
1528 | * @data: Pointer to interrupt specific data | |
1529 | */ | |
1530 | void irq_chip_release_resources_parent(struct irq_data *data) | |
1531 | { | |
1532 | data = data->parent_data; | |
1533 | if (data->chip->irq_release_resources) | |
1534 | data->chip->irq_release_resources(data); | |
1535 | } | |
1536 | EXPORT_SYMBOL_GPL(irq_chip_release_resources_parent); | |
85f08c17 | 1537 | #endif |
515085ef JL |
1538 | |
1539 | /** | |
5c982c58 | 1540 | * irq_chip_compose_msi_msg - Compose msi message for a irq chip |
515085ef JL |
1541 | * @data: Pointer to interrupt specific data |
1542 | * @msg: Pointer to the MSI message | |
1543 | * | |
1544 | * For hierarchical domains we find the first chip in the hierarchy | |
1545 | * which implements the irq_compose_msi_msg callback. For non | |
1546 | * hierarchical we use the top level chip. | |
1547 | */ | |
1548 | int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) | |
1549 | { | |
13b90cad | 1550 | struct irq_data *pos; |
515085ef | 1551 | |
13b90cad | 1552 | for (pos = NULL; !pos && data; data = irqd_get_parent_data(data)) { |
515085ef JL |
1553 | if (data->chip && data->chip->irq_compose_msi_msg) |
1554 | pos = data; | |
13b90cad TG |
1555 | } |
1556 | ||
515085ef JL |
1557 | if (!pos) |
1558 | return -ENOSYS; | |
1559 | ||
1560 | pos->chip->irq_compose_msi_msg(pos, msg); | |
515085ef JL |
1561 | return 0; |
1562 | } | |
be45beb2 | 1563 | |
1f8863bf MZ |
1564 | static struct device *irq_get_parent_device(struct irq_data *data) |
1565 | { | |
1f8863bf MZ |
1566 | if (data->domain) |
1567 | return data->domain->dev; | |
1568 | ||
1569 | return NULL; | |
1570 | } | |
1571 | ||
be45beb2 JH |
1572 | /** |
1573 | * irq_chip_pm_get - Enable power for an IRQ chip | |
1574 | * @data: Pointer to interrupt specific data | |
1575 | * | |
1576 | * Enable the power to the IRQ chip referenced by the interrupt data | |
1577 | * structure. | |
1578 | */ | |
1579 | int irq_chip_pm_get(struct irq_data *data) | |
1580 | { | |
1f8863bf | 1581 | struct device *dev = irq_get_parent_device(data); |
ce481895 | 1582 | int retval = 0; |
be45beb2 | 1583 | |
ce481895 MC |
1584 | if (IS_ENABLED(CONFIG_PM) && dev) |
1585 | retval = pm_runtime_resume_and_get(dev); | |
be45beb2 | 1586 | |
ce481895 | 1587 | return retval; |
be45beb2 JH |
1588 | } |
1589 | ||
1590 | /** | |
1591 | * irq_chip_pm_put - Disable power for an IRQ chip | |
1592 | * @data: Pointer to interrupt specific data | |
1593 | * | |
1594 | * Disable the power to the IRQ chip referenced by the interrupt data | |
1595 | * structure, belongs. Note that power will only be disabled, once this | |
1596 | * function has been called for all IRQs that have called irq_chip_pm_get(). | |
1597 | */ | |
1598 | int irq_chip_pm_put(struct irq_data *data) | |
1599 | { | |
1f8863bf | 1600 | struct device *dev = irq_get_parent_device(data); |
be45beb2 JH |
1601 | int retval = 0; |
1602 | ||
1f8863bf MZ |
1603 | if (IS_ENABLED(CONFIG_PM) && dev) |
1604 | retval = pm_runtime_put(dev); | |
be45beb2 JH |
1605 | |
1606 | return (retval < 0) ? retval : 0; | |
1607 | } |