Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
1da177e4 LT | 2 | /* interrupt.h */ |
3 | #ifndef _LINUX_INTERRUPT_H | |
4 | #define _LINUX_INTERRUPT_H | |
5 | ||
1da177e4 | 6 | #include <linux/kernel.h> |
1da177e4 | 7 | #include <linux/bitops.h> |
1da177e4 | 8 | #include <linux/cpumask.h> |
908dcecd | 9 | #include <linux/irqreturn.h> |
dd3a1db9 | 10 | #include <linux/irqnr.h> |
1da177e4 | 11 | #include <linux/hardirq.h> |
de30a2b3 | 12 | #include <linux/irqflags.h> |
9ba5f005 | 13 | #include <linux/hrtimer.h> |
cd7eab44 BH | 14 | #include <linux/kref.h>
15 | #include <linux/workqueue.h> | |
91cc470e | 16 | #include <linux/jump_label.h> |
0ebb26e7 | 17 | |
60063497 | 18 | #include <linux/atomic.h> |
1da177e4 | 19 | #include <asm/ptrace.h> |
7d65f4a6 | 20 | #include <asm/irq.h> |
229a7186 | 21 | #include <asm/sections.h> |
1da177e4 | 22 | |
6e213616 TG | 23 | /*
24 | * These correspond to the IORESOURCE_IRQ_* defines in | |
25 | * linux/ioport.h to select the interrupt line behaviour. When | |
26 | * requesting an interrupt without specifying an IRQF_TRIGGER, the | |
27 | * setting should be assumed to be "as already configured", which | |
28 | * may be as per machine or firmware initialisation. | |
29 | */ | |
30 | #define IRQF_TRIGGER_NONE 0x00000000 | |
31 | #define IRQF_TRIGGER_RISING 0x00000001 | |
32 | #define IRQF_TRIGGER_FALLING 0x00000002 | |
33 | #define IRQF_TRIGGER_HIGH 0x00000004 | |
34 | #define IRQF_TRIGGER_LOW 0x00000008 | |
35 | #define IRQF_TRIGGER_MASK (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \ | |
36 | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING) | |
37 | #define IRQF_TRIGGER_PROBE 0x00000010 | |
38 | ||
39 | /* | |
40 | * These flags are used only by the kernel as part of the | |
41 | * irq handling routines. | |
42 | * | |
6e213616 TG | 43 | * IRQF_SHARED - allow sharing the irq among several devices
44 | * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur | |
45 | * IRQF_TIMER - Flag to mark this interrupt as timer interrupt | |
950f4427 TG | 46 | * IRQF_PERCPU - Interrupt is per cpu
47 | * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing | |
d85a60d8 | 48 | * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
b8d62f33 | 49 | * registered first in a shared interrupt is considered for
d85a60d8 | 50 | * performance reasons)
b25c340c TG | 51 | * IRQF_ONESHOT - Interrupt is not re-enabled after the hardirq handler finished.
52 | * Used by threaded interrupts which need to keep the | |
53 | * irq line disabled until the threaded handler has been run. | |
737eb030 MR | 54 | * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend. Does not guarantee
55 | * that this interrupt will wake the system from a suspended | |
151f4e2b | 56 | * state. See Documentation/power/suspend-and-interrupts.rst
dc5f219e | 57 | * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
0c4602ff | 58 | * IRQF_NO_THREAD - Interrupt cannot be threaded
9bab0b7f IC | 59 | * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
60 | * resume time. | |
17f48034 RW | 61 | * IRQF_COND_SUSPEND - If the IRQ is shared with a NO_SUSPEND user, execute this
62 | * interrupt handler after suspending interrupts. For system | |
63 | * wakeup devices users need to implement wakeup detection in | |
64 | * their interrupt handlers. | |
cbe16f35 BS | 65 | * IRQF_NO_AUTOEN - Don't enable IRQ or NMI automatically when users request it.
66 | * Users will enable it explicitly by enable_irq() or enable_nmi() | |
67 | * later. | |
c2b1063e TG | 68 | * IRQF_NO_DEBUG - Exclude from runaway detection for IPI and similar handlers;
69 | * depends on IRQF_PERCPU. | |
6e213616 | 70 | */
6e213616 TG | 71 | #define IRQF_SHARED 0x00000080
72 | #define IRQF_PROBE_SHARED 0x00000100 | |
685fd0b4 | 73 | #define __IRQF_TIMER 0x00000200 |
284c6680 | 74 | #define IRQF_PERCPU 0x00000400 |
950f4427 | 75 | #define IRQF_NOBALANCING 0x00000800 |
d85a60d8 | 76 | #define IRQF_IRQPOLL 0x00001000 |
b25c340c | 77 | #define IRQF_ONESHOT 0x00002000 |
685fd0b4 | 78 | #define IRQF_NO_SUSPEND 0x00004000 |
dc5f219e | 79 | #define IRQF_FORCE_RESUME 0x00008000 |
0c4602ff | 80 | #define IRQF_NO_THREAD 0x00010000 |
9bab0b7f | 81 | #define IRQF_EARLY_RESUME 0x00020000 |
17f48034 | 82 | #define IRQF_COND_SUSPEND 0x00040000 |
cbe16f35 | 83 | #define IRQF_NO_AUTOEN 0x00080000 |
c2b1063e | 84 | #define IRQF_NO_DEBUG 0x00100000 |
685fd0b4 | 85 | |
0c4602ff | 86 | #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD) |
3aa551c9 | 87 | |
b4e6b097 | 88 | /* |
ae731f8d MZ | 89 | * These values can be returned by request_any_context_irq() and
90 | * describe the context the interrupt will be run in. | |
91 | * | |
92 | * IRQC_IS_HARDIRQ - interrupt runs in hardirq context | |
93 | * IRQC_IS_NESTED - interrupt runs in a nested threaded context | |
94 | */ | |
95 | enum { | |
96 | IRQC_IS_HARDIRQ = 0, | |
97 | IRQC_IS_NESTED, | |
98 | }; | |
99 | ||
7d12e780 | 100 | typedef irqreturn_t (*irq_handler_t)(int, void *); |
da482792 | 101 | |
a9d0a1a3 TG | 102 | /**
103 | * struct irqaction - per interrupt action descriptor | |
104 | * @handler: interrupt handler function | |
a9d0a1a3 TG | 105 | * @name: name of the device
106 | * @dev_id: cookie to identify the device | |
31d9d9b6 | 107 | * @percpu_dev_id: per-CPU cookie to identify the device
a9d0a1a3 TG | 108 | * @next: pointer to the next irqaction for shared interrupts
109 | * @irq: interrupt number | |
c0ecaa06 | 110 | * @flags: flags (see IRQF_* above) |
25985edc | 111 | * @thread_fn: interrupt handler function for threaded interrupts |
3aa551c9 | 112 | * @thread: thread pointer for threaded interrupts |
2a1d3ab8 | 113 | * @secondary: pointer to secondary irqaction (force threading) |
3aa551c9 | 114 | * @thread_flags: flags related to @thread |
b5faba21 | 115 | * @thread_mask: bitmask for keeping track of @thread activity |
c0ecaa06 | 116 | * @dir: pointer to the proc/irq/NN/name entry |
a9d0a1a3 | 117 | */ |
1da177e4 | 118 | struct irqaction { |
31d9d9b6 | 119 | irq_handler_t handler; |
31d9d9b6 MZ | 120 | void *dev_id;
121 | void __percpu *percpu_dev_id; | |
122 | struct irqaction *next; | |
31d9d9b6 MZ | 123 | irq_handler_t thread_fn;
124 | struct task_struct *thread; | |
2a1d3ab8 | 125 | struct irqaction *secondary;
c0ecaa06 TG | 126 | unsigned int irq;
127 | unsigned int flags; | |
31d9d9b6 MZ | 128 | unsigned long thread_flags;
129 | unsigned long thread_mask; | |
130 | const char *name; | |
131 | struct proc_dir_entry *dir; | |
f6cd2477 | 132 | } ____cacheline_internodealigned_in_smp; |
1da177e4 | 133 | |
7d12e780 | 134 | extern irqreturn_t no_action(int cpl, void *dev_id); |
3aa551c9 | 135 | |
e237a551 CF | 136 | /*
137 | * If a (PCI) device interrupt is not connected we set dev->irq to | |
138 | * IRQ_NOTCONNECTED. This causes request_irq() to fail with -ENOTCONN, so we | |
139 | * can distinguish that case from other error returns. | |
140 | * | |
141 | * 0x80000000 is guaranteed to be outside the available range of interrupts | |
142 | * and easy to distinguish from other possible incorrect values. | |
143 | */ | |
144 | #define IRQ_NOTCONNECTED (1U << 31) | |
145 | ||
3aa551c9 TG | 146 | extern int __must_check
147 | request_threaded_irq(unsigned int irq, irq_handler_t handler, | |
148 | irq_handler_t thread_fn, | |
149 | unsigned long flags, const char *name, void *dev); | |
150 | ||
5ca470a0 JC | 151 | /**
152 | * request_irq - Add a handler for an interrupt line | |
153 | * @irq: The interrupt line to allocate | |
154 | * @handler: Function to be called when the IRQ occurs. | |
155 | * Primary handler for threaded interrupts | |
156 | * If NULL, the default primary handler is installed | |
157 | * @flags: Handling flags | |
158 | * @name: Name of the device generating this interrupt | |
159 | * @dev: A cookie passed to the handler function | |
160 | * | |
161 | * This call allocates an interrupt and establishes a handler; see | |
162 | * the documentation for request_threaded_irq() for details. | |
163 | */ | |
3aa551c9 TG | 164 | static inline int __must_check
165 | request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags, | |
166 | const char *name, void *dev) | |
167 | { | |
168 | return request_threaded_irq(irq, handler, NULL, flags, name, dev); | |
169 | } | |
170 | ||
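The split between the primary handler and @thread_fn is easiest to see in a driver sketch. The following is a minimal, hypothetical example (the foo_* names and helpers are invented for illustration): the primary handler runs in hard-irq context and only decides whether to wake the thread, while the threaded handler does the potentially sleeping work with the line kept masked by IRQF_ONESHOT.

```c
#include <linux/interrupt.h>

/* Hypothetical device context, for illustration only. */
struct foo_dev {
	int irq;
};

/* Hypothetical device-specific helpers, stubbed out here. */
static bool foo_device_raised_irq(struct foo_dev *foo) { return true; }
static void foo_process_events(struct foo_dev *foo) { }

/* Primary handler: hard-irq context, must not sleep. */
static irqreturn_t foo_hardirq(int irq, void *dev_id)
{
	struct foo_dev *foo = dev_id;

	if (!foo_device_raised_irq(foo))
		return IRQ_NONE;	/* not ours on a shared line */
	return IRQ_WAKE_THREAD;		/* defer to foo_thread_fn() */
}

/* Threaded handler: process context, may sleep. */
static irqreturn_t foo_thread_fn(int irq, void *dev_id)
{
	struct foo_dev *foo = dev_id;

	foo_process_events(foo);
	return IRQ_HANDLED;
}

static int foo_setup_irq(struct foo_dev *foo)
{
	/* IRQF_ONESHOT keeps the line masked until foo_thread_fn()
	 * has finished, as described by the flag comments above. */
	return request_threaded_irq(foo->irq, foo_hardirq, foo_thread_fn,
				    IRQF_ONESHOT, "foo", foo);
}
```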
ae731f8d MZ | 171 | extern int __must_check
172 | request_any_context_irq(unsigned int irq, irq_handler_t handler, | |
173 | unsigned long flags, const char *name, void *dev_id); | |
174 | ||
31d9d9b6 | 175 | extern int __must_check |
c80081b9 DL | 176 | __request_percpu_irq(unsigned int irq, irq_handler_t handler,
177 | unsigned long flags, const char *devname, | |
178 | void __percpu *percpu_dev_id); | |
179 | ||
b525903c JT | 180 | extern int __must_check
181 | request_nmi(unsigned int irq, irq_handler_t handler, unsigned long flags, | |
182 | const char *name, void *dev); | |
183 | ||
c80081b9 | 184 | static inline int __must_check |
31d9d9b6 | 185 | request_percpu_irq(unsigned int irq, irq_handler_t handler, |
c80081b9 DL | 186 | const char *devname, void __percpu *percpu_dev_id)
187 | { | |
188 | return __request_percpu_irq(irq, handler, 0, | |
189 | devname, percpu_dev_id); | |
190 | } | |
3aa551c9 | 191 | |
4b078c3f JT | 192 | extern int __must_check
193 | request_percpu_nmi(unsigned int irq, irq_handler_t handler, | |
194 | const char *devname, void __percpu *dev); | |
195 | ||
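For the per-CPU variants the cookie is a percpu pointer, and the interrupt must additionally be enabled on each CPU that is to receive it. A minimal sketch with invented foo_* names; note that enable_percpu_irq() only affects the calling CPU, so real users typically invoke it from CPU hotplug callbacks:

```c
#include <linux/interrupt.h>
#include <linux/irq.h>		/* IRQ_TYPE_NONE */
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, foo_count);

static irqreturn_t foo_percpu_handler(int irq, void *dev_id)
{
	/* dev_id points at this CPU's instance of the percpu data. */
	unsigned long *count = dev_id;

	(*count)++;
	return IRQ_HANDLED;
}

static int foo_setup_percpu_irq(unsigned int irq)
{
	int err;

	/* Pass the percpu cookie itself, not a per_cpu_ptr() of it. */
	err = request_percpu_irq(irq, foo_percpu_handler, "foo-percpu",
				 &foo_count);
	if (err)
		return err;

	/* Must run on each target CPU; shown here for the local one. */
	enable_percpu_irq(irq, IRQ_TYPE_NONE);
	return 0;
}
```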
25ce4be7 | 196 | extern const void *free_irq(unsigned int, void *); |
31d9d9b6 | 197 | extern void free_percpu_irq(unsigned int, void __percpu *); |
1da177e4 | 198 | |
b525903c | 199 | extern const void *free_nmi(unsigned int irq, void *dev_id); |
4b078c3f | 200 | extern void free_percpu_nmi(unsigned int irq, void __percpu *percpu_dev_id); |
b525903c | 201 | |
0af3678f AV | 202 | struct device;
203 | ||
935bd5b9 AV | 204 | extern int __must_check
205 | devm_request_threaded_irq(struct device *dev, unsigned int irq, | |
206 | irq_handler_t handler, irq_handler_t thread_fn, | |
207 | unsigned long irqflags, const char *devname, | |
208 | void *dev_id); | |
209 | ||
210 | static inline int __must_check | |
211 | devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler, | |
212 | unsigned long irqflags, const char *devname, void *dev_id) | |
213 | { | |
214 | return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags, | |
215 | devname, dev_id); | |
216 | } | |
217 | ||
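The devm_* variants tie the interrupt's lifetime to a struct device, which removes the matching free_irq() from error and teardown paths. A sketch of a hypothetical platform driver probe (foo_* names invented):

```c
#include <linux/interrupt.h>
#include <linux/platform_device.h>

/* Hypothetical driver data. */
struct foo_priv {
	int irq;
};

static irqreturn_t foo_isr(int irq, void *dev_id)
{
	/* dev_id is the foo_priv cookie passed to devm_request_irq(). */
	return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
	struct foo_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->irq = platform_get_irq(pdev, 0);
	if (priv->irq < 0)
		return priv->irq;

	/* Managed variant: the IRQ is released automatically when the
	 * device is unbound, so no explicit free_irq() on teardown. */
	return devm_request_irq(&pdev->dev, priv->irq, foo_isr, 0,
				"foo", priv);
}
```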
0668d306 SB | 218 | extern int __must_check
219 | devm_request_any_context_irq(struct device *dev, unsigned int irq, | |
220 | irq_handler_t handler, unsigned long irqflags, | |
221 | const char *devname, void *dev_id); | |
222 | ||
9ac7849e TH | 223 | extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
224 | ||
a313357e | 225 | bool irq_has_action(unsigned int irq); |
1da177e4 | 226 | extern void disable_irq_nosync(unsigned int irq); |
02cea395 | 227 | extern bool disable_hardirq(unsigned int irq); |
1da177e4 | 228 | extern void disable_irq(unsigned int irq); |
31d9d9b6 | 229 | extern void disable_percpu_irq(unsigned int irq); |
1da177e4 | 230 | extern void enable_irq(unsigned int irq); |
1e7c5fd2 | 231 | extern void enable_percpu_irq(unsigned int irq, unsigned int type); |
f0cb3220 | 232 | extern bool irq_percpu_is_enabled(unsigned int irq); |
a92444c6 | 233 | extern void irq_wake_thread(unsigned int irq, void *dev_id); |
ba9a2331 | 234 | |
b525903c | 235 | extern void disable_nmi_nosync(unsigned int irq); |
4b078c3f | 236 | extern void disable_percpu_nmi(unsigned int irq); |
b525903c | 237 | extern void enable_nmi(unsigned int irq); |
4b078c3f JT | 238 | extern void enable_percpu_nmi(unsigned int irq, unsigned int type);
239 | extern int prepare_percpu_nmi(unsigned int irq); | |
240 | extern void teardown_percpu_nmi(unsigned int irq); | |
b525903c | 241 | |
acd26bcf TG | 242 | extern int irq_inject_interrupt(unsigned int irq);
243 | ||
0a0c5168 RW | 244 | /* The following three functions are for core kernel use only. */
245 | extern void suspend_device_irqs(void); | |
246 | extern void resume_device_irqs(void); | |
3a79bc63 | 247 | extern void rearm_wake_irq(unsigned int irq); |
0a0c5168 | 248 | |
f0ba3d05 EP | 249 | /**
250 | * struct irq_affinity_notify - context for notification of IRQ affinity changes | |
251 | * @irq: Interrupt to which notification applies | |
252 | * @kref: Reference count, for internal use | |
253 | * @work: Work item, for internal use | |
254 | * @notify: Function to be called on change. This will be | |
255 | * called in process context. | |
256 | * @release: Function to be called on release. This will be | |
257 | * called in process context. Once registered, the | |
258 | * structure must only be freed when this function is | |
259 | * called or later. | |
260 | */ | |
261 | struct irq_affinity_notify { | |
262 | unsigned int irq; | |
263 | struct kref kref; | |
264 | struct work_struct work; | |
265 | void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask); | |
266 | void (*release)(struct kref *ref); | |
267 | }; | |
268 | ||
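Registering the notifier looks roughly like the following (hypothetical foo_* names); both callbacks run from the work item in process context:

```c
#include <linux/interrupt.h>

/* Hypothetical driver context embedding the notifier. */
struct foo_ctx {
	struct irq_affinity_notify affinity_notify;
};

static void foo_affinity_changed(struct irq_affinity_notify *notify,
				 const cpumask_t *mask)
{
	/* Called whenever the IRQ's affinity changes; e.g. re-point
	 * per-CPU resources at the new mask. */
}

static void foo_affinity_release(struct kref *ref)
{
	/* Last reference dropped; the structure may be freed from here
	 * on (here it is embedded in foo_ctx, so nothing to do). */
}

static int foo_watch_affinity(struct foo_ctx *ctx, unsigned int irq)
{
	ctx->affinity_notify.notify = foo_affinity_changed;
	ctx->affinity_notify.release = foo_affinity_release;
	return irq_set_affinity_notifier(irq, &ctx->affinity_notify);
}
```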
9cfef55b ML | 269 | #define IRQ_AFFINITY_MAX_SETS 4
270 | ||
20e407e1 CH | 271 | /**
272 | * struct irq_affinity - Description for automatic irq affinity assignments | |
273 | * @pre_vectors: Don't apply affinity to @pre_vectors at beginning of | |
274 | * the MSI(-X) vector space | |
275 | * @post_vectors: Don't apply affinity to @post_vectors at end of | |
276 | * the MSI(-X) vector space | |
9cfef55b ML | 277 | * @nr_sets: The number of interrupt sets for which affinity
278 | * spreading is required | |
279 | * @set_size: Array holding the size of each interrupt set | |
c66d4bd1 ML | 280 | * @calc_sets: Callback for calculating the number and size
281 | * of interrupt sets | |
282 | * @priv: Private data for usage by @calc_sets, usually a | |
283 | * pointer to driver/device specific data. | |
20e407e1 CH | 284 | */
285 | struct irq_affinity { | |
0145c30e TG | 286 | unsigned int pre_vectors;
287 | unsigned int post_vectors; | |
288 | unsigned int nr_sets; | |
9cfef55b | 289 | unsigned int set_size[IRQ_AFFINITY_MAX_SETS]; |
c66d4bd1 ML | 290 | void (*calc_sets)(struct irq_affinity *, unsigned int nvecs);
291 | void *priv; | |
20e407e1 CH | 292 | };
293 | ||
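struct irq_affinity is consumed by MSI(-X) allocation paths such as pci_alloc_irq_vectors_affinity(). A plausible sketch for a device whose vector 0 is a non-queue control interrupt that should be excluded from the spreading (the device layout is invented):

```c
#include <linux/pci.h>

/* Hypothetical: vector 0 is a control interrupt, the rest are queue
 * interrupts that should be spread across CPUs automatically. */
static int foo_alloc_vectors(struct pci_dev *pdev, unsigned int nr_queues)
{
	struct irq_affinity affd = {
		.pre_vectors	= 1,	/* skip the control vector */
		.post_vectors	= 0,
	};

	return pci_alloc_irq_vectors_affinity(pdev, 2, nr_queues + 1,
					      PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
					      &affd);
}
```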
bec04037 DL | 294 | /**
295 | * struct irq_affinity_desc - Interrupt affinity descriptor | |
296 | * @mask: cpumask to hold the affinity assignment | |
70921ae2 | 297 | * @is_managed: 1 if the interrupt is managed internally |
bec04037 DL | 298 | */
299 | struct irq_affinity_desc { | |
300 | struct cpumask mask; | |
c410abbb | 301 | unsigned int is_managed : 1; |
bec04037 DL | 302 | };
303 | ||
0244ad00 | 304 | #if defined(CONFIG_SMP) |
d7b90689 | 305 | |
d036e67b | 306 | extern cpumask_var_t irq_default_affinity; |
18404756 | 307 | |
4d80d6ca TG | 308 | extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
309 | extern int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask); | |
01f8fa4f | 310 | |
d7b90689 | 311 | extern int irq_can_set_affinity(unsigned int irq); |
18404756 | 312 | extern int irq_select_affinity(unsigned int irq); |
d7b90689 | 313 | |
65c7cded TG | 314 | extern int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m,
315 | bool setaffinity); | |
316 | ||
317 | /** | |
318 | * irq_update_affinity_hint - Update the affinity hint | |
319 | * @irq: Interrupt to update | |
320 | * @m: cpumask pointer (NULL to clear the hint) | |
321 | * | |
322 | * Updates the affinity hint, but does not change the affinity of the interrupt. | |
323 | */ | |
324 | static inline int | |
325 | irq_update_affinity_hint(unsigned int irq, const struct cpumask *m) | |
326 | { | |
327 | return __irq_apply_affinity_hint(irq, m, false); | |
328 | } | |
329 | ||
330 | /** | |
331 | * irq_set_affinity_and_hint - Update the affinity hint and apply the provided | |
332 | * cpumask to the interrupt | |
333 | * @irq: Interrupt to update | |
334 | * @m: cpumask pointer (NULL to clear the hint) | |
335 | * | |
336 | * Updates the affinity hint and if @m is not NULL it applies it as the | |
337 | * affinity of that interrupt. | |
338 | */ | |
339 | static inline int | |
340 | irq_set_affinity_and_hint(unsigned int irq, const struct cpumask *m) | |
341 | { | |
342 | return __irq_apply_affinity_hint(irq, m, true); | |
343 | } | |
344 | ||
345 | /* | |
346 | * Deprecated. Use irq_update_affinity_hint() or irq_set_affinity_and_hint() | |
347 | * instead. | |
348 | */ | |
349 | static inline int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m) | |
350 | { | |
351 | return irq_set_affinity_and_hint(irq, m); | |
352 | } | |
353 | ||
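A sketch of publishing a hint per queue interrupt (naming and CPU selection invented; it also assumes contiguous CPU ids for brevity). The hint shows up in /proc/irq/N/affinity_hint for user-space balancers:

```c
#include <linux/cpumask.h>
#include <linux/interrupt.h>

/* Hypothetical: spread one hint per queue interrupt. */
static void foo_hint_queue_irq(unsigned int irq, unsigned int queue)
{
	const struct cpumask *mask = cpumask_of(queue % num_online_cpus());

	/* Publishes the hint only; the actual affinity is untouched.
	 * Use irq_set_affinity_and_hint() to also apply @mask. */
	irq_update_affinity_hint(irq, mask);
}
```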
1d3aec89 JG | 354 | extern int irq_update_affinity_desc(unsigned int irq,
355 | struct irq_affinity_desc *affinity); | |
cd7eab44 | 356 | |
cd7eab44 BH | 357 | extern int
358 | irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify); | |
359 | ||
bec04037 | 360 | struct irq_affinity_desc * |
c66d4bd1 | 361 | irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd); |
bec04037 | 362 | |
0145c30e TG | 363 | unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
364 | const struct irq_affinity *affd); | |
5e385a6e | 365 | |
d7b90689 RK | 366 | #else /* CONFIG_SMP */
367 | ||
0de26520 | 368 | static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m) |
d7b90689 RK | 369 | {
370 | return -EINVAL; | |
371 | } | |
372 | ||
4c88d7f9 AB | 373 | static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
374 | { | |
375 | return 0; | |
376 | } | |
377 | ||
d7b90689 RK | 378 | static inline int irq_can_set_affinity(unsigned int irq)
379 | { | |
380 | return 0; | |
381 | } | |
382 | ||
18404756 MK | 383 | static inline int irq_select_affinity(unsigned int irq) { return 0; }
384 | ||
65c7cded TG | 385 | static inline int irq_update_affinity_hint(unsigned int irq,
386 | const struct cpumask *m) | |
387 | { | |
388 | return -EINVAL; | |
389 | } | |
390 | ||
391 | static inline int irq_set_affinity_and_hint(unsigned int irq, | |
392 | const struct cpumask *m) | |
393 | { | |
394 | return -EINVAL; | |
395 | } | |
396 | ||
e7a297b0 | 397 | static inline int irq_set_affinity_hint(unsigned int irq, |
cd7eab44 | 398 | const struct cpumask *m) |
e7a297b0 PWJ | 399 | {
400 | return -EINVAL; | |
401 | } | |
f0ba3d05 | 402 | |
1d3aec89 JG | 403 | static inline int irq_update_affinity_desc(unsigned int irq,
404 | struct irq_affinity_desc *affinity) | |
405 | { | |
406 | return -EINVAL; | |
407 | } | |
408 | ||
f0ba3d05 EP | 409 | static inline int
410 | irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) | |
411 | { | |
412 | return 0; | |
413 | } | |
5e385a6e | 414 | |
bec04037 | 415 | static inline struct irq_affinity_desc * |
c66d4bd1 | 416 | irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd) |
34c3d981 TG | 417 | {
418 | return NULL; | |
419 | } | |
420 | ||
0145c30e TG | 421 | static inline unsigned int
422 | irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec, | |
423 | const struct irq_affinity *affd) | |
34c3d981 TG | 424 | {
425 | return maxvec; | |
426 | } | |
427 | ||
0244ad00 | 428 | #endif /* CONFIG_SMP */ |
d7b90689 | 429 | |
c01d403b IM | 430 | /*
431 | * Special lockdep variants of irq disabling/enabling. | |
432 | * These should be used for locking constructs that | |
433 | * know that a particular irq context is disabled, | |
434 | * and that are the only irq-context users of a lock, | |
435 | * so that it's safe to take the lock in the irq-disabled | |
436 | * section without disabling hardirqs. | |
437 | * | |
438 | * On !CONFIG_LOCKDEP they are equivalent to the normal | |
439 | * irq disable/enable methods. | |
440 | */ | |
441 | static inline void disable_irq_nosync_lockdep(unsigned int irq) | |
442 | { | |
443 | disable_irq_nosync(irq); | |
444 | #ifdef CONFIG_LOCKDEP | |
445 | local_irq_disable(); | |
446 | #endif | |
447 | } | |
448 | ||
e8106b94 AV | 449 | static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
450 | { | |
451 | disable_irq_nosync(irq); | |
452 | #ifdef CONFIG_LOCKDEP | |
453 | local_irq_save(*flags); | |
454 | #endif | |
455 | } | |
456 | ||
c01d403b IM | 457 | static inline void disable_irq_lockdep(unsigned int irq)
458 | { | |
459 | disable_irq(irq); | |
460 | #ifdef CONFIG_LOCKDEP | |
461 | local_irq_disable(); | |
462 | #endif | |
463 | } | |
464 | ||
465 | static inline void enable_irq_lockdep(unsigned int irq) | |
466 | { | |
467 | #ifdef CONFIG_LOCKDEP | |
468 | local_irq_enable(); | |
469 | #endif | |
470 | enable_irq(irq); | |
471 | } | |
472 | ||
e8106b94 AV | 473 | static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
474 | { | |
475 | #ifdef CONFIG_LOCKDEP | |
476 | local_irq_restore(*flags); | |
477 | #endif | |
478 | enable_irq(irq); | |
479 | } | |
480 | ||
ba9a2331 | 481 | /* IRQ wakeup (PM) control: */ |
a0cd9ca2 TG | 482 | extern int irq_set_irq_wake(unsigned int irq, unsigned int on);
483 | ||
ba9a2331 TG | 484 | static inline int enable_irq_wake(unsigned int irq)
485 | { | |
a0cd9ca2 | 486 | return irq_set_irq_wake(irq, 1); |
ba9a2331 TG | 487 | }
488 | ||
489 | static inline int disable_irq_wake(unsigned int irq) | |
490 | { | |
a0cd9ca2 | 491 | return irq_set_irq_wake(irq, 0); |
ba9a2331 TG | 492 | }
493 | ||
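Typical use of the wake control is from a driver's suspend/resume path. A hedged sketch (the foo_* structure is invented; device_may_wakeup() comes from linux/pm_wakeup.h):

```c
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>

/* Hypothetical wakeup-capable device. */
struct foo_dev {
	struct device *dev;
	int irq;
};

static int foo_suspend(struct foo_dev *foo)
{
	if (device_may_wakeup(foo->dev))
		enable_irq_wake(foo->irq);	/* arm as wakeup source */
	return 0;
}

static int foo_resume(struct foo_dev *foo)
{
	if (device_may_wakeup(foo->dev))
		disable_irq_wake(foo->irq);	/* disarm again */
	return 0;
}
```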
1b7047ed MZ | 494 | /*
495 | * irq_get_irqchip_state/irq_set_irqchip_state specific flags | |
496 | */ | |
497 | enum irqchip_irq_state { | |
498 | IRQCHIP_STATE_PENDING, /* Is interrupt pending? */ | |
499 | IRQCHIP_STATE_ACTIVE, /* Is interrupt in progress? */ | |
500 | IRQCHIP_STATE_MASKED, /* Is interrupt masked? */ | |
501 | IRQCHIP_STATE_LINE_LEVEL, /* Is IRQ line high? */ | |
502 | }; | |
503 | ||
504 | extern int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which, | |
505 | bool *state); | |
506 | extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, | |
507 | bool state); | |
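A sketch of using the state accessors to query and clear a pending latch at the irqchip, as a hypervisor or coprocessor manager might (the foo_* name is invented):

```c
#include <linux/interrupt.h>

/* Hypothetical: clear a stale pending latch before handing the line
 * to another execution context. */
static int foo_clear_pending(unsigned int irq)
{
	bool pending;
	int err;

	err = irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending);
	if (err)
		return err;

	if (pending)
		err = irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING,
					    false);
	return err;
}
```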
8d32a307 TG | 508 |
509 | #ifdef CONFIG_IRQ_FORCED_THREADING | |
b6a32bbd | 510 | # ifdef CONFIG_PREEMPT_RT |
91cc470e | 511 | # define force_irqthreads() (true) |
b6a32bbd | 512 | # else |
91cc470e TL | 513 | DECLARE_STATIC_KEY_FALSE(force_irqthreads_key);
514 | # define force_irqthreads() (static_branch_unlikely(&force_irqthreads_key)) | |
b6a32bbd | 515 | # endif |
8d32a307 | 516 | #else |
91cc470e | 517 | #define force_irqthreads() (false) |
8d32a307 TG | 518 | #endif
519 | ||
0fd7d862 FW | 520 | #ifndef local_softirq_pending
521 | ||
522 | #ifndef local_softirq_pending_ref | |
523 | #define local_softirq_pending_ref irq_stat.__softirq_pending | |
524 | #endif | |
525 | ||
526 | #define local_softirq_pending() (__this_cpu_read(local_softirq_pending_ref)) | |
527 | #define set_softirq_pending(x) (__this_cpu_write(local_softirq_pending_ref, (x))) | |
528 | #define or_softirq_pending(x) (__this_cpu_or(local_softirq_pending_ref, (x))) | |
529 | ||
0fd7d862 FW | 530 | #endif /* local_softirq_pending */
531 | ||
2d3fbbb3 BH | 532 | /* Some architectures might implement lazy enabling/disabling of
533 | * interrupts. In some cases, such as stop_machine, we might want | |
534 | * to ensure that after a local_irq_disable(), interrupts have | |
535 | * really been disabled in hardware. Such architectures need to | |
536 | * implement the following hook. | |
537 | */ | |
538 | #ifndef hard_irq_disable | |
539 | #define hard_irq_disable() do { } while(0) | |
540 | #endif | |
541 | ||
1da177e4 LT | 542 | /* PLEASE avoid allocating new softirqs unless you _really_ need high
543 | frequency threaded job scheduling. For almost all purposes | |
544 | tasklets are more than enough. E.g. all serial device BHs et | |
545 | al. should be converted to tasklets, not to softirqs. | |
546 | */ | |
547 | ||
548 | enum | |
549 | { | |
550 | HI_SOFTIRQ=0, | |
551 | TIMER_SOFTIRQ, | |
552 | NET_TX_SOFTIRQ, | |
553 | NET_RX_SOFTIRQ, | |
ff856bad | 554 | BLOCK_SOFTIRQ, |
511cbce2 | 555 | IRQ_POLL_SOFTIRQ, |
c9819f45 CL | 556 | TASKLET_SOFTIRQ,
557 | SCHED_SOFTIRQ, | |
3bbc53f4 | 558 | HRTIMER_SOFTIRQ, |
09223371 | 559 | RCU_SOFTIRQ, /* Preferably RCU should always be the last softirq */
978b0116 AD | 560 |
561 | NR_SOFTIRQS | |
1da177e4 LT | 562 | };
563 | ||
0345691b | 564 | /* |
f96272a9 FW | 565 | * The following vectors can be safely ignored after ksoftirqd is parked:
566 | * | |
567 | * _ RCU: | |
568 | * 1) rcutree_migrate_callbacks() migrates the queue. | |
0345691b | 569 | * 2) rcu_report_dead() reports the final quiescent states. |
f96272a9 FW | 570 | *
571 | * _ IRQ_POLL: irq_poll_cpu_dead() migrates the queue | |
0345691b | 572 | */ |
f96272a9 | 573 | #define SOFTIRQ_HOTPLUG_SAFE_MASK (BIT(RCU_SOFTIRQ) | BIT(IRQ_POLL_SOFTIRQ)) |
803b0eba | 574 | |
5d592b44 JB | 575 | /* Map softirq index to softirq name. Update 'softirq_to_name' in
576 | * kernel/softirq.c when adding a new softirq. | |
577 | */ | |
ce85b4f2 | 578 | extern const char * const softirq_to_name[NR_SOFTIRQS]; |
5d592b44 | 579 | |
1da177e4 LT | 580 | /* softirq mask and active fields moved to irq_cpustat_t in
581 | * asm/hardirq.h to get better cache usage. KAO | |
582 | */ | |
583 | ||
584 | struct softirq_action | |
585 | { | |
586 | void (*action)(struct softirq_action *); | |
1da177e4 LT | 587 | };
588 | ||
589 | asmlinkage void do_softirq(void); | |
eb0f1c44 | 590 | asmlinkage void __do_softirq(void); |
7d65f4a6 | 591 | |
1a90bfd2 SAS | 592 | #ifdef CONFIG_PREEMPT_RT
593 | extern void do_softirq_post_smp_call_flush(unsigned int was_pending); | |
594 | #else | |
595 | static inline void do_softirq_post_smp_call_flush(unsigned int unused) | |
596 | { | |
597 | do_softirq(); | |
598 | } | |
599 | #endif | |
600 | ||
962cf36c | 601 | extern void open_softirq(int nr, void (*action)(struct softirq_action *)); |
1da177e4 | 602 | extern void softirq_init(void); |
f069686e | 603 | extern void __raise_softirq_irqoff(unsigned int nr); |
2bf2160d | 604 | |
b3c97528 HH | 605 | extern void raise_softirq_irqoff(unsigned int nr);
606 | extern void raise_softirq(unsigned int nr); | |
1da177e4 | 607 | |
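For illustration of the API shape only, since new softirq vectors must not be added per the warning above: this is roughly how core code registers an action at boot and raises it later. FOO_SOFTIRQ is a stand-in, aliased to HI_SOFTIRQ purely so the sketch is self-contained.

```c
#include <linux/init.h>
#include <linux/interrupt.h>

#define FOO_SOFTIRQ HI_SOFTIRQ	/* stand-in vector, illustration only */

static void foo_softirq_action(struct softirq_action *a)
{
	/* Softirq context: interrupts enabled, sleeping forbidden. */
}

static void __init foo_softirq_setup(void)
{
	open_softirq(FOO_SOFTIRQ, foo_softirq_action);
}

static void foo_mark_work(void)
{
	/* Runs foo_softirq_action() on this CPU, either on irq exit
	 * or in ksoftirqd if the softirq load is too high. */
	raise_softirq(FOO_SOFTIRQ);
}
```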
4dd53d89 VP | 608 | DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
609 | ||
610 | static inline struct task_struct *this_cpu_ksoftirqd(void) | |
611 | { | |
612 | return this_cpu_read(ksoftirqd); | |
613 | } | |
614 | ||
1da177e4 LT | 615 | /* Tasklets --- multithreaded analogue of BHs.
616 | ||
12cc923f RP | 617 | This API is deprecated. Please consider using threaded IRQs instead:
618 | https://lore.kernel.org/lkml/20200716081538.2sivhkj4hcyrusem@linutronix.de | |
619 | ||
1da177e4 LT | 620 | The main feature distinguishing them from generic softirqs: a tasklet
621 | runs on only one CPU at a time. | |
622 | | |
623 | The main feature distinguishing them from BHs: different tasklets | |
624 | may run simultaneously on different CPUs. | |
625 | | |
626 | Properties: | |
627 | * If tasklet_schedule() is called, then the tasklet is guaranteed | |
628 | to be executed on some cpu at least once after this. | |
25985edc | 629 | * If the tasklet is already scheduled, but its execution is still not
1da177e4 LT | 630 | started, it will be executed only once.
631 | * If this tasklet is already running on another CPU (or schedule is called | |
632 | from the tasklet itself), it is rescheduled for later. | |
633 | * A tasklet is strictly serialized with respect to itself, but not | |
634 | with respect to other tasklets. If a client needs intertask | |
635 | synchronization, it must be done with spinlocks. | |
636 | */ | |
637 | ||
638 | struct tasklet_struct | |
639 | { | |
640 | struct tasklet_struct *next; | |
641 | unsigned long state; | |
642 | atomic_t count; | |
12cc923f RP | 643 | bool use_callback;
644 | union { | |
645 | void (*func)(unsigned long data); | |
646 | void (*callback)(struct tasklet_struct *t); | |
647 | }; | |
1da177e4 LT | 648 | unsigned long data;
649 | }; | |
650 | ||
12cc923f RP | 651 | #define DECLARE_TASKLET(name, _callback) \
652 | struct tasklet_struct name = { \ | |
653 | .count = ATOMIC_INIT(0), \ | |
654 | .callback = _callback, \ | |
655 | .use_callback = true, \ | |
656 | } | |
657 | ||
658 | #define DECLARE_TASKLET_DISABLED(name, _callback) \ | |
659 | struct tasklet_struct name = { \ | |
660 | .count = ATOMIC_INIT(1), \ | |
661 | .callback = _callback, \ | |
662 | .use_callback = true, \ | |
663 | } | |
664 | ||
665 | #define from_tasklet(var, callback_tasklet, tasklet_fieldname) \ | |
666 | container_of(callback_tasklet, typeof(*var), tasklet_fieldname) | |
667 | ||
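A sketch of the callback-style tasklet API (the tasklet API as a whole is deprecated in favour of threaded IRQs, as noted above); the foo_* names are invented:

```c
#include <linux/interrupt.h>

/* Hypothetical device embedding a tasklet. */
struct foo_dev {
	struct tasklet_struct tasklet;
	unsigned long pending_events;
};

static void foo_tasklet_fn(struct tasklet_struct *t)
{
	/* Recover the containing structure from the tasklet pointer. */
	struct foo_dev *foo = from_tasklet(foo, t, tasklet);

	/* Process foo->pending_events in softirq context... */
}

static void foo_init(struct foo_dev *foo)
{
	tasklet_setup(&foo->tasklet, foo_tasklet_fn);
}

static irqreturn_t foo_isr(int irq, void *dev_id)
{
	struct foo_dev *foo = dev_id;

	/* Defer the bulk of the work out of hard-irq context. */
	tasklet_schedule(&foo->tasklet);
	return IRQ_HANDLED;
}
```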
b13fecb1 KC | 668 | #define DECLARE_TASKLET_OLD(name, _func) \
669 | struct tasklet_struct name = { \ | |
670 | .count = ATOMIC_INIT(0), \ | |
671 | .func = _func, \ | |
672 | } | |
1da177e4 | 673 | |
b13fecb1 KC | 674 | #define DECLARE_TASKLET_DISABLED_OLD(name, _func) \
675 | struct tasklet_struct name = { \ | |
676 | .count = ATOMIC_INIT(1), \ | |
677 | .func = _func, \ | |
678 | } | |
1da177e4 LT | 679 |
680 | enum | |
681 | { | |
682 | TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */ | |
683 | TASKLET_STATE_RUN /* Tasklet is running (SMP only) */ | |
684 | }; | |
685 | ||
eb2dafbb | 686 | #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT) |
1da177e4 LT | 687 | static inline int tasklet_trylock(struct tasklet_struct *t)
688 | { | |
689 | return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state); | |
690 | } | |
691 | ||
da044747 PZ | 692 | void tasklet_unlock(struct tasklet_struct *t);
693 | void tasklet_unlock_wait(struct tasklet_struct *t); | |
eb2dafbb | 694 | void tasklet_unlock_spin_wait(struct tasklet_struct *t); |
ca5f6251 | 695 | |
1da177e4 | 696 | #else |
6951547a TG | 697 | static inline int tasklet_trylock(struct tasklet_struct *t) { return 1; }
698 | static inline void tasklet_unlock(struct tasklet_struct *t) { } | |
699 | static inline void tasklet_unlock_wait(struct tasklet_struct *t) { } | |
ca5f6251 | 700 | static inline void tasklet_unlock_spin_wait(struct tasklet_struct *t) { } |
1da177e4 LT | 701 | #endif
702 | ||
b3c97528 | 703 | extern void __tasklet_schedule(struct tasklet_struct *t); |
1da177e4 LT | 704 |
705 | static inline void tasklet_schedule(struct tasklet_struct *t) | |
706 | { | |
707 | if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) | |
708 | __tasklet_schedule(t); | |
709 | } | |
710 | ||
b3c97528 | 711 | extern void __tasklet_hi_schedule(struct tasklet_struct *t); |
1da177e4 LT | 712 |
713 | static inline void tasklet_hi_schedule(struct tasklet_struct *t) | |
714 | { | |
715 | if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) | |
716 | __tasklet_hi_schedule(t); | |
717 | } | |
718 | ||
1da177e4 LT | 719 | static inline void tasklet_disable_nosync(struct tasklet_struct *t)
720 | { | |
721 | atomic_inc(&t->count); | |
4e857c58 | 722 | smp_mb__after_atomic(); |
1da177e4 LT | 723 | }
724 | ||
ca5f6251 TG | 725 | /*
726 | * Do not use in new code. Disabling tasklets from atomic contexts is | |
727 | * error prone and should be avoided. | |
728 | */ | |
729 | static inline void tasklet_disable_in_atomic(struct tasklet_struct *t) | |
730 | { | |
731 | tasklet_disable_nosync(t); | |
732 | tasklet_unlock_spin_wait(t); | |
733 | smp_mb(); | |
734 | } | |
735 | ||
1da177e4 LT | 736 | static inline void tasklet_disable(struct tasklet_struct *t)
737 | { | |
738 | tasklet_disable_nosync(t); | |
6fd4e861 | 739 | tasklet_unlock_wait(t); |
1da177e4 LT | 740 | smp_mb();
741 | } | |
742 | ||
743 | static inline void tasklet_enable(struct tasklet_struct *t) | |
744 | { | |
4e857c58 | 745 | smp_mb__before_atomic(); |
1da177e4 LT | 746 | atomic_dec(&t->count);
747 | } | |
748 | ||
1da177e4 | 749 | extern void tasklet_kill(struct tasklet_struct *t); |
1da177e4 LT | 750 | extern void tasklet_init(struct tasklet_struct *t,
751 | void (*func)(unsigned long), unsigned long data); | |
12cc923f RP | 752 | extern void tasklet_setup(struct tasklet_struct *t,
753 | void (*callback)(struct tasklet_struct *)); | |
1da177e4 LT | 754 |
755 | /* | |
756 | * Autoprobing for irqs: | |
757 | * | |
758 | * probe_irq_on() and probe_irq_off() provide robust primitives | |
759 | * for accurate IRQ probing during kernel initialization. They are | |
760 | * reasonably simple to use, are not "fooled" by spurious interrupts, | |
761 | * and, unlike other attempts at IRQ probing, they do not get hung on | |
762 | * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards). | |
763 | * | |
764 | * For reasonably foolproof probing, use them as follows: | |
765 | * | |
766 | * 1. clear and/or mask the device's internal interrupt. | |
767 | * 2. sti(); | |
768 | * 3. irqs = probe_irq_on(); // "take over" all unassigned idle IRQs | |
769 | * 4. enable the device and cause it to trigger an interrupt. | |
770 | * 5. wait for the device to interrupt, using non-intrusive polling or a delay. | |
771 | * 6. irq = probe_irq_off(irqs); // get IRQ number, 0=none, negative=multiple | |
772 | * 7. service the device to clear its pending interrupt. | |
773 | * 8. loop again if paranoia is required. | |
774 | * | |
775 | * probe_irq_on() returns a mask of allocated irq's. | |
776 | * | |
777 | * probe_irq_off() takes the mask as a parameter, | |
778 | * and returns the irq number which occurred, | |
779 | * or zero if none occurred, or a negative irq number | |
780 | * if more than one irq occurred. | |
781 | */ | |
782 | ||
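The numbered recipe above translates to roughly the following sketch; the foo_* device helpers are hypothetical stand-ins for steps 1, 4, and 7:

```c
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/interrupt.h>

struct foo_dev;						/* hypothetical device */
static void foo_mask_device_irq(struct foo_dev *foo) { }	/* step 1 */
static void foo_trigger_test_irq(struct foo_dev *foo) { }	/* step 4 */
static void foo_ack_device_irq(struct foo_dev *foo) { }	/* step 7 */

static int foo_probe_irq(struct foo_dev *foo)
{
	unsigned long mask;
	int irq;

	foo_mask_device_irq(foo);
	mask = probe_irq_on();		/* take over idle IRQs */
	foo_trigger_test_irq(foo);
	msleep(20);			/* give the device time to fire */
	irq = probe_irq_off(mask);	/* 0 = none, negative = multiple */
	foo_ack_device_irq(foo);

	return irq > 0 ? irq : -ENODEV;
}
```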
0244ad00 | 783 | #if !defined(CONFIG_GENERIC_IRQ_PROBE) |
1da177e4 LT | 784 | static inline unsigned long probe_irq_on(void)
785 | { | |
786 | return 0; | |
787 | } | |
788 | static inline int probe_irq_off(unsigned long val) | |
789 | { | |
790 | return 0; | |
791 | } | |
792 | static inline unsigned int probe_irq_mask(unsigned long val) | |
793 | { | |
794 | return 0; | |
795 | } | |
796 | #else | |
797 | extern unsigned long probe_irq_on(void); /* returns 0 on failure */ | |
798 | extern int probe_irq_off(unsigned long); /* returns 0 or negative on failure */ | |
799 | extern unsigned int probe_irq_mask(unsigned long); /* returns mask of ISA interrupts */ | |
800 | #endif | |
801 | ||
6168a702 AM | 802 | #ifdef CONFIG_PROC_FS
803 | /* Initialize /proc/irq/ */ | |
804 | extern void init_irq_proc(void); | |
805 | #else | |
806 | static inline void init_irq_proc(void) | |
807 | { | |
808 | } | |
809 | #endif | |
810 | ||
b2d3d61a DL | 811 | #ifdef CONFIG_IRQ_TIMINGS
812 | void irq_timings_enable(void); | |
813 | void irq_timings_disable(void); | |
e1c92149 | 814 | u64 irq_timings_next_event(u64 now); |
b2d3d61a DL | 815 | #endif
816 | ||
d43c36dc | 817 | struct seq_file; |
f74596d0 | 818 | int show_interrupts(struct seq_file *p, void *v); |
c78b9b65 | 819 | int arch_show_interrupts(struct seq_file *p, int prec); |
f74596d0 | 820 | |
43a25632 | 821 | extern int early_irq_init(void); |
4a046d17 | 822 | extern int arch_probe_nr_irqs(void); |
43a25632 | 823 | extern int arch_early_irq_init(void); |
43a25632 | 824 | |
be7635e7 AP | 825 | /*
826 | * We want to know which function is an entrypoint of a hardirq or a softirq. | |
827 | */ | |
f0178fc0 | 828 | #ifndef __irq_entry |
33def849 | 829 | # define __irq_entry __section(".irqentry.text") |
f0178fc0 TG | 830 | #endif
831 | ||
33def849 | 832 | #define __softirq_entry __section(".softirqentry.text") |
be7635e7 | 833 | |
1da177e4 | 834 | #endif |