Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
1da177e4 LT |
2 | /* |
3 | * IRQ subsystem internal functions and variables: | |
dbec07ba TG |
4 | * |
5 | * Do not ever include this file from anything else than | |
6 | * kernel/irq/. Do not even think about using any information outside | |
7 | * of this file for your non core code. | |
1da177e4 | 8 | */ |
e144710b | 9 | #include <linux/irqdesc.h> |
8f945a33 | 10 | #include <linux/kernel_stat.h> |
be45beb2 | 11 | #include <linux/pm_runtime.h> |
b2d3d61a | 12 | #include <linux/sched/clock.h> |
1da177e4 | 13 | |
#ifdef CONFIG_SPARSE_IRQ
/* Descriptors are allocated on demand; no build-time upper bound */
# define MAX_SPARSE_IRQS	INT_MAX
#else
/* Static descriptor array: size fixed at build time */
# define MAX_SPARSE_IRQS	NR_IRQS
#endif

/* Shorthand for the deliberately unwieldy field name in struct irq_desc */
#define istate core_internal_state__do_not_mess_with_it

/* When true, spurious/unhandled interrupt debugging is disabled */
extern bool noirqdebug;

/* Marker action: a descriptor whose action points here is a chained irq */
extern struct irqaction chained_action;
1535dfac TG |
26 | /* |
27 | * Bits used by threaded handlers: | |
28 | * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run | |
1535dfac TG |
29 | * IRQTF_WARNED - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed |
30 | * IRQTF_AFFINITY - irq thread is requested to adjust affinity | |
8d32a307 | 31 | * IRQTF_FORCED_THREAD - irq action is force threaded |
8707898e | 32 | * IRQTF_READY - signals that irq thread is ready |
1535dfac TG |
33 | */ |
34 | enum { | |
35 | IRQTF_RUNTHREAD, | |
1535dfac TG |
36 | IRQTF_WARNED, |
37 | IRQTF_AFFINITY, | |
8d32a307 | 38 | IRQTF_FORCED_THREAD, |
8707898e | 39 | IRQTF_READY, |
1535dfac TG |
40 | }; |
41 | ||
/*
 * Bit masks for desc->core_internal_state__do_not_mess_with_it
 *
 * IRQS_AUTODETECT		- autodetection in progress
 * IRQS_SPURIOUS_DISABLED	- was disabled due to spurious interrupt
 *				  detection
 * IRQS_POLL_INPROGRESS		- polling in progress
 * IRQS_ONESHOT			- irq is not unmasked in primary handler
 * IRQS_REPLAY			- irq has been resent and will not be resent
 *				  again until the handler has run and cleared
 *				  this flag.
 * IRQS_WAITING			- irq is waiting
 * IRQS_PENDING			- irq needs to be resent and should be resent
 *				  at the next available opportunity.
 * IRQS_SUSPENDED		- irq is suspended
 * IRQS_TIMINGS			- irq occurrences are recorded for timings
 *				  (see irq_setup_timings())
 * IRQS_NMI			- irq line is used to deliver NMIs
 * IRQS_SYSFS			- descriptor has been added to sysfs
 */
enum {
	IRQS_AUTODETECT		= 0x00000001,
	IRQS_SPURIOUS_DISABLED	= 0x00000002,
	IRQS_POLL_INPROGRESS	= 0x00000008,
	IRQS_ONESHOT		= 0x00000020,
	IRQS_REPLAY		= 0x00000040,
	IRQS_WAITING		= 0x00000080,
	IRQS_PENDING		= 0x00000200,
	IRQS_SUSPENDED		= 0x00000800,
	IRQS_TIMINGS		= 0x00001000,
	IRQS_NMI		= 0x00002000,
	IRQS_SYSFS		= 0x00004000,
};
73 | ||
1ce6068d TG |
74 | #include "debug.h" |
75 | #include "settings.h" | |
76 | ||
extern int __irq_set_trigger(struct irq_desc *desc, unsigned long flags);
extern void __disable_irq(struct irq_desc *desc);
extern void __enable_irq(struct irq_desc *desc);

/* Named values for the bool arguments of irq_startup() and friends */
#define IRQ_RESEND	true
#define IRQ_NORESEND	false

#define IRQ_START_FORCE	true
#define IRQ_START_COND	false

/* Descriptor activation / startup / shutdown primitives */
extern int irq_activate(struct irq_desc *desc);
extern int irq_activate_and_startup(struct irq_desc *desc, bool resend);
extern int irq_startup(struct irq_desc *desc, bool resend, bool force);
extern void irq_startup_managed(struct irq_desc *desc);

extern void irq_shutdown(struct irq_desc *desc);
extern void irq_shutdown_and_deactivate(struct irq_desc *desc);
extern void irq_disable(struct irq_desc *desc);
extern void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu);
extern void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu);
/* Mask/unmask at the irq chip level */
extern void mask_irq(struct irq_desc *desc);
extern void unmask_irq(struct irq_desc *desc);
extern void unmask_threaded_irq(struct irq_desc *desc);
46999238 | 100 | |
f63b6a05 TG |
101 | #ifdef CONFIG_SPARSE_IRQ |
102 | static inline void irq_mark_irq(unsigned int irq) { } | |
103 | #else | |
104 | extern void irq_mark_irq(unsigned int irq); | |
105 | #endif | |
106 | ||
/* Interrupt event handling entry points */
irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc);
irqreturn_t handle_irq_event_percpu(struct irq_desc *desc);
irqreturn_t handle_irq_event(struct irq_desc *desc);

/* Resending of interrupts :*/
int check_irq_resend(struct irq_desc *desc, bool inject);
void clear_irq_resend(struct irq_desc *desc);
void irq_resend_init(struct irq_desc *desc);
/* Wait until IRQS_POLL_INPROGRESS handling on another CPU has finished */
bool irq_wait_for_poll(struct irq_desc *desc);
/* Kick the thread associated with @action */
void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action);

void wake_threads_waitq(struct irq_desc *desc);
#ifdef CONFIG_PROC_FS
/* /proc/irq/ registration hooks; stubbed out when procfs is disabled */
extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
extern void unregister_irq_proc(unsigned int irq, struct irq_desc *desc);
extern void register_handler_proc(unsigned int irq, struct irqaction *action);
extern void unregister_handler_proc(unsigned int irq, struct irqaction *action);
#else
static inline void register_irq_proc(unsigned int irq, struct irq_desc *desc) { }
static inline void unregister_irq_proc(unsigned int irq, struct irq_desc *desc) { }
static inline void register_handler_proc(unsigned int irq,
					 struct irqaction *action) { }
static inline void unregister_handler_proc(unsigned int irq,
					   struct irqaction *action) { }
#endif
133 | ||
/* Affinity handling */
extern bool irq_can_set_affinity_usr(unsigned int irq);

extern int irq_do_set_affinity(struct irq_data *data,
			       const struct cpumask *dest, bool force);

#ifdef CONFIG_SMP
extern int irq_setup_affinity(struct irq_desc *desc);
#else
/* UP: nothing to set up, always succeeds */
static inline int irq_setup_affinity(struct irq_desc *desc) { return 0; }
#endif
144 | ||

/* Iterate @act over the singly linked irqaction list of @desc */
#define for_each_action_of_desc(desc, act)			\
	for (act = desc->action; act; act = act->next)
148 | ||
70aedd24 | 149 | /* Inline functions for support of irq chips on slow busses */ |
3876ec9e | 150 | static inline void chip_bus_lock(struct irq_desc *desc) |
70aedd24 | 151 | { |
3876ec9e TG |
152 | if (unlikely(desc->irq_data.chip->irq_bus_lock)) |
153 | desc->irq_data.chip->irq_bus_lock(&desc->irq_data); | |
70aedd24 TG |
154 | } |
155 | ||
3876ec9e | 156 | static inline void chip_bus_sync_unlock(struct irq_desc *desc) |
70aedd24 | 157 | { |
3876ec9e TG |
158 | if (unlikely(desc->irq_data.chip->irq_bus_sync_unlock)) |
159 | desc->irq_data.chip->irq_bus_sync_unlock(&desc->irq_data); | |
70aedd24 TG |
160 | } |
161 | ||
/* Validation flags for __irq_get_desc_lock() */
#define _IRQ_DESC_CHECK		(1 << 0)
#define _IRQ_DESC_PERCPU	(1 << 1)

#define IRQ_GET_DESC_CHECK_GLOBAL	(_IRQ_DESC_CHECK)
#define IRQ_GET_DESC_CHECK_PERCPU	(_IRQ_DESC_CHECK | _IRQ_DESC_PERCPU)

/*
 * Look up and lock the descriptor for @irq; @bus additionally takes the
 * slow-bus chip lock. Paired with __irq_put_desc_unlock().
 */
struct irq_desc *__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
				     unsigned int check);
void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus);

/* Scope-based guard: unlocks the descriptor automatically on scope exit */
__DEFINE_CLASS_IS_CONDITIONAL(irqdesc_lock, true);
__DEFINE_UNLOCK_GUARD(irqdesc_lock, struct irq_desc,
		      __irq_put_desc_unlock(_T->lock, _T->flags, _T->bus),
		      unsigned long flags; bool bus);

static inline class_irqdesc_lock_t class_irqdesc_lock_constructor(unsigned int irq, bool bus,
								  unsigned int check)
{
	class_irqdesc_lock_t _t = { .bus = bus, };

	/* May yield a NULL _t.lock; the guard is conditional on it */
	_t.lock = __irq_get_desc_lock(irq, &_t.flags, bus, check);

	return _t;
}

#define scoped_irqdesc_get_and_lock(_irq, _check)		\
	scoped_guard(irqdesc_lock, _irq, false, _check)

#define scoped_irqdesc_get_and_buslock(_irq, _check)	\
	scoped_guard(irqdesc_lock, _irq, true, _check)

/* The descriptor acquired by the enclosing scoped_irqdesc_* scope */
#define scoped_irqdesc	((struct irq_desc *)(__guard_ptr(irqdesc_lock)(&scope)))
194 | ||
/* Access the private state word of irq_data; #undef'd again below */
#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)

/* Raw read of the irq_data state word (IRQD_* bits) */
static inline unsigned int irqd_get(struct irq_data *d)
{
	return __irqd_to_state(d);
}
201 | ||
/*
 * Manipulation functions for irq_data.state
 *
 * NOTE(review): these are plain read-modify-write ops; presumably callers
 * serialize via the descriptor lock — confirm before using elsewhere.
 */
static inline void irqd_set_move_pending(struct irq_data *d)
{
	__irqd_to_state(d) |= IRQD_SETAFFINITY_PENDING;
}

static inline void irqd_clr_move_pending(struct irq_data *d)
{
	__irqd_to_state(d) &= ~IRQD_SETAFFINITY_PENDING;
}

static inline void irqd_set_managed_shutdown(struct irq_data *d)
{
	__irqd_to_state(d) |= IRQD_MANAGED_SHUTDOWN;
}

static inline void irqd_clr_managed_shutdown(struct irq_data *d)
{
	__irqd_to_state(d) &= ~IRQD_MANAGED_SHUTDOWN;
}

/* Generic clear/set/test of arbitrary IRQD_* mask bits */
static inline void irqd_clear(struct irq_data *d, unsigned int mask)
{
	__irqd_to_state(d) &= ~mask;
}

static inline void irqd_set(struct irq_data *d, unsigned int mask)
{
	__irqd_to_state(d) |= mask;
}

static inline bool irqd_has_set(struct irq_data *d, unsigned int mask)
{
	return __irqd_to_state(d) & mask;
}

static inline void irq_state_set_disabled(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static inline void irq_state_set_masked(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}
249 | ||
#undef __irqd_to_state

/* Bump only the per-cpu interrupt counters for @desc */
static inline void __kstat_incr_irqs_this_cpu(struct irq_desc *desc)
{
	__this_cpu_inc(desc->kstat_irqs->cnt);
	__this_cpu_inc(kstat.irqs_sum);
}

/* Per-cpu counters plus the descriptor-global total */
static inline void kstat_incr_irqs_this_cpu(struct irq_desc *desc)
{
	__kstat_incr_irqs_this_cpu(desc);
	desc->tot_count++;
}
263 | ||
/* NUMA node the descriptor's common data is associated with */
static inline int irq_desc_get_node(struct irq_desc *desc)
{
	return irq_common_data_get_node(&desc->irq_common_data);
}

/* True if @desc is handled via a chained handler (chained_action marker) */
static inline int irq_desc_is_chained(struct irq_desc *desc)
{
	return (desc->action && desc->action == &chained_action);
}

/* True if the line delivers NMIs (IRQS_NMI set in the internal state) */
static inline bool irq_is_nmi(struct irq_desc *desc)
{
	return desc->istate & IRQS_NMI;
}
278 | ||
#ifdef CONFIG_PM_SLEEP
/* Suspend/resume hooks; stubs when PM sleep support is not built in */
bool irq_pm_check_wakeup(struct irq_desc *desc);
void irq_pm_install_action(struct irq_desc *desc, struct irqaction *action);
void irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action);
#else
static inline bool irq_pm_check_wakeup(struct irq_desc *desc) { return false; }
static inline void
irq_pm_install_action(struct irq_desc *desc, struct irqaction *action) { }
static inline void
irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action) { }
#endif
f1602039 | 290 | |
b2d3d61a DL |
291 | #ifdef CONFIG_IRQ_TIMINGS |
292 | ||
293 | #define IRQ_TIMINGS_SHIFT 5 | |
294 | #define IRQ_TIMINGS_SIZE (1 << IRQ_TIMINGS_SHIFT) | |
295 | #define IRQ_TIMINGS_MASK (IRQ_TIMINGS_SIZE - 1) | |
296 | ||
297 | /** | |
298 | * struct irq_timings - irq timings storing structure | |
299 | * @values: a circular buffer of u64 encoded <timestamp,irq> values | |
300 | * @count: the number of elements in the array | |
301 | */ | |
302 | struct irq_timings { | |
303 | u64 values[IRQ_TIMINGS_SIZE]; | |
304 | int count; | |
305 | }; | |
306 | ||
307 | DECLARE_PER_CPU(struct irq_timings, irq_timings); | |
308 | ||
extern void irq_timings_free(int irq);
extern int irq_timings_alloc(int irq);

/* Stop recording timings for @desc and release the per-irq storage */
static inline void irq_remove_timings(struct irq_desc *desc)
{
	desc->istate &= ~IRQS_TIMINGS;

	irq_timings_free(irq_desc_get_irq(desc));
}
318 | ||
319 | static inline void irq_setup_timings(struct irq_desc *desc, struct irqaction *act) | |
320 | { | |
e1c92149 DL |
321 | int irq = irq_desc_get_irq(desc); |
322 | int ret; | |
323 | ||
b2d3d61a DL |
324 | /* |
325 | * We don't need the measurement because the idle code already | |
326 | * knows the next expiry event. | |
327 | */ | |
328 | if (act->flags & __IRQF_TIMER) | |
329 | return; | |
330 | ||
e1c92149 DL |
331 | /* |
332 | * In case the timing allocation fails, we just want to warn, | |
333 | * not fail, so letting the system boot anyway. | |
334 | */ | |
335 | ret = irq_timings_alloc(irq); | |
336 | if (ret) { | |
337 | pr_warn("Failed to allocate irq timing stats for irq%d (%d)", | |
338 | irq, ret); | |
339 | return; | |
340 | } | |
341 | ||
b2d3d61a DL |
342 | desc->istate |= IRQS_TIMINGS; |
343 | } | |
344 | ||
345 | extern void irq_timings_enable(void); | |
346 | extern void irq_timings_disable(void); | |
347 | ||
348 | DECLARE_STATIC_KEY_FALSE(irq_timing_enabled); | |
349 | ||
350 | /* | |
351 | * The interrupt number and the timestamp are encoded into a single | |
352 | * u64 variable to optimize the size. | |
353 | * 48 bit time stamp and 16 bit IRQ number is way sufficient. | |
354 | * Who cares an IRQ after 78 hours of idle time? | |
355 | */ | |
356 | static inline u64 irq_timing_encode(u64 timestamp, int irq) | |
357 | { | |
358 | return (timestamp << 16) | irq; | |
359 | } | |
360 | ||
361 | static inline int irq_timing_decode(u64 value, u64 *timestamp) | |
362 | { | |
363 | *timestamp = value >> 16; | |
364 | return value & U16_MAX; | |
365 | } | |
366 | ||
df025e47 DL |
367 | static __always_inline void irq_timings_push(u64 ts, int irq) |
368 | { | |
369 | struct irq_timings *timings = this_cpu_ptr(&irq_timings); | |
370 | ||
371 | timings->values[timings->count & IRQ_TIMINGS_MASK] = | |
372 | irq_timing_encode(ts, irq); | |
373 | ||
374 | timings->count++; | |
375 | } | |
376 | ||
/*
 * The function record_irq_time is only called in one place in the
 * interrupts handler. We want this function always inline so the code
 * inside is embedded in the function and the static key branching
 * code can act at the higher level. Without the explicit
 * __always_inline we can end up with a function call and a small
 * overhead in the hotpath for nothing.
 */
static __always_inline void record_irq_time(struct irq_desc *desc)
{
	if (!static_branch_likely(&irq_timing_enabled))
		return;

	/* Only descriptors opted in via irq_setup_timings() are recorded */
	if (desc->istate & IRQS_TIMINGS)
		irq_timings_push(local_clock(), irq_desc_get_irq(desc));
}
393 | #else | |
394 | static inline void irq_remove_timings(struct irq_desc *desc) {} | |
395 | static inline void irq_setup_timings(struct irq_desc *desc, | |
396 | struct irqaction *act) {}; | |
397 | static inline void record_irq_time(struct irq_desc *desc) {} | |
398 | #endif /* CONFIG_IRQ_TIMINGS */ | |
399 | ||
400 | ||
#ifdef CONFIG_GENERIC_IRQ_CHIP
/* Initialize a generic irq chip; no-op stub when the feature is off */
void irq_init_generic_chip(struct irq_chip_generic *gc, const char *name,
			   int num_ct, unsigned int irq_base,
			   void __iomem *reg_base, irq_flow_handler_t handler);
#else
static inline void
irq_init_generic_chip(struct irq_chip_generic *gc, const char *name,
		      int num_ct, unsigned int irq_base,
		      void __iomem *reg_base, irq_flow_handler_t handler) { }
#endif /* CONFIG_GENERIC_IRQ_CHIP */
087cdfb6 | 411 | |
137221df CH |
412 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
413 | static inline bool irq_can_move_pcntxt(struct irq_data *data) | |
414 | { | |
f94a1824 | 415 | return !(data->chip->flags & IRQCHIP_MOVE_DEFERRED); |
137221df CH |
416 | } |
417 | static inline bool irq_move_pending(struct irq_data *data) | |
418 | { | |
419 | return irqd_is_setaffinity_pending(data); | |
420 | } | |
421 | static inline void | |
422 | irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) | |
423 | { | |
424 | cpumask_copy(desc->pending_mask, mask); | |
425 | } | |
426 | static inline void | |
427 | irq_get_pending(struct cpumask *mask, struct irq_desc *desc) | |
428 | { | |
429 | cpumask_copy(mask, desc->pending_mask); | |
430 | } | |
f0383c24 TG |
431 | static inline struct cpumask *irq_desc_get_pending_mask(struct irq_desc *desc) |
432 | { | |
433 | return desc->pending_mask; | |
434 | } | |
36d84fb4 | 435 | bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear); |
751dc837 | 436 | void irq_force_complete_move(struct irq_desc *desc); |
137221df CH |
437 | #else /* CONFIG_GENERIC_PENDING_IRQ */ |
438 | static inline bool irq_can_move_pcntxt(struct irq_data *data) | |
439 | { | |
440 | return true; | |
441 | } | |
442 | static inline bool irq_move_pending(struct irq_data *data) | |
443 | { | |
444 | return false; | |
445 | } | |
446 | static inline void | |
447 | irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) | |
448 | { | |
449 | } | |
450 | static inline void | |
451 | irq_get_pending(struct cpumask *mask, struct irq_desc *desc) | |
452 | { | |
453 | } | |
f0383c24 TG |
454 | static inline struct cpumask *irq_desc_get_pending_mask(struct irq_desc *desc) |
455 | { | |
456 | return NULL; | |
457 | } | |
36d84fb4 TG |
458 | static inline bool irq_fixup_move_pending(struct irq_desc *desc, bool fclear) |
459 | { | |
460 | return false; | |
461 | } | |
751dc837 | 462 | static inline void irq_force_complete_move(struct irq_desc *desc) { } |
8d187a77 TG |
463 | #endif /* !CONFIG_GENERIC_PENDING_IRQ */ |
464 | ||
#if !defined(CONFIG_IRQ_DOMAIN) || !defined(CONFIG_IRQ_DOMAIN_HIERARCHY)
/* Without hierarchical domains, activation is just bookkeeping */
static inline int irq_domain_activate_irq(struct irq_data *data, bool reserve)
{
	irqd_set_activated(data);
	return 0;
}
static inline void irq_domain_deactivate_irq(struct irq_data *data)
{
	irqd_clr_activated(data);
}
#endif
476 | ||
/* Parent irq_data in a hierarchical domain; NULL when hierarchy is off */
static inline struct irq_data *irqd_get_parent_data(struct irq_data *irqd)
{
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	return irqd->parent_data;
#else
	return NULL;
#endif
}
485 | ||
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
#include <linux/debugfs.h>

/* Mapping of a state bit mask to its printable name (for debugfs dumps) */
struct irq_bit_descr {
	unsigned int	mask;
	char		*name;
};

#define BIT_MASK_DESCR(m)	{ .mask = m, .name = #m }

void irq_debug_show_bits(struct seq_file *m, int ind, unsigned int state,
			 const struct irq_bit_descr *sd, int size);

void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *desc);
/* Removes the debugfs file and frees the copied device name */
static inline void irq_remove_debugfs_entry(struct irq_desc *desc)
{
	debugfs_remove(desc->debugfs_file);
	kfree(desc->dev_name);
}
void irq_debugfs_copy_devname(int irq, struct device *dev);
# ifdef CONFIG_IRQ_DOMAIN
void irq_domain_debugfs_init(struct dentry *root);
# else
static inline void irq_domain_debugfs_init(struct dentry *root)
{
}
# endif
#else /* CONFIG_GENERIC_IRQ_DEBUGFS */
static inline void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *d)
{
}
static inline void irq_remove_debugfs_entry(struct irq_desc *d)
{
}
static inline void irq_debugfs_copy_devname(int irq, struct device *dev)
{
}
#endif /* CONFIG_GENERIC_IRQ_DEBUGFS */