/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_HARDIRQ_H
#define LINUX_HARDIRQ_H

#include <linux/context_tracking_state.h>
#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <linux/ftrace_irq.h>
#include <linux/vtime.h>
#include <asm/hardirq.h>

extern void synchronize_irq(unsigned int irq);
extern bool synchronize_hardirq(unsigned int irq);

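/*
 * Illustrative sketch, not part of this header: a driver teardown path
 * typically quiesces the device and then waits for any handler still
 * running on another CPU before freeing state. The device layout and
 * irq number below are hypothetical.
 *
 *	writel(0, dev->regs + IRQ_ENABLE);	(mask at the device)
 *	synchronize_irq(dev->irq);
 *	(no handler is running anywhere anymore; safe to free dev state)
 */
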
#ifdef CONFIG_NO_HZ_FULL
void __rcu_irq_enter_check_tick(void);
#else
static inline void __rcu_irq_enter_check_tick(void) { }
#endif

static __always_inline void rcu_irq_enter_check_tick(void)
{
	if (context_tracking_enabled())
		__rcu_irq_enter_check_tick();
}

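/*
 * Illustrative sketch, not part of this header: the arch/generic interrupt
 * entry code is expected to call this when an interrupt arrives while RCU
 * is already watching, so a NO_HZ_FULL CPU can re-enable the tick if RCU
 * needs it. The entry function below is hypothetical.
 *
 *	noinstr void arch_handle_irq(struct pt_regs *regs)
 *	{
 *		...
 *		rcu_irq_enter_check_tick();
 *		...
 *	}
 */
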
/*
 * It is safe to do non-atomic ops on ->hardirq_context,
 * because NMI handlers may not preempt and the ops are
 * always balanced, so the interrupted value of ->hardirq_context
 * will always be restored.
 */
#define __irq_enter()					\
	do {						\
		preempt_count_add(HARDIRQ_OFFSET);	\
		lockdep_hardirq_enter();		\
		account_hardirq_enter(current);		\
	} while (0)

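/*
 * Illustrative sketch, not part of this header: __irq_enter() pairs with
 * __irq_exit() around a hardirq handler; the HARDIRQ bits it adds to
 * preempt_count are what make in_irq()/in_interrupt() true inside:
 *
 *	__irq_enter();
 *	handle_one_irq(desc);		(hypothetical handler call)
 *	__irq_exit();
 */
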
/*
 * Like __irq_enter() without time accounting for fast
 * interrupts, e.g. reschedule IPI where time accounting
 * is more expensive than the actual interrupt.
 */
#define __irq_enter_raw()				\
	do {						\
		preempt_count_add(HARDIRQ_OFFSET);	\
		lockdep_hardirq_enter();		\
	} while (0)

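/*
 * Illustrative sketch, not part of this header: an IPI whose work is
 * cheaper than the accounting itself can use the raw variants; the
 * handler name is hypothetical:
 *
 *	__irq_enter_raw();
 *	scheduler_ipi_work();
 *	__irq_exit_raw();
 */
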
/*
 * Enter irq context (on NO_HZ, update jiffies):
 */
void irq_enter(void);
/*
 * Like irq_enter(), but RCU is already watching.
 */
void irq_enter_rcu(void);

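/*
 * Illustrative sketch, not part of this header: a typical interrupt entry
 * brackets the generic handler with irq_enter()/irq_exit(), which also
 * keeps jiffies and RCU informed on NO_HZ kernels:
 *
 *	irq_enter();
 *	generic_handle_irq(irq);
 *	irq_exit();
 */
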
/*
 * Exit irq context without processing softirqs:
 */
#define __irq_exit()					\
	do {						\
		account_hardirq_exit(current);		\
		lockdep_hardirq_exit();			\
		preempt_count_sub(HARDIRQ_OFFSET);	\
	} while (0)

/*
 * Like __irq_exit() without time accounting
 */
#define __irq_exit_raw()				\
	do {						\
		lockdep_hardirq_exit();			\
		preempt_count_sub(HARDIRQ_OFFSET);	\
	} while (0)

/*
 * Exit irq context and process softirqs if needed:
 */
void irq_exit(void);

/*
 * Like irq_exit(), but return with RCU watching.
 */
void irq_exit_rcu(void);

#ifndef arch_nmi_enter
#define arch_nmi_enter()	do { } while (0)
#define arch_nmi_exit()		do { } while (0)
#endif

#ifdef CONFIG_TINY_RCU
static inline void rcu_nmi_enter(void) { }
static inline void rcu_nmi_exit(void) { }
#else
extern void rcu_nmi_enter(void);
extern void rcu_nmi_exit(void);
#endif

/*
 * NMI vs Tracing
 * --------------
 *
 * We must not land in a tracer until (or after) we've changed preempt_count
 * such that in_nmi() becomes true. To that effect all NMI C entry points must
 * be marked 'notrace' and call nmi_enter() as soon as possible.
 */

/*
 * nmi_enter() can nest up to 15 times; see NMI_BITS.
 */

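/*
 * Illustrative sketch, not part of this header: per the contract above, an
 * NMI C entry point is marked 'notrace' and brackets its work with
 * nmi_enter()/nmi_exit(); the function and handler names are hypothetical:
 *
 *	notrace void arch_do_nmi(struct pt_regs *regs)
 *	{
 *		nmi_enter();
 *		handle_nmi_work(regs);
 *		nmi_exit();
 *	}
 */
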
#define __nmi_enter()						\
	do {							\
		lockdep_off();					\
		arch_nmi_enter();				\
		printk_nmi_enter();				\
		BUG_ON(in_nmi() == NMI_MASK);			\
		__preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \
	} while (0)

#define nmi_enter()						\
	do {							\
		__nmi_enter();					\
		lockdep_hardirq_enter();			\
		rcu_nmi_enter();				\
		instrumentation_begin();			\
		ftrace_nmi_enter();				\
		instrumentation_end();				\
	} while (0)

#define __nmi_exit()						\
	do {							\
		BUG_ON(!in_nmi());				\
		__preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \
		printk_nmi_exit();				\
		arch_nmi_exit();				\
		lockdep_on();					\
	} while (0)

#define nmi_exit()						\
	do {							\
		instrumentation_begin();			\
		ftrace_nmi_exit();				\
		instrumentation_end();				\
		rcu_nmi_exit();					\
		lockdep_hardirq_exit();				\
		__nmi_exit();					\
	} while (0)

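/*
 * Illustrative note, not part of this header: between nmi_enter() and
 * nmi_exit() the NMI bits in preempt_count are raised, so code that is
 * not NMI-safe (e.g. anything taking regular spinlocks) can detect the
 * context and bail out:
 *
 *	if (in_nmi())
 *		return;		(defer to a safe context instead)
 */
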
#endif /* LINUX_HARDIRQ_H */