Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
1da177e4 LT |
2 | /* |
3 | * linux/include/linux/nmi.h | |
4 | */ | |
5 | #ifndef LINUX_NMI_H | |
6 | #define LINUX_NMI_H | |
7 | ||
9938406a | 8 | #include <linux/sched.h> |
1da177e4 | 9 | #include <asm/irq.h> |
f2e0cff8 NP |
10 | #if defined(CONFIG_HAVE_NMI_WATCHDOG) |
11 | #include <asm/nmi.h> | |
12 | #endif | |
1da177e4 | 13 | |
d151b27d | 14 | #ifdef CONFIG_LOCKUP_DETECTOR |
05a4a952 | 15 | void lockup_detector_init(void); |
930d8f8d | 16 | void lockup_detector_retry_init(void); |
6554fd8c | 17 | void lockup_detector_soft_poweroff(void); |
941154bd | 18 | void lockup_detector_cleanup(void); |
3b371b59 TG |
19 | |
20 | extern int watchdog_user_enabled; | |
3b371b59 TG |
21 | extern int watchdog_thresh; |
22 | extern unsigned long watchdog_enabled; | |
23 | ||
24 | extern struct cpumask watchdog_cpumask; | |
25 | extern unsigned long *watchdog_cpumask_bits; | |
26 | #ifdef CONFIG_SMP | |
27 | extern int sysctl_softlockup_all_cpu_backtrace; | |
28 | extern int sysctl_hardlockup_all_cpu_backtrace; | |
05a4a952 | 29 | #else |
3b371b59 TG |
30 | #define sysctl_softlockup_all_cpu_backtrace 0 |
31 | #define sysctl_hardlockup_all_cpu_backtrace 0 | |
32 | #endif /* !CONFIG_SMP */ | |
33 | ||
34 | #else /* CONFIG_LOCKUP_DETECTOR */ | |
/* Stubs used when CONFIG_LOCKUP_DETECTOR is not enabled: all no-ops. */
static inline void lockup_detector_init(void) { }
static inline void lockup_detector_retry_init(void) { }
static inline void lockup_detector_soft_poweroff(void) { }
static inline void lockup_detector_cleanup(void) { }
3b371b59 | 39 | #endif /* !CONFIG_LOCKUP_DETECTOR */ |
05a4a952 NP |
40 | |
41 | #ifdef CONFIG_SOFTLOCKUP_DETECTOR | |
d151b27d IM |
42 | extern void touch_softlockup_watchdog_sched(void); |
43 | extern void touch_softlockup_watchdog(void); | |
44 | extern void touch_softlockup_watchdog_sync(void); | |
45 | extern void touch_all_softlockup_watchdogs(void); | |
d151b27d | 46 | extern unsigned int softlockup_panic; |
aef92a8b PZ |
47 | |
48 | extern int lockup_detector_online_cpu(unsigned int cpu); | |
49 | extern int lockup_detector_offline_cpu(unsigned int cpu); | |
50 | #else /* CONFIG_SOFTLOCKUP_DETECTOR */ | |
3b371b59 TG |
/* Stubs used when CONFIG_SOFTLOCKUP_DETECTOR is not enabled: all no-ops. */
static inline void touch_softlockup_watchdog_sched(void) { }
static inline void touch_softlockup_watchdog(void) { }
static inline void touch_softlockup_watchdog_sync(void) { }
static inline void touch_all_softlockup_watchdogs(void) { }

/* No CPU hotplug callbacks are registered without the soft lockup detector. */
#define lockup_detector_online_cpu NULL
#define lockup_detector_offline_cpu NULL
58 | #endif /* CONFIG_SOFTLOCKUP_DETECTOR */ | |
d151b27d IM |
59 | |
60 | #ifdef CONFIG_DETECT_HUNG_TASK | |
61 | void reset_hung_task_detector(void); | |
62 | #else | |
3b371b59 | 63 | static inline void reset_hung_task_detector(void) { } |
d151b27d IM |
64 | #endif |
65 | ||
249e52e3 BM |
66 | /* |
67 | * The run state of the lockup detectors is controlled by the content of the | |
68 | * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit - | |
69 | * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector. | |
70 | * | |
df95d308 DA |
71 | * 'watchdog_user_enabled', 'watchdog_hardlockup_user_enabled' and |
72 | * 'watchdog_softlockup_user_enabled' are variables that are only used as an | |
7feeb9cd TG |
73 | * 'interface' between the parameters in /proc/sys/kernel and the internal |
74 | * state bits in 'watchdog_enabled'. The 'watchdog_thresh' variable is | |
75 | * handled differently because its value is not boolean, and the lockup | |
76 | * detectors are 'suspended' while 'watchdog_thresh' is equal zero. | |
249e52e3 | 77 | */ |
df95d308 DA |
78 | #define WATCHDOG_HARDLOCKUP_ENABLED_BIT 0 |
79 | #define WATCHDOG_SOFTOCKUP_ENABLED_BIT 1 | |
80 | #define WATCHDOG_HARDLOCKUP_ENABLED (1 << WATCHDOG_HARDLOCKUP_ENABLED_BIT) | |
81 | #define WATCHDOG_SOFTOCKUP_ENABLED (1 << WATCHDOG_SOFTOCKUP_ENABLED_BIT) | |
249e52e3 | 82 | |
f2e0cff8 NP |
83 | #if defined(CONFIG_HARDLOCKUP_DETECTOR) |
84 | extern void hardlockup_detector_disable(void); | |
05a4a952 | 85 | extern unsigned int hardlockup_panic; |
f2e0cff8 NP |
86 | #else |
87 | static inline void hardlockup_detector_disable(void) {} | |
88 | #endif | |
89 | ||
1f423c90 | 90 | #if defined(CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER) |
ed92e1ef | 91 | void arch_touch_nmi_watchdog(void); |
1f423c90 | 92 | void watchdog_hardlockup_touch_cpu(unsigned int cpu); |
77c12fc9 | 93 | void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs); |
ed92e1ef DA |
94 | #elif !defined(CONFIG_HAVE_NMI_WATCHDOG) |
95 | static inline void arch_touch_nmi_watchdog(void) { } | |
81972551 DA |
96 | #endif |
97 | ||
05a4a952 | 98 | #if defined(CONFIG_HARDLOCKUP_DETECTOR_PERF) |
d0b6e0a8 PZ |
99 | extern void hardlockup_detector_perf_stop(void); |
100 | extern void hardlockup_detector_perf_restart(void); | |
941154bd | 101 | extern void hardlockup_detector_perf_cleanup(void); |
f2e0cff8 | 102 | #else |
d0b6e0a8 PZ |
/* Stubs used when CONFIG_HARDLOCKUP_DETECTOR_PERF is not enabled: no-ops. */
static inline void hardlockup_detector_perf_stop(void) { }
static inline void hardlockup_detector_perf_restart(void) { }
static inline void hardlockup_detector_perf_cleanup(void) { }
05a4a952 | 106 | #endif |
f2e0cff8 | 107 | |
df95d308 DA |
108 | void watchdog_hardlockup_stop(void); |
109 | void watchdog_hardlockup_start(void); | |
110 | int watchdog_hardlockup_probe(void); | |
111 | void watchdog_hardlockup_enable(unsigned int cpu); | |
112 | void watchdog_hardlockup_disable(unsigned int cpu); | |
6592ad2f | 113 | |
7c56a873 LD |
114 | void lockup_detector_reconfigure(void); |
115 | ||
1f423c90 DA |
116 | #ifdef CONFIG_HARDLOCKUP_DETECTOR_BUDDY |
117 | void watchdog_buddy_check_hardlockup(unsigned long hrtimer_interrupts); | |
118 | #else | |
119 | static inline void watchdog_buddy_check_hardlockup(unsigned long hrtimer_interrupts) {} | |
120 | #endif | |
121 | ||
/**
 * touch_nmi_watchdog - manually reset the hardlockup watchdog timeout.
 *
 * When hardlockup detection is supported, this pets the watchdog (resets
 * its timeout); call it from code that deliberately keeps interrupts
 * disabled for a long stretch. The call is stateless.
 *
 * Despite the "nmi" in the name, the configured hardlockup detector is
 * not necessarily backed by NMIs; a rename to touch_hardlockup_watchdog()
 * is anticipated.
 */
static inline void touch_nmi_watchdog(void)
{
	/*
	 * Forward to whichever hardlockup detector was selected via CONFIG_.
	 * It may be neither arch-specific nor NMI-based, and the
	 * arch_touch_nmi_watchdog() name is likewise expected to change.
	 */
	arch_touch_nmi_watchdog();

	/* Petting the hard watchdog also pets the soft lockup watchdog. */
	touch_softlockup_watchdog();
}
6e7458a6 | 145 | |
47cab6a7 IM |
146 | /* |
147 | * Create trigger_all_cpu_backtrace() out of the arch-provided | |
148 | * base function. Return whether such support was available, | |
149 | * to allow calling code to fall back to some other mechanism: | |
150 | */ | |
9a01c3ed | 151 | #ifdef arch_trigger_cpumask_backtrace |
47cab6a7 IM |
152 | static inline bool trigger_all_cpu_backtrace(void) |
153 | { | |
9a01c3ed | 154 | arch_trigger_cpumask_backtrace(cpu_online_mask, false); |
47cab6a7 IM |
155 | return true; |
156 | } | |
9a01c3ed | 157 | |
f3aca3d0 AT |
158 | static inline bool trigger_allbutself_cpu_backtrace(void) |
159 | { | |
9a01c3ed CM |
160 | arch_trigger_cpumask_backtrace(cpu_online_mask, true); |
161 | return true; | |
162 | } | |
163 | ||
164 | static inline bool trigger_cpumask_backtrace(struct cpumask *mask) | |
165 | { | |
166 | arch_trigger_cpumask_backtrace(mask, false); | |
167 | return true; | |
168 | } | |
169 | ||
170 | static inline bool trigger_single_cpu_backtrace(int cpu) | |
171 | { | |
172 | arch_trigger_cpumask_backtrace(cpumask_of(cpu), false); | |
f3aca3d0 AT |
173 | return true; |
174 | } | |
b2c0b2cb RK |
175 | |
176 | /* generic implementation */ | |
9a01c3ed CM |
177 | void nmi_trigger_cpumask_backtrace(const cpumask_t *mask, |
178 | bool exclude_self, | |
b2c0b2cb RK |
179 | void (*raise)(cpumask_t *mask)); |
180 | bool nmi_cpu_backtrace(struct pt_regs *regs); | |
181 | ||
47cab6a7 IM |
182 | #else |
/* No arch backtrace support: report that nothing was triggered. */
static inline bool trigger_all_cpu_backtrace(void)
{
	return false;
}
f3aca3d0 AT |
/* No arch backtrace support: report that nothing was triggered. */
static inline bool trigger_allbutself_cpu_backtrace(void)
{
	return false;
}
9a01c3ed CM |
/* No arch backtrace support: report that nothing was triggered. */
static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
{
	return false;
}
/* No arch backtrace support: report that nothing was triggered. */
static inline bool trigger_single_cpu_backtrace(int cpu)
{
	return false;
}
bb81a09e AM |
199 | #endif |
200 | ||
05a4a952 | 201 | #ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF |
4eec42f3 | 202 | u64 hw_nmi_get_sample_period(int watchdog_thresh); |
b17aa959 | 203 | bool arch_perf_nmi_is_available(void); |
05a4a952 NP |
204 | #endif |
205 | ||
7edaeb68 | 206 | #if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \ |
5e008df1 | 207 | defined(CONFIG_HARDLOCKUP_DETECTOR_PERF) |
7edaeb68 TG |
208 | void watchdog_update_hrtimer_threshold(u64 period); |
209 | #else | |
210 | static inline void watchdog_update_hrtimer_threshold(u64 period) { } | |
211 | #endif | |
212 | ||
504d7cf1 | 213 | struct ctl_table; |
32927393 CH |
214 | int proc_watchdog(struct ctl_table *, int, void *, size_t *, loff_t *); |
215 | int proc_nmi_watchdog(struct ctl_table *, int , void *, size_t *, loff_t *); | |
216 | int proc_soft_watchdog(struct ctl_table *, int , void *, size_t *, loff_t *); | |
217 | int proc_watchdog_thresh(struct ctl_table *, int , void *, size_t *, loff_t *); | |
218 | int proc_watchdog_cpumask(struct ctl_table *, int, void *, size_t *, loff_t *); | |
84e478c6 | 219 | |
44a69f61 TN |
220 | #ifdef CONFIG_HAVE_ACPI_APEI_NMI |
221 | #include <asm/nmi.h> | |
222 | #endif | |
223 | ||
344da544 PM |
224 | #ifdef CONFIG_NMI_CHECK_CPU |
225 | void nmi_backtrace_stall_snap(const struct cpumask *btp); | |
226 | void nmi_backtrace_stall_check(const struct cpumask *btp); | |
227 | #else | |
/* Stubs used when CONFIG_NMI_CHECK_CPU is not enabled: no-ops. */
static inline void nmi_backtrace_stall_snap(const struct cpumask *btp) { }
static inline void nmi_backtrace_stall_check(const struct cpumask *btp) { }
230 | #endif | |
231 | ||
1da177e4 | 232 | #endif |