Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
1da177e4 LT |
2 | /* |
3 | * linux/include/linux/nmi.h | |
4 | */ | |
5 | #ifndef LINUX_NMI_H | |
6 | #define LINUX_NMI_H | |
7 | ||
9938406a | 8 | #include <linux/sched.h> |
1da177e4 | 9 | #include <asm/irq.h> |
f2e0cff8 NP |
10 | #if defined(CONFIG_HAVE_NMI_WATCHDOG) |
11 | #include <asm/nmi.h> | |
12 | #endif | |
1da177e4 | 13 | |
#ifdef CONFIG_LOCKUP_DETECTOR
void lockup_detector_init(void);
void lockup_detector_soft_poweroff(void);
void lockup_detector_cleanup(void);

extern int watchdog_user_enabled;
extern int watchdog_thresh;
extern unsigned long watchdog_enabled;

extern struct cpumask watchdog_cpumask;
extern unsigned long *watchdog_cpumask_bits;
#ifdef CONFIG_SMP
extern int sysctl_softlockup_all_cpu_backtrace;
extern int sysctl_hardlockup_all_cpu_backtrace;
#else
/* On UP there are no other CPUs to backtrace; compile the knobs away. */
#define sysctl_softlockup_all_cpu_backtrace 0
#define sysctl_hardlockup_all_cpu_backtrace 0
#endif /* !CONFIG_SMP */

#else /* CONFIG_LOCKUP_DETECTOR */
/* No lockup detector configured: all entry points become no-ops. */
static inline void lockup_detector_init(void) { }
static inline void lockup_detector_soft_poweroff(void) { }
static inline void lockup_detector_cleanup(void) { }
#endif /* !CONFIG_LOCKUP_DETECTOR */

#ifdef CONFIG_SOFTLOCKUP_DETECTOR
extern void touch_softlockup_watchdog_sched(void);
extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern unsigned int softlockup_panic;

extern int lockup_detector_online_cpu(unsigned int cpu);
extern int lockup_detector_offline_cpu(unsigned int cpu);
#else /* CONFIG_SOFTLOCKUP_DETECTOR */
static inline void touch_softlockup_watchdog_sched(void) { }
static inline void touch_softlockup_watchdog(void) { }
static inline void touch_softlockup_watchdog_sync(void) { }
static inline void touch_all_softlockup_watchdogs(void) { }

/* NULL here tells the CPU-hotplug core there is no callback to invoke. */
#define lockup_detector_online_cpu NULL
#define lockup_detector_offline_cpu NULL
#endif /* CONFIG_SOFTLOCKUP_DETECTOR */

#ifdef CONFIG_DETECT_HUNG_TASK
void reset_hung_task_detector(void);
#else
static inline void reset_hung_task_detector(void) { }
#endif

/*
 * The run state of the lockup detectors is controlled by the content of the
 * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
 * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
 *
 * 'watchdog_user_enabled', 'watchdog_hardlockup_user_enabled' and
 * 'watchdog_softlockup_user_enabled' are variables that are only used as an
 * 'interface' between the parameters in /proc/sys/kernel and the internal
 * state bits in 'watchdog_enabled'. The 'watchdog_thresh' variable is
 * handled differently because its value is not boolean, and the lockup
 * detectors are 'suspended' while 'watchdog_thresh' is equal zero.
 *
 * NOTE(review): "SOFTOCKUP" (sic, missing 'L') is the established spelling
 * of these macros; keep it so existing users continue to compile.
 */
#define WATCHDOG_HARDLOCKUP_ENABLED_BIT	0
#define WATCHDOG_SOFTOCKUP_ENABLED_BIT	1
#define WATCHDOG_HARDLOCKUP_ENABLED	(1 << WATCHDOG_HARDLOCKUP_ENABLED_BIT)
#define WATCHDOG_SOFTOCKUP_ENABLED	(1 << WATCHDOG_SOFTOCKUP_ENABLED_BIT)

#if defined(CONFIG_HARDLOCKUP_DETECTOR)
extern void hardlockup_detector_disable(void);
extern unsigned int hardlockup_panic;
#else
static inline void hardlockup_detector_disable(void) {}
#endif

#if defined(CONFIG_HARDLOCKUP_DETECTOR_PERF)
void arch_touch_nmi_watchdog(void);
void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs);
#elif !defined(CONFIG_HAVE_NMI_WATCHDOG)
/* Neither perf-based nor arch NMI watchdog: touching it is a no-op. */
static inline void arch_touch_nmi_watchdog(void) { }
#endif

/* The sysctl is only writable when some hardlockup detector can exist. */
#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
# define NMI_WATCHDOG_SYSCTL_PERM	0644
#else
# define NMI_WATCHDOG_SYSCTL_PERM	0444
#endif

#if defined(CONFIG_HARDLOCKUP_DETECTOR_PERF)
extern void hardlockup_detector_perf_stop(void);
extern void hardlockup_detector_perf_restart(void);
extern void hardlockup_detector_perf_cleanup(void);
#else
static inline void hardlockup_detector_perf_stop(void) { }
static inline void hardlockup_detector_perf_restart(void) { }
static inline void hardlockup_detector_perf_cleanup(void) { }
#endif

void watchdog_hardlockup_stop(void);
void watchdog_hardlockup_start(void);
int watchdog_hardlockup_probe(void);
void watchdog_hardlockup_enable(unsigned int cpu);
void watchdog_hardlockup_disable(unsigned int cpu);

void lockup_detector_reconfigure(void);

/**
 * touch_nmi_watchdog - manually reset the hardlockup watchdog timeout.
 *
 * If we support detecting hardlockups, touch_nmi_watchdog() may be
 * used to pet the watchdog (reset the timeout) - for code which
 * intentionally disables interrupts for a long time. This call is stateless.
 *
 * Though this function has "nmi" in the name, the hardlockup watchdog might
 * not be backed by NMIs. This function will likely be renamed to
 * touch_hardlockup_watchdog() in the future.
 */
static inline void touch_nmi_watchdog(void)
{
	/*
	 * Pass on to the hardlockup detector selected via CONFIG_. Note that
	 * the hardlockup detector may not be arch-specific nor using NMIs
	 * and the arch_touch_nmi_watchdog() function will likely be renamed
	 * in the future.
	 */
	arch_touch_nmi_watchdog();

	/*
	 * Touching the hardlock detector implicitly resets the
	 * softlockup detector too
	 */
	touch_softlockup_watchdog();
}

/*
 * Create trigger_all_cpu_backtrace() out of the arch-provided
 * base function. Return whether such support was available,
 * to allow calling code to fall back to some other mechanism:
 */
#ifdef arch_trigger_cpumask_backtrace
static inline bool trigger_all_cpu_backtrace(void)
{
	arch_trigger_cpumask_backtrace(cpu_online_mask, false);
	return true;
}

static inline bool trigger_allbutself_cpu_backtrace(void)
{
	/* exclude_self=true: the caller prints its own backtrace. */
	arch_trigger_cpumask_backtrace(cpu_online_mask, true);
	return true;
}

static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
{
	arch_trigger_cpumask_backtrace(mask, false);
	return true;
}

static inline bool trigger_single_cpu_backtrace(int cpu)
{
	arch_trigger_cpumask_backtrace(cpumask_of(cpu), false);
	return true;
}

/* generic implementation */
void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
				   bool exclude_self,
				   void (*raise)(cpumask_t *mask));
bool nmi_cpu_backtrace(struct pt_regs *regs);

#else
/* No arch support: report that so callers can fall back. */
static inline bool trigger_all_cpu_backtrace(void)
{
	return false;
}
static inline bool trigger_allbutself_cpu_backtrace(void)
{
	return false;
}
static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
{
	return false;
}
static inline bool trigger_single_cpu_backtrace(int cpu)
{
	return false;
}
#endif

05a4a952 | 202 | #ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF |
4eec42f3 | 203 | u64 hw_nmi_get_sample_period(int watchdog_thresh); |
05a4a952 NP |
204 | #endif |
205 | ||
7edaeb68 | 206 | #if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \ |
5e008df1 | 207 | defined(CONFIG_HARDLOCKUP_DETECTOR_PERF) |
7edaeb68 TG |
208 | void watchdog_update_hrtimer_threshold(u64 period); |
209 | #else | |
210 | static inline void watchdog_update_hrtimer_threshold(u64 period) { } | |
211 | #endif | |
212 | ||
504d7cf1 | 213 | struct ctl_table; |
32927393 CH |
214 | int proc_watchdog(struct ctl_table *, int, void *, size_t *, loff_t *); |
215 | int proc_nmi_watchdog(struct ctl_table *, int , void *, size_t *, loff_t *); | |
216 | int proc_soft_watchdog(struct ctl_table *, int , void *, size_t *, loff_t *); | |
217 | int proc_watchdog_thresh(struct ctl_table *, int , void *, size_t *, loff_t *); | |
218 | int proc_watchdog_cpumask(struct ctl_table *, int, void *, size_t *, loff_t *); | |
84e478c6 | 219 | |
44a69f61 TN |
220 | #ifdef CONFIG_HAVE_ACPI_APEI_NMI |
221 | #include <asm/nmi.h> | |
222 | #endif | |
223 | ||
344da544 PM |
224 | #ifdef CONFIG_NMI_CHECK_CPU |
225 | void nmi_backtrace_stall_snap(const struct cpumask *btp); | |
226 | void nmi_backtrace_stall_check(const struct cpumask *btp); | |
227 | #else | |
228 | static inline void nmi_backtrace_stall_snap(const struct cpumask *btp) {} | |
229 | static inline void nmi_backtrace_stall_check(const struct cpumask *btp) {} | |
230 | #endif | |
231 | ||
1da177e4 | 232 | #endif |