Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
1da177e4 LT |
2 | /* |
3 | * linux/include/linux/nmi.h | |
4 | */ | |
5 | #ifndef LINUX_NMI_H | |
6 | #define LINUX_NMI_H | |
7 | ||
9938406a | 8 | #include <linux/sched.h> |
1da177e4 | 9 | #include <asm/irq.h> |
0c68bda6 PM |
10 | |
11 | /* Arch specific watchdogs might need to share extra watchdog-related APIs. */ | |
7ca8fe94 | 12 | #if defined(CONFIG_HARDLOCKUP_DETECTOR_ARCH) || defined(CONFIG_HARDLOCKUP_DETECTOR_SPARC64) |
f2e0cff8 NP |
13 | #include <asm/nmi.h> |
14 | #endif | |
1da177e4 | 15 | |
d151b27d | 16 | #ifdef CONFIG_LOCKUP_DETECTOR |
05a4a952 | 17 | void lockup_detector_init(void); |
930d8f8d | 18 | void lockup_detector_retry_init(void); |
6554fd8c | 19 | void lockup_detector_soft_poweroff(void); |
941154bd | 20 | void lockup_detector_cleanup(void); |
3b371b59 TG |
21 | |
22 | extern int watchdog_user_enabled; | |
3b371b59 TG |
23 | extern int watchdog_thresh; |
24 | extern unsigned long watchdog_enabled; | |
25 | ||
26 | extern struct cpumask watchdog_cpumask; | |
27 | extern unsigned long *watchdog_cpumask_bits; | |
28 | #ifdef CONFIG_SMP | |
29 | extern int sysctl_softlockup_all_cpu_backtrace; | |
30 | extern int sysctl_hardlockup_all_cpu_backtrace; | |
05a4a952 | 31 | #else |
3b371b59 TG |
32 | #define sysctl_softlockup_all_cpu_backtrace 0 |
33 | #define sysctl_hardlockup_all_cpu_backtrace 0 | |
34 | #endif /* !CONFIG_SMP */ | |
35 | ||
36 | #else /* CONFIG_LOCKUP_DETECTOR */ | |
/*
 * CONFIG_LOCKUP_DETECTOR=n: compile the lifecycle hooks down to no-ops so
 * callers never need an #ifdef of their own.
 */
static inline void lockup_detector_init(void) { }
static inline void lockup_detector_retry_init(void) { }
static inline void lockup_detector_soft_poweroff(void) { }
static inline void lockup_detector_cleanup(void) { }
3b371b59 | 41 | #endif /* !CONFIG_LOCKUP_DETECTOR */ |
05a4a952 NP |
42 | |
43 | #ifdef CONFIG_SOFTLOCKUP_DETECTOR | |
d151b27d IM |
44 | extern void touch_softlockup_watchdog_sched(void); |
45 | extern void touch_softlockup_watchdog(void); | |
46 | extern void touch_softlockup_watchdog_sync(void); | |
47 | extern void touch_all_softlockup_watchdogs(void); | |
d151b27d | 48 | extern unsigned int softlockup_panic; |
aef92a8b PZ |
49 | |
50 | extern int lockup_detector_online_cpu(unsigned int cpu); | |
51 | extern int lockup_detector_offline_cpu(unsigned int cpu); | |
52 | #else /* CONFIG_SOFTLOCKUP_DETECTOR */ | |
3b371b59 TG |
/*
 * CONFIG_SOFTLOCKUP_DETECTOR=n: petting the soft lockup watchdog is a
 * no-op, so these stubs vanish entirely at compile time.
 */
static inline void touch_softlockup_watchdog_sched(void) { }
static inline void touch_softlockup_watchdog(void) { }
static inline void touch_softlockup_watchdog_sync(void) { }
static inline void touch_all_softlockup_watchdogs(void) { }
aef92a8b PZ |
57 | |
58 | #define lockup_detector_online_cpu NULL | |
59 | #define lockup_detector_offline_cpu NULL | |
60 | #endif /* CONFIG_SOFTLOCKUP_DETECTOR */ | |
d151b27d IM |
61 | |
62 | #ifdef CONFIG_DETECT_HUNG_TASK | |
63 | void reset_hung_task_detector(void); | |
64 | #else | |
3b371b59 | 65 | static inline void reset_hung_task_detector(void) { } |
d151b27d IM |
66 | #endif |
67 | ||
249e52e3 BM |
68 | /* |
69 | * The run state of the lockup detectors is controlled by the content of the | |
70 | * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit - | |
71 | * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector. | |
72 | * | |
df95d308 DA |
73 | * 'watchdog_user_enabled', 'watchdog_hardlockup_user_enabled' and |
74 | * 'watchdog_softlockup_user_enabled' are variables that are only used as an | |
7feeb9cd TG |
75 | * 'interface' between the parameters in /proc/sys/kernel and the internal |
76 | * state bits in 'watchdog_enabled'. The 'watchdog_thresh' variable is | |
77 | * handled differently because its value is not boolean, and the lockup | |
78 | detectors are 'suspended' while 'watchdog_thresh' is equal to zero. | |
249e52e3 | 79 | */ |
df95d308 DA |
80 | #define WATCHDOG_HARDLOCKUP_ENABLED_BIT 0 |
81 | #define WATCHDOG_SOFTOCKUP_ENABLED_BIT 1 | |
82 | #define WATCHDOG_HARDLOCKUP_ENABLED (1 << WATCHDOG_HARDLOCKUP_ENABLED_BIT) | |
83 | #define WATCHDOG_SOFTOCKUP_ENABLED (1 << WATCHDOG_SOFTOCKUP_ENABLED_BIT) | |
249e52e3 | 84 | |
f2e0cff8 NP |
85 | #if defined(CONFIG_HARDLOCKUP_DETECTOR) |
86 | extern void hardlockup_detector_disable(void); | |
05a4a952 | 87 | extern unsigned int hardlockup_panic; |
f2e0cff8 NP |
88 | #else |
89 | static inline void hardlockup_detector_disable(void) {} | |
90 | #endif | |
91 | ||
0c68bda6 | 92 | /* Sparc64 has special implementation that is always enabled. */
47f4cb43 | 93 | #if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HARDLOCKUP_DETECTOR_SPARC64) |
ed92e1ef | 94 | void arch_touch_nmi_watchdog(void); |
0c68bda6 PM |
95 | #else |
96 | static inline void arch_touch_nmi_watchdog(void) { } | |
97 | #endif | |
98 | ||
99 | #if defined(CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER) | |
1f423c90 | 100 | void watchdog_hardlockup_touch_cpu(unsigned int cpu); |
77c12fc9 | 101 | void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs); |
81972551 DA |
102 | #endif |
103 | ||
05a4a952 | 104 | #if defined(CONFIG_HARDLOCKUP_DETECTOR_PERF) |
d0b6e0a8 PZ |
105 | extern void hardlockup_detector_perf_stop(void); |
106 | extern void hardlockup_detector_perf_restart(void); | |
941154bd | 107 | extern void hardlockup_detector_perf_cleanup(void); |
f2e0cff8 | 108 | #else |
d0b6e0a8 PZ |
/*
 * CONFIG_HARDLOCKUP_DETECTOR_PERF=n: the perf-based detector is absent,
 * so its stop/restart/cleanup hooks collapse to no-ops.
 */
static inline void hardlockup_detector_perf_stop(void) { }
static inline void hardlockup_detector_perf_restart(void) { }
static inline void hardlockup_detector_perf_cleanup(void) { }
05a4a952 | 112 | #endif |
f2e0cff8 | 113 | |
df95d308 DA |
114 | void watchdog_hardlockup_stop(void); |
115 | void watchdog_hardlockup_start(void); | |
116 | int watchdog_hardlockup_probe(void); | |
117 | void watchdog_hardlockup_enable(unsigned int cpu); | |
118 | void watchdog_hardlockup_disable(unsigned int cpu); | |
6592ad2f | 119 | |
7c56a873 LD |
120 | void lockup_detector_reconfigure(void); |
121 | ||
1f423c90 | 122 | #ifdef CONFIG_HARDLOCKUP_DETECTOR_BUDDY |
d3b62ace | 123 | void watchdog_buddy_check_hardlockup(int hrtimer_interrupts); |
1f423c90 | 124 | #else |
d3b62ace | 125 | static inline void watchdog_buddy_check_hardlockup(int hrtimer_interrupts) {} |
1f423c90 DA |
126 | #endif |
127 | ||
/**
 * touch_nmi_watchdog - manually reset the hardlockup watchdog timeout.
 *
 * When hardlockup detection is supported, touch_nmi_watchdog() can be used
 * to pet the watchdog (reset its timeout) from code that intentionally
 * keeps interrupts disabled for a long time.  The call is stateless.
 *
 * Despite the "nmi" in the name, the hardlockup watchdog might not be
 * backed by NMIs; this function will likely be renamed to
 * touch_hardlockup_watchdog() in the future.
 */
static inline void touch_nmi_watchdog(void)
{
	/*
	 * Forward to whichever hardlockup detector was selected via CONFIG_.
	 * That detector may be neither arch-specific nor NMI-based, and
	 * arch_touch_nmi_watchdog() is likewise expected to be renamed.
	 */
	arch_touch_nmi_watchdog();

	/* Also pet the soft lockup watchdog on this CPU. */
	touch_softlockup_watchdog();
}
6e7458a6 | 151 | |
47cab6a7 IM |
152 | /* |
153 | * Create trigger_all_cpu_backtrace() out of the arch-provided | |
154 | * base function. Return whether such support was available, | |
155 | * to allow calling code to fall back to some other mechanism: | |
156 | */ | |
9a01c3ed | 157 | #ifdef arch_trigger_cpumask_backtrace |
47cab6a7 IM |
158 | static inline bool trigger_all_cpu_backtrace(void) |
159 | { | |
9a01c3ed | 160 | arch_trigger_cpumask_backtrace(cpu_online_mask, false); |
47cab6a7 IM |
161 | return true; |
162 | } | |
9a01c3ed | 163 | |
f3aca3d0 AT |
164 | static inline bool trigger_allbutself_cpu_backtrace(void) |
165 | { | |
9a01c3ed CM |
166 | arch_trigger_cpumask_backtrace(cpu_online_mask, true); |
167 | return true; | |
168 | } | |
169 | ||
/* Backtrace exactly the CPUs in @mask; the caller is not excluded. */
static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
{
	arch_trigger_cpumask_backtrace(mask, false);
	return true;
}
175 | ||
/* Backtrace a single CPU by wrapping it in a one-bit cpumask. */
static inline bool trigger_single_cpu_backtrace(int cpu)
{
	arch_trigger_cpumask_backtrace(cpumask_of(cpu), false);
	return true;
}
b2c0b2cb RK |
181 | |
182 | /* generic implementation */ | |
9a01c3ed CM |
183 | void nmi_trigger_cpumask_backtrace(const cpumask_t *mask, |
184 | bool exclude_self, | |
b2c0b2cb RK |
185 | void (*raise)(cpumask_t *mask)); |
186 | bool nmi_cpu_backtrace(struct pt_regs *regs); | |
187 | ||
47cab6a7 IM |
188 | #else |
/*
 * No arch_trigger_cpumask_backtrace() on this architecture: report the
 * lack of support so callers can fall back to another mechanism.
 */
static inline bool trigger_all_cpu_backtrace(void)
{
	return false;
}
f3aca3d0 AT |
/* No arch backtrace support: always report failure. */
static inline bool trigger_allbutself_cpu_backtrace(void)
{
	return false;
}
9a01c3ed CM |
/* No arch backtrace support: @mask is ignored, failure is reported. */
static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
{
	return false;
}
/* No arch backtrace support: @cpu is ignored, failure is reported. */
static inline bool trigger_single_cpu_backtrace(int cpu)
{
	return false;
}
bb81a09e AM |
205 | #endif |
206 | ||
05a4a952 | 207 | #ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF |
4eec42f3 | 208 | u64 hw_nmi_get_sample_period(int watchdog_thresh); |
b17aa959 | 209 | bool arch_perf_nmi_is_available(void); |
05a4a952 NP |
210 | #endif |
211 | ||
7edaeb68 | 212 | #if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \ |
5e008df1 | 213 | defined(CONFIG_HARDLOCKUP_DETECTOR_PERF) |
7edaeb68 TG |
214 | void watchdog_update_hrtimer_threshold(u64 period); |
215 | #else | |
216 | static inline void watchdog_update_hrtimer_threshold(u64 period) { } | |
217 | #endif | |
218 | ||
504d7cf1 | 219 | struct ctl_table; |
32927393 CH |
220 | int proc_watchdog(struct ctl_table *, int, void *, size_t *, loff_t *); |
221 | int proc_nmi_watchdog(struct ctl_table *, int , void *, size_t *, loff_t *); | |
222 | int proc_soft_watchdog(struct ctl_table *, int , void *, size_t *, loff_t *); | |
223 | int proc_watchdog_thresh(struct ctl_table *, int , void *, size_t *, loff_t *); | |
224 | int proc_watchdog_cpumask(struct ctl_table *, int, void *, size_t *, loff_t *); | |
84e478c6 | 225 | |
44a69f61 TN |
226 | #ifdef CONFIG_HAVE_ACPI_APEI_NMI |
227 | #include <asm/nmi.h> | |
228 | #endif | |
229 | ||
344da544 PM |
230 | #ifdef CONFIG_NMI_CHECK_CPU |
231 | void nmi_backtrace_stall_snap(const struct cpumask *btp); | |
232 | void nmi_backtrace_stall_check(const struct cpumask *btp); | |
233 | #else | |
/* CONFIG_NMI_CHECK_CPU=n: backtrace-stall snapshotting/checking is a no-op. */
static inline void nmi_backtrace_stall_snap(const struct cpumask *btp) {}
static inline void nmi_backtrace_stall_check(const struct cpumask *btp) {}
236 | #endif | |
237 | ||
1da177e4 | 238 | #endif |