// SPDX-License-Identifier: GPL-2.0
/*
 * Detect hard lockups on a system using perf
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */
#define pr_fmt(fmt) "NMI watchdog: " fmt

#include <linux/nmi.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/sched/debug.h>

#include <asm/irq_regs.h>
#include <linux/perf_event.h>
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
static DEFINE_PER_CPU(struct perf_event *, dead_event);
static struct cpumask dead_events_mask;

static atomic_t watchdog_cpus = ATOMIC_INIT(0);
#ifdef CONFIG_HARDLOCKUP_CHECK_TIMESTAMP
static DEFINE_PER_CPU(ktime_t, last_timestamp);
static DEFINE_PER_CPU(unsigned int, nmi_rearmed);
static ktime_t watchdog_hrtimer_sample_threshold __read_mostly;
void watchdog_update_hrtimer_threshold(u64 period)
{
	/*
	 * The hrtimer runs with a period of (watchdog_threshold * 2) / 5
	 *
	 * So it runs effectively with 2.5 times the rate of the NMI
	 * watchdog. That means the hrtimer should fire 2-3 times before
	 * the NMI watchdog expires. The NMI watchdog on x86 is based on
	 * unhalted CPU cycles, so if Turbo-Mode is enabled the CPU cycles
	 * might run way faster than expected and the NMI fires in a
	 * smaller period than the one deduced from the nominal CPU
	 * frequency. Depending on the Turbo-Mode factor this might be fast
	 * enough to get the NMI period smaller than the hrtimer watchdog
	 * period and trigger false positives.
	 *
	 * The sample threshold is used to check in the NMI handler whether
	 * the minimum time between two NMI samples has elapsed. That
	 * prevents false positives.
	 *
	 * Set this to 4/5 of the actual watchdog threshold period so the
	 * hrtimer is guaranteed to fire at least once within the real
	 * watchdog threshold.
	 */
	watchdog_hrtimer_sample_threshold = period * 2;
}
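
/*
 * Worked example (illustrative, not part of the original file): with the
 * default watchdog_thresh of 10s the hrtimer period is 10 * 2 / 5 = 4s,
 * so the sample threshold becomes 4s * 2 = 8s, i.e. 4/5 of the watchdog
 * threshold, while the NMI nominally fires every 10s.
 */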
static bool watchdog_check_timestamp(void)
{
	ktime_t delta, now = ktime_get_mono_fast_ns();

	delta = now - __this_cpu_read(last_timestamp);
	if (delta < watchdog_hrtimer_sample_threshold) {
		/*
		 * If ktime is jiffies based, a stalled timer would prevent
		 * jiffies from being incremented and the filter would look
		 * at a stale timestamp and never trigger.
		 */
		if (__this_cpu_inc_return(nmi_rearmed) < 10)
			return false;
	}
	__this_cpu_write(nmi_rearmed, 0);
	__this_cpu_write(last_timestamp, now);
	return true;
}
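
/*
 * Note (added for clarity): the nmi_rearmed counter lets the check fall
 * through after 10 consecutive too-short deltas, so a stalled jiffies-based
 * clocksource cannot keep feeding a stale timestamp and suppress hardlockup
 * detection forever.
 */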
#else
static inline bool watchdog_check_timestamp(void)
{
	return true;
}
#endif
static struct perf_event_attr wd_hw_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.size		= sizeof(struct perf_event_attr),
	.pinned		= 1,
	.disabled	= 1,
};
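
/*
 * Fallback attribute (explanatory note, not in the original file):
 * wd_hw_attr may be rewritten to a raw event by
 * hardlockup_config_perf_event() below; if creating that user-specified
 * event fails, hardlockup_detector_event_create() retries with this
 * untouched default.
 */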
static struct perf_event_attr fallback_wd_hw_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.size		= sizeof(struct perf_event_attr),
	.pinned		= 1,
	.disabled	= 1,
};
/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
				       struct perf_sample_data *data,
				       struct pt_regs *regs)
{
	/* Ensure the watchdog never gets throttled */
	event->hw.interrupts = 0;

	if (!watchdog_check_timestamp())
		return;

	watchdog_hardlockup_check(smp_processor_id(), regs);
}
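
/*
 * Note (added for clarity): the perf core counts overflows in
 * event->hw.interrupts and throttles events that exceed the allowed
 * sample rate; zeroing the count on every overflow keeps the watchdog
 * event permanently exempt from that throttling.
 */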
static int hardlockup_detector_event_create(void)
{
	unsigned int cpu;
	struct perf_event_attr *wd_attr;
	struct perf_event *evt;

	/*
	 * Preemption is not disabled because memory will be allocated.
	 * Ensure CPU-locality by calling this in per-CPU kthread.
	 */
	WARN_ON(!is_percpu_thread());
	cpu = raw_smp_processor_id();
	wd_attr = &wd_hw_attr;
	wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);

	/* Try to register using hardware perf events */
	evt = perf_event_create_kernel_counter(wd_attr, cpu, NULL,
					       watchdog_overflow_callback, NULL);
	if (IS_ERR(evt)) {
		wd_attr = &fallback_wd_hw_attr;
		wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);
		evt = perf_event_create_kernel_counter(wd_attr, cpu, NULL,
						       watchdog_overflow_callback, NULL);
	}

	if (IS_ERR(evt)) {
		pr_debug("Perf event create on CPU %d failed with %ld\n", cpu,
			 PTR_ERR(evt));
		return PTR_ERR(evt);
	}
	this_cpu_write(watchdog_ev, evt);
	return 0;
}
/**
 * watchdog_hardlockup_enable - Enable the local event
 * @cpu: The CPU to enable hard lockup on.
 */
void watchdog_hardlockup_enable(unsigned int cpu)
{
	WARN_ON_ONCE(cpu != smp_processor_id());

	if (hardlockup_detector_event_create())
		return;

	/* use original value for check */
	if (!atomic_fetch_inc(&watchdog_cpus))
		pr_info("Enabled. Permanently consumes one hw-PMU counter.\n");

	perf_event_enable(this_cpu_read(watchdog_ev));
}
/**
 * watchdog_hardlockup_disable - Disable the local event
 * @cpu: The CPU to disable hard lockup on.
 */
void watchdog_hardlockup_disable(unsigned int cpu)
{
	struct perf_event *event = this_cpu_read(watchdog_ev);

	WARN_ON_ONCE(cpu != smp_processor_id());

	if (event) {
		perf_event_disable(event);
		this_cpu_write(watchdog_ev, NULL);
		this_cpu_write(dead_event, event);
		cpumask_set_cpu(smp_processor_id(), &dead_events_mask);
		atomic_dec(&watchdog_cpus);
	}
}
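
/*
 * Deferred release (explanatory note, not in the original file):
 * perf_event_release_kernel() is not safe to call from the CPU hotplug
 * path, so watchdog_hardlockup_disable() only parks the dead event in
 * dead_event/dead_events_mask; hardlockup_detector_perf_cleanup() below
 * releases it later from a safe, serialized context.
 */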
/**
 * hardlockup_detector_perf_cleanup - Cleanup disabled events and destroy them
 *
 * Called from lockup_detector_cleanup(). Serialized by the caller.
 */
void hardlockup_detector_perf_cleanup(void)
{
	int cpu;

	for_each_cpu(cpu, &dead_events_mask) {
		struct perf_event *event = per_cpu(dead_event, cpu);

		/*
		 * Required because for_each_cpu() unconditionally reports
		 * CPU0 as set on UP kernels. Sigh.
		 */
		if (event)
			perf_event_release_kernel(event);
		per_cpu(dead_event, cpu) = NULL;
	}
	cpumask_clear(&dead_events_mask);
}
/**
 * hardlockup_detector_perf_stop - Globally stop watchdog events
 *
 * Special interface for x86 to handle the perf HT bug.
 */
void __init hardlockup_detector_perf_stop(void)
{
	int cpu;

	lockdep_assert_cpus_held();

	for_each_online_cpu(cpu) {
		struct perf_event *event = per_cpu(watchdog_ev, cpu);

		if (event)
			perf_event_disable(event);
	}
}
/**
 * hardlockup_detector_perf_restart - Globally restart watchdog events
 *
 * Special interface for x86 to handle the perf HT bug.
 */
void __init hardlockup_detector_perf_restart(void)
{
	int cpu;

	lockdep_assert_cpus_held();

	if (!(watchdog_enabled & WATCHDOG_HARDLOCKUP_ENABLED))
		return;

	for_each_online_cpu(cpu) {
		struct perf_event *event = per_cpu(watchdog_ev, cpu);

		if (event)
			perf_event_enable(event);
	}
}
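
/*
 * Note (added for clarity): architectures can override this weak stub to
 * report that no NMI-capable PMU is available, which makes
 * watchdog_hardlockup_probe() bail out with -ENODEV below.
 */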
bool __weak __init arch_perf_nmi_is_available(void)
{
	return true;
}
/**
 * watchdog_hardlockup_probe - Probe whether NMI event is available at all
 */
int __init watchdog_hardlockup_probe(void)
{
	int ret;

	if (!arch_perf_nmi_is_available())
		return -ENODEV;

	ret = hardlockup_detector_event_create();

	if (ret) {
		pr_info("Perf NMI watchdog permanently disabled\n");
	} else {
		perf_event_release_kernel(this_cpu_read(watchdog_ev));
		this_cpu_write(watchdog_ev, NULL);
	}
	return ret;
}
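
/*
 * Note (added for clarity): the probe creates a throwaway event purely to
 * verify that the PMU can back the watchdog; on success it is released
 * immediately, and watchdog_hardlockup_enable() recreates the per-CPU
 * events when the detector is actually switched on.
 */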
/**
 * hardlockup_config_perf_event - Overwrite config of wd_hw_attr.
 * @str: number which identifies the raw perf event to use
 */
void __init hardlockup_config_perf_event(const char *str)
{
	u64 config;
	char buf[24];
	char *comma = strchr(str, ',');

	if (!comma) {
		if (kstrtoull(str, 16, &config))
			return;
	} else {
		unsigned int len = comma - str;

		if (len >= sizeof(buf))
			return;

		if (strscpy(buf, str, sizeof(buf)) < 0)
			return;
		buf[len] = 0;
		if (kstrtoull(buf, 16, &config))
			return;
	}

	wd_hw_attr.type = PERF_TYPE_RAW;
	wd_hw_attr.config = config;
}
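
/*
 * Usage sketch (illustrative; the exact option spelling is defined by the
 * nmi_watchdog= parsing in kernel/watchdog.c, not in this file): a boot
 * parameter such as "nmi_watchdog=r1c2" would pass "1c2" to this function,
 * which parses it as hex (up to an optional trailing comma) and replaces
 * the default cycle counter with raw PMU event 0x1c2.
 */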