watchdog/hardlockup: rename some "NMI watchdog" constants/function

kernel/watchdog_perf.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Detect hard lockups on a system using perf
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "NMI watchdog: " fmt

#include <linux/nmi.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/sched/debug.h>

#include <asm/irq_regs.h>
#include <linux/perf_event.h>

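/*
 * Per-CPU state: watchdog_ev is the live perf event on each CPU, while
 * dead_event parks a disabled event until it can be released from
 * hardlockup_detector_perf_cleanup(). watchdog_cpus counts how many
 * CPUs currently have an active event.
 */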
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
static DEFINE_PER_CPU(struct perf_event *, dead_event);
static struct cpumask dead_events_mask;

static atomic_t watchdog_cpus = ATOMIC_INIT(0);

#ifdef CONFIG_HARDLOCKUP_CHECK_TIMESTAMP
static DEFINE_PER_CPU(ktime_t, last_timestamp);
static DEFINE_PER_CPU(unsigned int, nmi_rearmed);
static ktime_t watchdog_hrtimer_sample_threshold __read_mostly;

void watchdog_update_hrtimer_threshold(u64 period)
{
	/*
	 * The hrtimer runs with a period of (watchdog_threshold * 2) / 5
	 *
	 * So it runs effectively with 2.5 times the rate of the NMI
	 * watchdog. That means the hrtimer should fire 2-3 times before
	 * the NMI watchdog expires. The NMI watchdog on x86 is based on
	 * unhalted CPU cycles, so if Turbo-Mode is enabled the CPU cycles
	 * might run way faster than expected and the NMI fires in a
	 * smaller period than the one deduced from the nominal CPU
	 * frequency. Depending on the Turbo-Mode factor this might be fast
	 * enough to get the NMI period smaller than the hrtimer watchdog
	 * period and trigger false positives.
	 *
	 * The sample threshold is used to check in the NMI handler whether
	 * the minimum time between two NMI samples has elapsed. That
	 * prevents false positives.
	 *
	 * Set this to 4/5 of the actual watchdog threshold period so the
	 * hrtimer is guaranteed to fire at least once within the real
	 * watchdog threshold.
	 */
	watchdog_hrtimer_sample_threshold = period * 2;
}
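
/*
 * Worked example (illustrative numbers, using the default settings): with
 * watchdog_thresh = 10 seconds, the hrtimer period passed in here is
 * (10 * 2) / 5 = 4 seconds, so the sample threshold becomes 4 * 2 = 8
 * seconds, i.e. 4/5 of the 10 second watchdog threshold. Two perf NMIs
 * arriving less than 8 seconds apart are then treated as premature.
 */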

static bool watchdog_check_timestamp(void)
{
	ktime_t delta, now = ktime_get_mono_fast_ns();

	delta = now - __this_cpu_read(last_timestamp);
	if (delta < watchdog_hrtimer_sample_threshold) {
		/*
		 * If ktime is jiffies based, a stalled timer would prevent
		 * jiffies from being incremented and the filter would look
		 * at a stale timestamp and never trigger.
		 */
		if (__this_cpu_inc_return(nmi_rearmed) < 10)
			return false;
	}
	__this_cpu_write(nmi_rearmed, 0);
	__this_cpu_write(last_timestamp, now);
	return true;
}
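
/*
 * Note: nmi_rearmed acts as an escape hatch for the check above. If the
 * clocksource itself stalls (e.g. a jiffies based ktime), every sample
 * would look premature forever, so the counter lets every tenth premature
 * sample through and the hardlockup check runs anyway.
 */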
#else
static inline bool watchdog_check_timestamp(void)
{
	return true;
}
#endif

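/*
 * The watchdog event counts raw CPU cycles. "pinned" means the event
 * must stay scheduled on the PMU at all times, permanently consuming
 * one hardware counter instead of being multiplexed, and "disabled"
 * means it is created stopped and only starts counting once
 * perf_event_enable() is called on it.
 */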
static struct perf_event_attr wd_hw_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.size		= sizeof(struct perf_event_attr),
	.pinned		= 1,
	.disabled	= 1,
};

/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
				       struct perf_sample_data *data,
				       struct pt_regs *regs)
{
	/* Ensure the watchdog never gets throttled */
	event->hw.interrupts = 0;

	if (!watchdog_check_timestamp())
		return;

	watchdog_hardlockup_check(smp_processor_id(), regs);
}

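/*
 * The callback above runs from the PMU overflow interrupt (an NMI on
 * x86). Zeroing event->hw.interrupts on every overflow keeps perf's
 * interrupt-rate throttling from ever kicking in and silencing the
 * watchdog event.
 */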
static int hardlockup_detector_event_create(void)
{
	unsigned int cpu;
	struct perf_event_attr *wd_attr;
	struct perf_event *evt;

	/*
	 * Preemption is not disabled because memory will be allocated.
	 * Ensure CPU-locality by calling this from a per-CPU kthread.
	 */
	WARN_ON(!is_percpu_thread());
	cpu = raw_smp_processor_id();
	wd_attr = &wd_hw_attr;
	wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);

	/* Try to register using hardware perf events */
	evt = perf_event_create_kernel_counter(wd_attr, cpu, NULL,
					       watchdog_overflow_callback, NULL);
	if (IS_ERR(evt)) {
		pr_debug("Perf event create on CPU %d failed with %ld\n", cpu,
			 PTR_ERR(evt));
		return PTR_ERR(evt);
	}
	this_cpu_write(watchdog_ev, evt);
	return 0;
}

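/*
 * hw_nmi_get_sample_period() is architecture-provided. On x86, for
 * instance, it is watchdog_thresh seconds worth of unhalted CPU cycles
 * at the nominal frequency, roughly (u64)cpu_khz * 1000 * watchdog_thresh.
 */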
/**
 * hardlockup_detector_perf_enable - Enable the local event
 */
void hardlockup_detector_perf_enable(void)
{
	if (hardlockup_detector_event_create())
		return;

	/* use original value for check: only the first CPU prints the message */
	if (!atomic_fetch_inc(&watchdog_cpus))
		pr_info("Enabled. Permanently consumes one hw-PMU counter.\n");

	perf_event_enable(this_cpu_read(watchdog_ev));
}

/**
 * hardlockup_detector_perf_disable - Disable the local event
 */
void hardlockup_detector_perf_disable(void)
{
	struct perf_event *event = this_cpu_read(watchdog_ev);

	if (event) {
		perf_event_disable(event);
		this_cpu_write(watchdog_ev, NULL);
		this_cpu_write(dead_event, event);
		cpumask_set_cpu(smp_processor_id(), &dead_events_mask);
		atomic_dec(&watchdog_cpus);
	}
}

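/*
 * The event is only disabled above, not released: releasing it here,
 * on the CPU hotplug path, could deadlock. Instead it is parked in
 * dead_event and freed later by hardlockup_detector_perf_cleanup().
 */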
/**
 * hardlockup_detector_perf_cleanup - Cleanup disabled events and destroy them
 *
 * Called from lockup_detector_cleanup(). Serialized by the caller.
 */
void hardlockup_detector_perf_cleanup(void)
{
	int cpu;

	for_each_cpu(cpu, &dead_events_mask) {
		struct perf_event *event = per_cpu(dead_event, cpu);

		/*
		 * Required because for_each_cpu() unconditionally reports
		 * CPU0 as set on UP kernels. Sigh.
		 */
		if (event)
			perf_event_release_kernel(event);
		per_cpu(dead_event, cpu) = NULL;
	}
	cpumask_clear(&dead_events_mask);
}

/**
 * hardlockup_detector_perf_stop - Globally stop watchdog events
 *
 * Special interface for x86 to handle the perf HT bug.
 */
void __init hardlockup_detector_perf_stop(void)
{
	int cpu;

	lockdep_assert_cpus_held();

	for_each_online_cpu(cpu) {
		struct perf_event *event = per_cpu(watchdog_ev, cpu);

		if (event)
			perf_event_disable(event);
	}
}

/**
 * hardlockup_detector_perf_restart - Globally restart watchdog events
 *
 * Special interface for x86 to handle the perf HT bug.
 */
void __init hardlockup_detector_perf_restart(void)
{
	int cpu;

	lockdep_assert_cpus_held();

	if (!(watchdog_enabled & WATCHDOG_HARDLOCKUP_ENABLED))
		return;

	for_each_online_cpu(cpu) {
		struct perf_event *event = per_cpu(watchdog_ev, cpu);

		if (event)
			perf_event_enable(event);
	}
}

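/*
 * Both stop and restart rely on the caller holding the CPU hotplug
 * lock, checked via lockdep_assert_cpus_held(), so the set of online
 * CPUs cannot change while the events are toggled.
 */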
/**
 * hardlockup_detector_perf_init - Probe whether NMI event is available at all
 */
int __init hardlockup_detector_perf_init(void)
{
	int ret = hardlockup_detector_event_create();

	if (ret) {
		pr_info("Perf NMI watchdog permanently disabled\n");
	} else {
		perf_event_release_kernel(this_cpu_read(watchdog_ev));
		this_cpu_write(watchdog_ev, NULL);
	}
	return ret;
}
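
/*
 * The init probe above creates one throwaway event just to verify that
 * a hardware PMU event is available at boot, then releases it. The
 * long-lived per-CPU events are created later, when each CPU calls
 * hardlockup_detector_perf_enable() from its watchdog kthread.
 */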