// SPDX-License-Identifier: GPL-2.0
/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/tick.h>
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>
#include <linux/sched/isolation.h>
#include <linux/stop_machine.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>

static DEFINE_MUTEX(watchdog_mutex);

#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HAVE_NMI_WATCHDOG)
# define WATCHDOG_DEFAULT	(SOFT_WATCHDOG_ENABLED | NMI_WATCHDOG_ENABLED)
# define NMI_WATCHDOG_DEFAULT	1
#else
# define WATCHDOG_DEFAULT	(SOFT_WATCHDOG_ENABLED)
# define NMI_WATCHDOG_DEFAULT	0
#endif
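
/*
 * watchdog_enabled is the composite runtime state, assembled from the
 * NMI_WATCHDOG_ENABLED and SOFT_WATCHDOG_ENABLED bits (defined in
 * <linux/nmi.h>). The *_user_enabled integers below are the user-visible
 * knobs (boot parameters and sysctls) from which it is recomputed by
 * lockup_detector_update_enable().
 */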

unsigned long __read_mostly watchdog_enabled;
int __read_mostly watchdog_user_enabled = 1;
int __read_mostly nmi_watchdog_user_enabled = NMI_WATCHDOG_DEFAULT;
int __read_mostly soft_watchdog_user_enabled = 1;
int __read_mostly watchdog_thresh = 10;
int __read_mostly nmi_watchdog_available;

struct cpumask watchdog_allowed_mask __read_mostly;

struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
unsigned int __read_mostly hardlockup_panic =
			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
/*
 * We may not want to enable hard lockup detection by default in all cases,
 * for example when running the kernel as a guest on a hypervisor. In these
 * cases this function can be called to disable hard lockup detection. This
 * function should only be executed once by the boot processor before the
 * kernel command line parameters are parsed, because otherwise it is not
 * possible to override this in hardlockup_panic_setup().
 */
void __init hardlockup_detector_disable(void)
{
	nmi_watchdog_user_enabled = 0;
}

static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "nopanic", 7))
		hardlockup_panic = 0;
	else if (!strncmp(str, "0", 1))
		nmi_watchdog_user_enabled = 0;
	else if (!strncmp(str, "1", 1))
		nmi_watchdog_user_enabled = 1;
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
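
/*
 * Example: booting with "nmi_watchdog=panic" makes a detected hard lockup
 * panic the machine, while "nmi_watchdog=0" disables the NMI watchdog
 * entirely. The "panic"/"nopanic" keywords only affect hardlockup_panic,
 * not whether the detector runs.
 */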

# ifdef CONFIG_SMP
int __read_mostly sysctl_hardlockup_all_cpu_backtrace;

static int __init hardlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_hardlockup_all_cpu_backtrace = !!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup);
# endif /* CONFIG_SMP */
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

/*
 * These functions can be overridden if an architecture implements its
 * own hardlockup detector.
 *
 * watchdog_nmi_enable/disable can be implemented to start and stop when
 * softlockup watchdog threads start and stop. The arch must select the
 * SOFTLOCKUP_DETECTOR Kconfig.
 */
int __weak watchdog_nmi_enable(unsigned int cpu)
{
	hardlockup_detector_perf_enable();
	return 0;
}

void __weak watchdog_nmi_disable(unsigned int cpu)
{
	hardlockup_detector_perf_disable();
}

/* Return 0 if an NMI watchdog is available, an error code otherwise. */
int __weak __init watchdog_nmi_probe(void)
{
	return hardlockup_detector_perf_init();
}

/**
 * watchdog_nmi_stop - Stop the watchdog for reconfiguration
 *
 * The reconfiguration steps are:
 * watchdog_nmi_stop();
 * update_variables();
 * watchdog_nmi_start();
 */
void __weak watchdog_nmi_stop(void) { }

/**
 * watchdog_nmi_start - Start the watchdog after reconfiguration
 *
 * Counterpart to watchdog_nmi_stop().
 *
 * The following variables have been updated in update_variables() and
 * contain the currently valid configuration:
 * - watchdog_enabled
 * - watchdog_thresh
 * - watchdog_cpumask
 */
void __weak watchdog_nmi_start(void) { }

/**
 * lockup_detector_update_enable - Update the sysctl enable bit
 *
 * Caller needs to make sure that the NMI/perf watchdogs are off, so this
 * can't race with watchdog_nmi_disable().
 */
static void lockup_detector_update_enable(void)
{
	watchdog_enabled = 0;
	if (!watchdog_user_enabled)
		return;
	if (nmi_watchdog_available && nmi_watchdog_user_enabled)
		watchdog_enabled |= NMI_WATCHDOG_ENABLED;
	if (soft_watchdog_user_enabled)
		watchdog_enabled |= SOFT_WATCHDOG_ENABLED;
}
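
/*
 * For example: with watchdog_user_enabled set, an available NMI watchdog
 * and both per-detector knobs enabled, watchdog_enabled ends up as
 * NMI_WATCHDOG_ENABLED | SOFT_WATCHDOG_ENABLED. Clearing
 * watchdog_user_enabled alone forces watchdog_enabled to 0 regardless of
 * the per-detector settings.
 */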

#ifdef CONFIG_SOFTLOCKUP_DETECTOR

/* Global variables, exported for sysctl */
unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static bool softlockup_initialized __read_mostly;
static u64 __read_mostly sample_period;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static unsigned long soft_lockup_nmi_warn;

static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);
	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
	watchdog_user_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);

static int __init nosoftlockup_setup(char *str)
{
	soft_watchdog_user_enabled = 0;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);

#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;

static int __init softlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_softlockup_all_cpu_backtrace = !!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
#endif

static void __lockup_detector_cleanup(void);

/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions, so we generally
 * want a higher threshold for soft lockups than for hard lockups. We couple
 * the thresholds with a factor: the soft threshold is twice the hard
 * threshold.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}

/*
 * Returns seconds, approximately. We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(void)
{
	return running_clock() >> 30LL; /* 2^30 ~= 10^9 */
}

static void set_sample_period(void)
{
	/*
	 * Convert watchdog_thresh from seconds to ns. The divide by 5
	 * gives the hrtimer several chances (two or three with the
	 * current relation between the soft and hard thresholds) to
	 * increment before the hardlockup detector generates a warning.
	 */
	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
	watchdog_update_hrtimer_threshold(sample_period);
}
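
/*
 * Worked example with the default watchdog_thresh of 10: the hard lockup
 * threshold is 10s, get_softlockup_thresh() yields 20s, and sample_period
 * becomes 20 * (10^9 / 5) ns = 4 * 10^9 ns. The softlockup hrtimer thus
 * fires every 4 seconds, five times per softlockup window.
 */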

/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
	__this_cpu_write(watchdog_touch_ts, get_timestamp());
}

/**
 * touch_softlockup_watchdog_sched - touch watchdog on scheduler stalls
 *
 * Call when the scheduler may have stalled for legitimate reasons
 * preventing the watchdog task from executing - e.g. the scheduler
 * entering idle state. This should only be used for scheduler events.
 * Use touch_softlockup_watchdog() for everything else.
 */
notrace void touch_softlockup_watchdog_sched(void)
{
	/*
	 * Preemption can be enabled. It doesn't matter which CPU's timestamp
	 * gets zeroed here, so use the raw_ operation.
	 */
	raw_cpu_write(watchdog_touch_ts, 0);
}

notrace void touch_softlockup_watchdog(void)
{
	touch_softlockup_watchdog_sched();
	wq_watchdog_touch(raw_smp_processor_id());
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * watchdog_mutex cannot be taken here, as this might be called
	 * from (soft)interrupt context, so the access to
	 * watchdog_allowed_mask might race with a concurrent update.
	 *
	 * The watchdog time stamp can race against a concurrent real
	 * update as well; the only side effect might be a cycle delay for
	 * the softlockup check.
	 */
	for_each_cpu(cpu, &watchdog_allowed_mask)
		per_cpu(watchdog_touch_ts, cpu) = 0;
	wq_watchdog_touch(-1);
}

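/*
 * Variant for callers whose timestamp update must be paired with a
 * scheduler clock resync: setting softlockup_touch_sync makes the next
 * watchdog_timer_fn() invocation call sched_clock_tick() before
 * re-arming, so the freshly taken timestamp is trustworthy.
 */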
void touch_softlockup_watchdog_sync(void)
{
	__this_cpu_write(softlockup_touch_sync, true);
	__this_cpu_write(watchdog_touch_ts, 0);
}

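/*
 * Returns 0 when no softlockup is pending. Otherwise returns the stall
 * duration in seconds, i.e. how long ago the per-CPU timestamp was last
 * touched, once that delay exceeds get_softlockup_thresh() and the soft
 * watchdog is enabled.
 */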
static int is_softlockup(unsigned long touch_ts)
{
	unsigned long now = get_timestamp();

	if ((watchdog_enabled & SOFT_WATCHDOG_ENABLED) && watchdog_thresh) {
		/* Warn about unreasonable delays. */
		if (time_after(now, touch_ts + get_softlockup_thresh()))
			return now - touch_ts;
	}
	return 0;
}

/* watchdog detector functions */
bool is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return true;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return false;
}
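
/*
 * is_hardlockup() is called from NMI context by the arch specific or
 * perf based hardlockup detector (e.g. kernel/watchdog_hld.c). If
 * hrtimer_interrupts has not advanced since the previous NMI sample,
 * the per-CPU hrtimer has not fired for a whole sample window, which
 * means interrupts are no longer being serviced on this CPU: a hard
 * lockup.
 */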

static void watchdog_interrupt_count(void)
{
	__this_cpu_inc(hrtimer_interrupts);
}

static DEFINE_PER_CPU(struct completion, softlockup_completion);
static DEFINE_PER_CPU(struct cpu_stop_work, softlockup_stop_work);

/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every sample_period seconds (4 seconds by
 * default) to reset the softlockup timestamp. If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static int softlockup_fn(void *data)
{
	__this_cpu_write(soft_lockup_hrtimer_cnt,
			 __this_cpu_read(hrtimer_interrupts));
	__touch_watchdog();
	complete(this_cpu_ptr(&softlockup_completion));

	return 0;
}

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;
	int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;

	if (!watchdog_enabled)
		return HRTIMER_NORESTART;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* kick the softlockup detector */
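	/*
	 * The softlockup check runs via the stop_machine infrastructure:
	 * softlockup_fn() is queued to this CPU's stopper thread, which
	 * belongs to the highest priority scheduling class. If even the
	 * stopper cannot run before the touch deadline, the CPU is stuck
	 * in kernel mode without rescheduling (e.g. a loop with
	 * preemption disabled) and is_softlockup() below will flag it.
	 */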
	if (completion_done(this_cpu_ptr(&softlockup_completion))) {
		reinit_completion(this_cpu_ptr(&softlockup_completion));
		stop_one_cpu_nowait(smp_processor_id(),
				softlockup_fn, NULL,
				this_cpu_ptr(&softlockup_stop_work));
	}

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	if (touch_ts == 0) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		/* Clear the guest paused flag on watchdog reset */
		kvm_check_and_clear_guest_paused();
		__touch_watchdog();
		return HRTIMER_RESTART;
	}

	/*
	 * Check for a softlockup: this is done by making sure a high
	 * priority task is being scheduled. The task touches the watchdog
	 * to indicate it is getting cpu time. If it hasn't, then this is
	 * a good indication some task is hogging the cpu.
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like a soft lockup, check to see if the host
		 * stopped the vm before we issue the warning
		 */
		if (kvm_check_and_clear_guest_paused())
			return HRTIMER_RESTART;

		/* only warn once */
		if (__this_cpu_read(soft_watchdog_warn) == true) {
			/*
			 * When multiple processes are causing softlockups the
			 * softlockup detector only warns on the first one
			 * because the code relies on a full quiet cycle to
			 * re-arm. The second process prevents the quiet cycle
			 * and never gets reported. Use task pointers to detect
			 * this.
			 */
			if (__this_cpu_read(softlockup_task_ptr_saved) !=
			    current) {
				__this_cpu_write(soft_watchdog_warn, false);
				__touch_watchdog();
			}
			return HRTIMER_RESTART;
		}

		if (softlockup_all_cpu_backtrace) {
			/*
			 * Prevent multiple soft-lockup reports if one cpu is
			 * already engaged in dumping cpu back traces.
			 */
			if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
				/* Someone else will report us. Let's give up */
				__this_cpu_write(soft_watchdog_warn, true);
				return HRTIMER_RESTART;
			}
		}

		pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		__this_cpu_write(softlockup_task_ptr_saved, current);
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_all_cpu_backtrace) {
			/*
			 * Avoid generating two back traces for current
			 * given that one is already made above.
			 */
			trigger_allbutself_cpu_backtrace();

			clear_bit(0, &soft_lockup_nmi_warn);
			/* Barrier to sync with other cpus */
			smp_mb__after_atomic();
		}

		add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__this_cpu_write(soft_watchdog_warn, true);
	} else
		__this_cpu_write(soft_watchdog_warn, false);

	return HRTIMER_RESTART;
}

static void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);
	struct completion *done = this_cpu_ptr(&softlockup_completion);

	WARN_ON_ONCE(cpu != smp_processor_id());

	init_completion(done);
	complete(done);

	/*
	 * Start the timer first to prevent the NMI watchdog triggering
	 * before the timer has a chance to fire.
	 */
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;
	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED);

	/* Initialize timestamp */
	__touch_watchdog();
	/* Enable the perf event */
	if (watchdog_enabled & NMI_WATCHDOG_ENABLED)
		watchdog_nmi_enable(cpu);
}

static void watchdog_disable(unsigned int cpu)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);

	WARN_ON_ONCE(cpu != smp_processor_id());

	/*
	 * Disable the perf event first. That prevents a large delay
	 * between disabling the timer and disabling the perf event from
	 * causing the perf NMI to detect a false positive.
	 */
	watchdog_nmi_disable(cpu);
	hrtimer_cancel(hrtimer);
	wait_for_completion(this_cpu_ptr(&softlockup_completion));
}

static int softlockup_stop_fn(void *data)
{
	watchdog_disable(smp_processor_id());
	return 0;
}

static void softlockup_stop_all(void)
{
	int cpu;

	if (!softlockup_initialized)
		return;

	for_each_cpu(cpu, &watchdog_allowed_mask)
		smp_call_on_cpu(cpu, softlockup_stop_fn, NULL, false);

	cpumask_clear(&watchdog_allowed_mask);
}

static int softlockup_start_fn(void *data)
{
	watchdog_enable(smp_processor_id());
	return 0;
}

static void softlockup_start_all(void)
{
	int cpu;

	cpumask_copy(&watchdog_allowed_mask, &watchdog_cpumask);
	for_each_cpu(cpu, &watchdog_allowed_mask)
		smp_call_on_cpu(cpu, softlockup_start_fn, NULL, false);
}

int lockup_detector_online_cpu(unsigned int cpu)
{
	watchdog_enable(cpu);
	return 0;
}

int lockup_detector_offline_cpu(unsigned int cpu)
{
	watchdog_disable(cpu);
	return 0;
}

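/*
 * Tear everything down and bring it back up with the current configuration.
 * The sequence mirrors the one documented at watchdog_nmi_stop(): stop the
 * NMI watchdog, stop the softlockup detector on all allowed CPUs, recompute
 * sample_period and the enable bits, then restart both detectors. Runs with
 * CPU hotplug locked out.
 */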
static void lockup_detector_reconfigure(void)
{
	cpus_read_lock();
	watchdog_nmi_stop();

	softlockup_stop_all();
	set_sample_period();
	lockup_detector_update_enable();
	if (watchdog_enabled && watchdog_thresh)
		softlockup_start_all();

	watchdog_nmi_start();
	cpus_read_unlock();
	/*
	 * Must be called outside the cpus locked section to prevent
	 * recursive locking in the perf code.
	 */
	__lockup_detector_cleanup();
}

/*
 * Create the watchdog infrastructure and configure the detector(s).
 *
 * Nothing is started yet, as watchdog_allowed_mask is empty. Once the
 * infrastructure is successfully initialized, take the proper locks and
 * start the detectors on the CPUs in watchdog_cpumask if the watchdog
 * is enabled.
 */
static __init void lockup_detector_setup(void)
{
	/*
	 * If sysctl is off and watchdog got disabled on the command line,
	 * nothing to do here.
	 */
	lockup_detector_update_enable();

	if (!IS_ENABLED(CONFIG_SYSCTL) &&
	    !(watchdog_enabled && watchdog_thresh))
		return;

	mutex_lock(&watchdog_mutex);
	lockup_detector_reconfigure();
	softlockup_initialized = true;
	mutex_unlock(&watchdog_mutex);
}

#else /* CONFIG_SOFTLOCKUP_DETECTOR */
static void lockup_detector_reconfigure(void)
{
	cpus_read_lock();
	watchdog_nmi_stop();
	lockup_detector_update_enable();
	watchdog_nmi_start();
	cpus_read_unlock();
}
static inline void lockup_detector_setup(void)
{
	lockup_detector_reconfigure();
}
#endif /* !CONFIG_SOFTLOCKUP_DETECTOR */

static void __lockup_detector_cleanup(void)
{
	lockdep_assert_held(&watchdog_mutex);
	hardlockup_detector_perf_cleanup();
}

/**
 * lockup_detector_cleanup - Cleanup after cpu hotplug or sysctl changes
 *
 * Caller must not hold the cpu hotplug rwsem.
 */
void lockup_detector_cleanup(void)
{
	mutex_lock(&watchdog_mutex);
	__lockup_detector_cleanup();
	mutex_unlock(&watchdog_mutex);
}

/**
 * lockup_detector_soft_poweroff - Interface to stop lockup detector(s)
 *
 * Special interface for parisc. It prevents lockup detector warnings from
 * the default pm_poweroff() function which busy loops forever.
 */
void lockup_detector_soft_poweroff(void)
{
	watchdog_enabled = 0;
}

#ifdef CONFIG_SYSCTL

/* Propagate any changes to the watchdog infrastructure */
static void proc_watchdog_update(void)
{
	/* Remove impossible cpus to keep sysctl output clean. */
	cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask);
	lockup_detector_reconfigure();
}

/*
 * common function for watchdog, nmi_watchdog and soft_watchdog parameter
 *
 * caller             | table->data points to      | 'which'
 * -------------------|----------------------------|--------------------------
 * proc_watchdog      | watchdog_user_enabled      | NMI_WATCHDOG_ENABLED |
 *                    |                            | SOFT_WATCHDOG_ENABLED
 * -------------------|----------------------------|--------------------------
 * proc_nmi_watchdog  | nmi_watchdog_user_enabled  | NMI_WATCHDOG_ENABLED
 * -------------------|----------------------------|--------------------------
 * proc_soft_watchdog | soft_watchdog_user_enabled | SOFT_WATCHDOG_ENABLED
 */
static int proc_watchdog_common(int which, struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old, *param = table->data;

	mutex_lock(&watchdog_mutex);

	if (!write) {
		/*
		 * On read synchronize the userspace interface. This is a
		 * racy snapshot.
		 */
		*param = (watchdog_enabled & which) != 0;
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	} else {
		old = READ_ONCE(*param);
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
		if (!err && old != READ_ONCE(*param))
			proc_watchdog_update();
	}
	mutex_unlock(&watchdog_mutex);
	return err;
}

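/*
 * Example interaction from userspace (the sysctl paths are listed above
 * each handler below): writing 0 to /proc/sys/kernel/nmi_watchdog clears
 * only NMI_WATCHDOG_ENABLED, while writing 0 to /proc/sys/kernel/watchdog
 * clears both enable bits. Reads report the bits currently set in
 * watchdog_enabled, not the raw *_user_enabled values.
 */
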
/*
 * /proc/sys/kernel/watchdog
 */
int proc_watchdog(struct ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/nmi_watchdog
 */
int proc_nmi_watchdog(struct ctl_table *table, int write,
		      void __user *buffer, size_t *lenp, loff_t *ppos)
{
	if (!nmi_watchdog_available && write)
		return -ENOTSUPP;
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/soft_watchdog
 */
int proc_soft_watchdog(struct ctl_table *table, int write,
		       void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/watchdog_thresh
 */
int proc_watchdog_thresh(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old;

	mutex_lock(&watchdog_mutex);

	old = READ_ONCE(watchdog_thresh);
	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (!err && write && old != READ_ONCE(watchdog_thresh))
		proc_watchdog_update();

	mutex_unlock(&watchdog_mutex);
	return err;
}

/*
 * The cpumask is the mask of possible cpus that the watchdog can run
 * on, not the mask of cpus it is actually running on. This allows the
 * user to specify a mask that will include cpus that have not yet
 * been brought online, if desired.
 */
int proc_watchdog_cpumask(struct ctl_table *table, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err;

	mutex_lock(&watchdog_mutex);

	err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
	if (!err && write)
		proc_watchdog_update();

	mutex_unlock(&watchdog_mutex);
	return err;
}
#endif /* CONFIG_SYSCTL */

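/*
 * Early init entry point, called once during boot: restrict the watchdog
 * to the housekeeping CPUs (nohz_full cores are excluded by default),
 * probe whether an NMI watchdog implementation is available, then perform
 * the initial setup.
 */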
void __init lockup_detector_init(void)
{
	if (tick_nohz_full_enabled())
		pr_info("Disabling watchdog on nohz_full cores by default\n");

	cpumask_copy(&watchdog_cpumask,
		     housekeeping_cpumask(HK_FLAG_TIMER));

	if (!watchdog_nmi_probe())
		nmi_watchdog_available = true;
	lockup_detector_setup();
}