// SPDX-License-Identifier: GPL-2.0
/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */
#define pr_fmt(fmt) "watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/tick.h>
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>
#include <linux/sched/isolation.h>
#include <linux/stop_machine.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
static DEFINE_MUTEX(watchdog_mutex);

#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HARDLOCKUP_DETECTOR_SPARC64)
# define WATCHDOG_HARDLOCKUP_DEFAULT	1
#else
# define WATCHDOG_HARDLOCKUP_DEFAULT	0
#endif
unsigned long __read_mostly watchdog_enabled;
int __read_mostly watchdog_user_enabled = 1;
static int __read_mostly watchdog_hardlockup_user_enabled = WATCHDOG_HARDLOCKUP_DEFAULT;
static int __read_mostly watchdog_softlockup_user_enabled = 1;
int __read_mostly watchdog_thresh = 10;
static int __read_mostly watchdog_hardlockup_available;

struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
#ifdef CONFIG_HARDLOCKUP_DETECTOR

# ifdef CONFIG_SMP
int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
# endif /* CONFIG_SMP */
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
unsigned int __read_mostly hardlockup_panic =
			IS_ENABLED(CONFIG_BOOTPARAM_HARDLOCKUP_PANIC);
/*
 * We may not want to enable hard lockup detection by default in all cases,
 * for example when running the kernel as a guest on a hypervisor. In these
 * cases this function can be called to disable hard lockup detection. This
 * function should only be executed once by the boot processor before the
 * kernel command line parameters are parsed, because otherwise it is not
 * possible to override this in hardlockup_panic_setup().
 */
void __init hardlockup_detector_disable(void)
{
	watchdog_hardlockup_user_enabled = 0;
}
static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "nopanic", 7))
		hardlockup_panic = 0;
	else if (!strncmp(str, "0", 1))
		watchdog_hardlockup_user_enabled = 0;
	else if (!strncmp(str, "1", 1))
		watchdog_hardlockup_user_enabled = 1;
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

#if defined(CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER)

static DEFINE_PER_CPU(atomic_t, hrtimer_interrupts);
static DEFINE_PER_CPU(int, hrtimer_interrupts_saved);
static DEFINE_PER_CPU(bool, watchdog_hardlockup_warned);
static DEFINE_PER_CPU(bool, watchdog_hardlockup_touched);
static unsigned long hard_lockup_nmi_warn;
notrace void arch_touch_nmi_watchdog(void)
{
	/*
	 * Using __raw here because some code paths have
	 * preemption enabled.  If preemption is enabled
	 * then interrupts should be enabled too, in which
	 * case we shouldn't have to worry about the watchdog
	 * going off.
	 */
	raw_cpu_write(watchdog_hardlockup_touched, true);
}
EXPORT_SYMBOL(arch_touch_nmi_watchdog);
void watchdog_hardlockup_touch_cpu(unsigned int cpu)
{
	per_cpu(watchdog_hardlockup_touched, cpu) = true;
}
static bool is_hardlockup(unsigned int cpu)
{
	int hrint = atomic_read(&per_cpu(hrtimer_interrupts, cpu));

	if (per_cpu(hrtimer_interrupts_saved, cpu) == hrint)
		return true;

	/*
	 * NOTE: we don't need any fancy atomic_t or READ_ONCE/WRITE_ONCE
	 * for hrtimer_interrupts_saved. hrtimer_interrupts_saved is
	 * written/read by a single CPU.
	 */
	per_cpu(hrtimer_interrupts_saved, cpu) = hrint;

	return false;
}
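/*
 * In other words: the detector compares the hrtimer interrupt count
 * against the value saved on the previous check. A CPU that still takes
 * timer interrupts keeps the count advancing between checks; a count
 * that is unchanged across a whole check interval means no timer
 * interrupt was serviced and the CPU is treated as hard locked up.
 */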
static void watchdog_hardlockup_kick(void)
{
	int new_interrupts;

	new_interrupts = atomic_inc_return(this_cpu_ptr(&hrtimer_interrupts));
	watchdog_buddy_check_hardlockup(new_interrupts);
}
void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs)
{
	if (per_cpu(watchdog_hardlockup_touched, cpu)) {
		per_cpu(watchdog_hardlockup_touched, cpu) = false;
		return;
	}

	/*
	 * Check for a hardlockup by making sure the CPU's timer
	 * interrupt is incrementing. The timer interrupt should have
	 * fired multiple times before we overflowed. If it hasn't
	 * then this is a good indication the CPU is stuck.
	 */
	if (is_hardlockup(cpu)) {
		unsigned int this_cpu = smp_processor_id();
		unsigned long flags;

		/* Only print hardlockups once. */
		if (per_cpu(watchdog_hardlockup_warned, cpu))
			return;

		/*
		 * Prevent multiple hard-lockup reports if one cpu is already
		 * engaged in dumping all cpu back traces.
		 */
		if (sysctl_hardlockup_all_cpu_backtrace) {
			if (test_and_set_bit_lock(0, &hard_lockup_nmi_warn))
				return;
		}

		/*
		 * NOTE: we call printk_cpu_sync_get_irqsave() after printing
		 * the lockup message. While it would be nice to serialize
		 * that printout, we really want to make sure that if some
		 * other CPU somehow locked up while holding the lock associated
		 * with printk_cpu_sync_get_irqsave() that we can still at least
		 * get the message about the lockup out.
		 */
		pr_emerg("Watchdog detected hard LOCKUP on cpu %d\n", cpu);
		printk_cpu_sync_get_irqsave(flags);

		print_modules();
		print_irqtrace_events(current);
		if (cpu == this_cpu) {
			if (regs)
				show_regs(regs);
			else
				dump_stack();
			printk_cpu_sync_put_irqrestore(flags);
		} else {
			printk_cpu_sync_put_irqrestore(flags);
			trigger_single_cpu_backtrace(cpu);
		}

		if (sysctl_hardlockup_all_cpu_backtrace) {
			trigger_allbutcpu_cpu_backtrace(cpu);
			if (!hardlockup_panic)
				clear_bit_unlock(0, &hard_lockup_nmi_warn);
		}

		if (hardlockup_panic)
			nmi_panic(regs, "Hard LOCKUP");

		per_cpu(watchdog_hardlockup_warned, cpu) = true;
	} else {
		per_cpu(watchdog_hardlockup_warned, cpu) = false;
	}
}
#else /* CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER */

static inline void watchdog_hardlockup_kick(void) { }

#endif /* !CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER */
/*
 * These functions can be overridden based on the configured hardlockup
 * detector.
 *
 * watchdog_hardlockup_enable/disable can be implemented to start and stop
 * the detector when the softlockup watchdog starts and stops. Such a
 * detector must select the SOFTLOCKUP_DETECTOR Kconfig.
 */
void __weak watchdog_hardlockup_enable(unsigned int cpu) { }

void __weak watchdog_hardlockup_disable(unsigned int cpu) { }
/*
 * Watchdog-detector specific API.
 *
 * Return 0 when hardlockup watchdog is available, negative value otherwise.
 * Note that the negative value means that a delayed probe might
 * succeed later.
 */
int __weak __init watchdog_hardlockup_probe(void)
{
	return -ENODEV;
}
/**
 * watchdog_hardlockup_stop - Stop the watchdog for reconfiguration
 *
 * The reconfiguration steps are:
 * watchdog_hardlockup_stop();
 * update_variables();
 * watchdog_hardlockup_start();
 */
void __weak watchdog_hardlockup_stop(void) { }
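/* __lockup_detector_reconfigure() below follows exactly this sequence. */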
/**
 * watchdog_hardlockup_start - Start the watchdog after reconfiguration
 *
 * Counterpart to watchdog_hardlockup_stop().
 *
 * The following variables have been updated in update_variables() and
 * contain the currently valid configuration:
 * - watchdog_enabled
 * - watchdog_thresh
 * - watchdog_cpumask
 */
void __weak watchdog_hardlockup_start(void) { }
/**
 * lockup_detector_update_enable - Update the sysctl enable bit
 *
 * Caller needs to make sure that the hard watchdogs are off, so this
 * can't race with watchdog_hardlockup_disable().
 */
static void lockup_detector_update_enable(void)
{
	watchdog_enabled = 0;
	if (!watchdog_user_enabled)
		return;
	if (watchdog_hardlockup_available && watchdog_hardlockup_user_enabled)
		watchdog_enabled |= WATCHDOG_HARDLOCKUP_ENABLED;
	if (watchdog_softlockup_user_enabled)
		watchdog_enabled |= WATCHDOG_SOFTOCKUP_ENABLED;
}
#ifdef CONFIG_SOFTLOCKUP_DETECTOR

/*
 * Delay the softlockup report when running known-slow code.
 * It does _not_ affect the timestamp of the last successful reschedule.
 */
#define SOFTLOCKUP_DELAY_REPORT	ULONG_MAX
#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;
#endif

static struct cpumask watchdog_allowed_mask __read_mostly;
/* Global variables, exported for sysctl */
unsigned int __read_mostly softlockup_panic =
			IS_ENABLED(CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC);

static bool softlockup_initialized __read_mostly;
static u64 __read_mostly sample_period;

/* Timestamp taken after the last successful reschedule. */
static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
/* Timestamp of the last softlockup report. */
static DEFINE_PER_CPU(unsigned long, watchdog_report_ts);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static unsigned long soft_lockup_nmi_warn;
static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);
	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
	watchdog_user_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);

static int __init nosoftlockup_setup(char *str)
{
	watchdog_softlockup_user_enabled = 0;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);

static int __init watchdog_thresh_setup(char *str)
{
	get_option(&str, &watchdog_thresh);
	return 1;
}
__setup("watchdog_thresh=", watchdog_thresh_setup);
static void __lockup_detector_cleanup(void);
/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions. So we generally
 * want a higher threshold for soft lockups than for hard lockups. So we
 * couple the thresholds with a factor: the soft threshold is twice the hard
 * threshold.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}
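/*
 * With the default watchdog_thresh = 10 this yields a 10s hardlockup
 * window and a 20s softlockup threshold.
 */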
/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(void)
{
	return running_clock() >> 30LL;  /* 2^30 ~= 10^9 */
}
static void set_sample_period(void)
{
	/*
	 * convert watchdog_thresh from seconds to ns
	 * the divide by 5 is to give hrtimer several chances (two
	 * or three with the current relation between the soft
	 * and hard thresholds) to increment before the
	 * hardlockup detector generates a warning
	 */
	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
	watchdog_update_hrtimer_threshold(sample_period);
}
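/*
 * Worked example with the defaults: get_softlockup_thresh() = 20, so
 * sample_period = 20 * (NSEC_PER_SEC / 5) = 4 seconds between hrtimer
 * kicks.
 */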
static void update_report_ts(void)
{
	__this_cpu_write(watchdog_report_ts, get_timestamp());
}

/* Commands for resetting the watchdog */
static void update_touch_ts(void)
{
	__this_cpu_write(watchdog_touch_ts, get_timestamp());
	update_report_ts();
}
/**
 * touch_softlockup_watchdog_sched - touch watchdog on scheduler stalls
 *
 * Call when the scheduler may have stalled for legitimate reasons
 * preventing the watchdog task from executing - e.g. the scheduler
 * entering idle state.  This should only be used for scheduler events.
 * Use touch_softlockup_watchdog() for everything else.
 */
notrace void touch_softlockup_watchdog_sched(void)
{
	/*
	 * Preemption can be enabled.  It doesn't matter which CPU's watchdog
	 * report period gets restarted here, so use the raw_ operation.
	 */
	raw_cpu_write(watchdog_report_ts, SOFTLOCKUP_DELAY_REPORT);
}
notrace void touch_softlockup_watchdog(void)
{
	touch_softlockup_watchdog_sched();
	wq_watchdog_touch(raw_smp_processor_id());
}
EXPORT_SYMBOL(touch_softlockup_watchdog);
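/*
 * For example, a codepath that legitimately spins for a long time (say,
 * a boot-time hardware calibration loop) can keep the softlockup detector
 * quiet by touching the watchdog from inside the loop, along these lines
 * (illustrative sketch only; calibration_done() is a hypothetical helper):
 *
 *	while (!calibration_done())
 *		touch_softlockup_watchdog();
 */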
void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * watchdog_mutex cannot be taken here, as this might be called
	 * from (soft)interrupt context, so the access to
	 * watchdog_allowed_cpumask might race with a concurrent update.
	 *
	 * The watchdog time stamp can race against a concurrent real
	 * update as well; the only side effect might be a cycle delay for
	 * the softlockup check.
	 */
	for_each_cpu(cpu, &watchdog_allowed_mask) {
		per_cpu(watchdog_report_ts, cpu) = SOFTLOCKUP_DELAY_REPORT;
		wq_watchdog_touch(cpu);
	}
}
void touch_softlockup_watchdog_sync(void)
{
	__this_cpu_write(softlockup_touch_sync, true);
	__this_cpu_write(watchdog_report_ts, SOFTLOCKUP_DELAY_REPORT);
}
static int is_softlockup(unsigned long touch_ts,
			 unsigned long period_ts,
			 unsigned long now)
{
	if ((watchdog_enabled & WATCHDOG_SOFTOCKUP_ENABLED) && watchdog_thresh) {
		/* Warn about unreasonable delays. */
		if (time_after(now, period_ts + get_softlockup_thresh()))
			return now - touch_ts;
	}
	return 0;
}
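/*
 * Example with the default threshold: a report triggers once @now is more
 * than get_softlockup_thresh() = 20 time units past @period_ts. The
 * returned duration is measured from @touch_ts, i.e. from the last
 * successful reschedule, not from the last report.
 */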
/* watchdog detector functions */
static DEFINE_PER_CPU(struct completion, softlockup_completion);
static DEFINE_PER_CPU(struct cpu_stop_work, softlockup_stop_work);
/*
 * The watchdog feed function - touches the timestamp.
 *
 * It only runs once every sample_period seconds (4 seconds by
 * default) to reset the softlockup timestamp. If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static int softlockup_fn(void *data)
{
	update_touch_ts();
	complete(this_cpu_ptr(&softlockup_completion));

	return 0;
}
/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts, period_ts, now;
	struct pt_regs *regs = get_irq_regs();
	int duration;
	int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
	unsigned long flags;

	if (!watchdog_enabled)
		return HRTIMER_NORESTART;

	watchdog_hardlockup_kick();

	/* kick the softlockup detector */
	if (completion_done(this_cpu_ptr(&softlockup_completion))) {
		reinit_completion(this_cpu_ptr(&softlockup_completion));
		stop_one_cpu_nowait(smp_processor_id(),
				softlockup_fn, NULL,
				this_cpu_ptr(&softlockup_stop_work));
	}

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	/*
	 * Read the current timestamp first. It might become invalid anytime
	 * when a virtual machine is stopped by the host or when the watchdog
	 * is touched from NMI.
	 */
	now = get_timestamp();
	/*
	 * If a virtual machine is stopped by the host it can look to
	 * the watchdog like a soft lockup. This function touches the watchdog.
	 */
	kvm_check_and_clear_guest_paused();
	/*
	 * The stored timestamp is comparable with @now only when not touched.
	 * It might get touched anytime from NMI. Make sure that is_softlockup()
	 * uses the same (valid) value.
	 */
	period_ts = READ_ONCE(*this_cpu_ptr(&watchdog_report_ts));

	/* Reset the interval when touched by known problematic code. */
	if (period_ts == SOFTLOCKUP_DELAY_REPORT) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		update_report_ts();
		return HRTIMER_RESTART;
	}

	/* Check for a softlockup. */
	touch_ts = __this_cpu_read(watchdog_touch_ts);
	duration = is_softlockup(touch_ts, period_ts, now);
	if (unlikely(duration)) {
		/*
		 * Prevent multiple soft-lockup reports if one cpu is already
		 * engaged in dumping all cpu back traces.
		 */
		if (softlockup_all_cpu_backtrace) {
			if (test_and_set_bit_lock(0, &soft_lockup_nmi_warn))
				return HRTIMER_RESTART;
		}

		/* Start period for the next softlockup warning. */
		update_report_ts();

		printk_cpu_sync_get_irqsave(flags);
		pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();
		printk_cpu_sync_put_irqrestore(flags);

		if (softlockup_all_cpu_backtrace) {
			trigger_allbutcpu_cpu_backtrace(smp_processor_id());
			if (!softlockup_panic)
				clear_bit_unlock(0, &soft_lockup_nmi_warn);
		}

		add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
		if (softlockup_panic)
			panic("softlockup: hung tasks");
	}

	return HRTIMER_RESTART;
}
static void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);
	struct completion *done = this_cpu_ptr(&softlockup_completion);

	WARN_ON_ONCE(cpu != smp_processor_id());

	init_completion(done);
	complete(done);

	/*
	 * Start the timer first to prevent the hardlockup watchdog triggering
	 * before the timer has a chance to fire.
	 */
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	hrtimer->function = watchdog_timer_fn;
	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED_HARD);

	/* Initialize timestamp */
	update_touch_ts();
	/* Enable the hardlockup detector */
	if (watchdog_enabled & WATCHDOG_HARDLOCKUP_ENABLED)
		watchdog_hardlockup_enable(cpu);
}
static void watchdog_disable(unsigned int cpu)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);

	WARN_ON_ONCE(cpu != smp_processor_id());

	/*
	 * Disable the hardlockup detector first. This prevents a false
	 * positive caused by a large delay between disabling the timer
	 * and disabling the hardlockup detector.
	 */
	watchdog_hardlockup_disable(cpu);
	hrtimer_cancel(hrtimer);
	wait_for_completion(this_cpu_ptr(&softlockup_completion));
}
static int softlockup_stop_fn(void *data)
{
	watchdog_disable(smp_processor_id());
	return 0;
}

static void softlockup_stop_all(void)
{
	int cpu;

	if (!softlockup_initialized)
		return;

	for_each_cpu(cpu, &watchdog_allowed_mask)
		smp_call_on_cpu(cpu, softlockup_stop_fn, NULL, false);

	cpumask_clear(&watchdog_allowed_mask);
}

static int softlockup_start_fn(void *data)
{
	watchdog_enable(smp_processor_id());
	return 0;
}

static void softlockup_start_all(void)
{
	int cpu;

	cpumask_copy(&watchdog_allowed_mask, &watchdog_cpumask);
	for_each_cpu(cpu, &watchdog_allowed_mask)
		smp_call_on_cpu(cpu, softlockup_start_fn, NULL, false);
}
int lockup_detector_online_cpu(unsigned int cpu)
{
	if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
		watchdog_enable(cpu);
	return 0;
}

int lockup_detector_offline_cpu(unsigned int cpu)
{
	if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
		watchdog_disable(cpu);
	return 0;
}
static void __lockup_detector_reconfigure(void)
{
	cpus_read_lock();
	watchdog_hardlockup_stop();

	softlockup_stop_all();
	set_sample_period();
	lockup_detector_update_enable();
	if (watchdog_enabled && watchdog_thresh)
		softlockup_start_all();

	watchdog_hardlockup_start();
	cpus_read_unlock();
	/*
	 * Must be called outside the cpus locked section to prevent
	 * recursive locking in the perf code.
	 */
	__lockup_detector_cleanup();
}
void lockup_detector_reconfigure(void)
{
	mutex_lock(&watchdog_mutex);
	__lockup_detector_reconfigure();
	mutex_unlock(&watchdog_mutex);
}
/*
 * Create the watchdog infrastructure and configure the detector(s).
 */
static __init void lockup_detector_setup(void)
{
	/*
	 * If sysctl is off and the watchdog got disabled on the command line,
	 * nothing to do here.
	 */
	lockup_detector_update_enable();

	if (!IS_ENABLED(CONFIG_SYSCTL) &&
	    !(watchdog_enabled && watchdog_thresh))
		return;

	mutex_lock(&watchdog_mutex);
	__lockup_detector_reconfigure();
	softlockup_initialized = true;
	mutex_unlock(&watchdog_mutex);
}
#else /* CONFIG_SOFTLOCKUP_DETECTOR */
static void __lockup_detector_reconfigure(void)
{
	cpus_read_lock();
	watchdog_hardlockup_stop();
	lockup_detector_update_enable();
	watchdog_hardlockup_start();
	cpus_read_unlock();
}
void lockup_detector_reconfigure(void)
{
	__lockup_detector_reconfigure();
}
static inline void lockup_detector_setup(void)
{
	__lockup_detector_reconfigure();
}
#endif /* !CONFIG_SOFTLOCKUP_DETECTOR */
static void __lockup_detector_cleanup(void)
{
	lockdep_assert_held(&watchdog_mutex);
	hardlockup_detector_perf_cleanup();
}
/**
 * lockup_detector_cleanup - Cleanup after cpu hotplug or sysctl changes
 *
 * Caller must not hold the cpu hotplug rwsem.
 */
void lockup_detector_cleanup(void)
{
	mutex_lock(&watchdog_mutex);
	__lockup_detector_cleanup();
	mutex_unlock(&watchdog_mutex);
}
/**
 * lockup_detector_soft_poweroff - Interface to stop lockup detector(s)
 *
 * Special interface for parisc. It prevents lockup detector warnings from
 * the default pm_poweroff() function which busy loops forever.
 */
void lockup_detector_soft_poweroff(void)
{
	watchdog_enabled = 0;
}
#ifdef CONFIG_SYSCTL

/* Propagate any changes to the watchdog infrastructure */
static void proc_watchdog_update(void)
{
	/* Remove impossible cpus to keep sysctl output clean. */
	cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask);
	__lockup_detector_reconfigure();
}
/*
 * common function for watchdog, nmi_watchdog and soft_watchdog parameter
 *
 * caller             | table->data points to            | 'which'
 * -------------------|----------------------------------|-------------------------------
 * proc_watchdog      | watchdog_user_enabled            | WATCHDOG_HARDLOCKUP_ENABLED |
 *                    |                                  | WATCHDOG_SOFTOCKUP_ENABLED
 * -------------------|----------------------------------|-------------------------------
 * proc_nmi_watchdog  | watchdog_hardlockup_user_enabled | WATCHDOG_HARDLOCKUP_ENABLED
 * -------------------|----------------------------------|-------------------------------
 * proc_soft_watchdog | watchdog_softlockup_user_enabled | WATCHDOG_SOFTOCKUP_ENABLED
 */
static int proc_watchdog_common(int which, struct ctl_table *table, int write,
				void *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old, *param = table->data;

	mutex_lock(&watchdog_mutex);

	if (!write) {
		/*
		 * On read synchronize the userspace interface. This is a
		 * racy snapshot.
		 */
		*param = (watchdog_enabled & which) != 0;
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	} else {
		old = READ_ONCE(*param);
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
		if (!err && old != READ_ONCE(*param))
			proc_watchdog_update();
	}
	mutex_unlock(&watchdog_mutex);
	return err;
}
/*
 * /proc/sys/kernel/watchdog
 */
int proc_watchdog(struct ctl_table *table, int write,
		  void *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(WATCHDOG_HARDLOCKUP_ENABLED |
				    WATCHDOG_SOFTOCKUP_ENABLED,
				    table, write, buffer, lenp, ppos);
}
/*
 * /proc/sys/kernel/nmi_watchdog
 */
int proc_nmi_watchdog(struct ctl_table *table, int write,
		      void *buffer, size_t *lenp, loff_t *ppos)
{
	if (!watchdog_hardlockup_available && write)
		return -ENOTSUPP;
	return proc_watchdog_common(WATCHDOG_HARDLOCKUP_ENABLED,
				    table, write, buffer, lenp, ppos);
}
/*
 * /proc/sys/kernel/soft_watchdog
 */
int proc_soft_watchdog(struct ctl_table *table, int write,
		       void *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(WATCHDOG_SOFTOCKUP_ENABLED,
				    table, write, buffer, lenp, ppos);
}
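/*
 * The three handlers above back the sysctl files named in their comments,
 * e.g. from a shell:
 *
 *	echo 0 > /proc/sys/kernel/watchdog	# both detectors off
 *	echo 1 > /proc/sys/kernel/soft_watchdog	# softlockup detector on
 *	cat /proc/sys/kernel/nmi_watchdog	# hardlockup detector state
 */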
/*
 * /proc/sys/kernel/watchdog_thresh
 */
int proc_watchdog_thresh(struct ctl_table *table, int write,
			 void *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old;

	mutex_lock(&watchdog_mutex);

	old = READ_ONCE(watchdog_thresh);
	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (!err && write && old != READ_ONCE(watchdog_thresh))
		proc_watchdog_update();

	mutex_unlock(&watchdog_mutex);
	return err;
}
/*
 * The cpumask is the mask of possible cpus that the watchdog can run
 * on, not the mask of cpus it is actually running on.  This allows the
 * user to specify a mask that will include cpus that have not yet
 * been brought online, if desired.
 */
int proc_watchdog_cpumask(struct ctl_table *table, int write,
			  void *buffer, size_t *lenp, loff_t *ppos)
{
	int err;

	mutex_lock(&watchdog_mutex);

	err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
	if (!err && write)
		proc_watchdog_update();

	mutex_unlock(&watchdog_mutex);
	return err;
}
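/*
 * The mask is parsed by proc_do_large_bitmap(), which accepts cpulist-style
 * ranges; e.g. "echo 0-3 > /proc/sys/kernel/watchdog_cpumask" should
 * restrict the watchdog to CPUs 0-3 (illustrative; exact input semantics
 * follow proc_do_large_bitmap()).
 */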
static const int sixty = 60;

static struct ctl_table watchdog_sysctls[] = {
	{
		.procname	= "watchdog",
		.data		= &watchdog_user_enabled,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_watchdog,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{
		.procname	= "watchdog_thresh",
		.data		= &watchdog_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_watchdog_thresh,
		.extra1		= SYSCTL_ZERO,
		.extra2		= (void *)&sixty,
	},
	{
		.procname	= "watchdog_cpumask",
		.data		= &watchdog_cpumask_bits,
		.maxlen		= NR_CPUS,
		.mode		= 0644,
		.proc_handler	= proc_watchdog_cpumask,
	},
#ifdef CONFIG_SOFTLOCKUP_DETECTOR
	{
		.procname	= "soft_watchdog",
		.data		= &watchdog_softlockup_user_enabled,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_soft_watchdog,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{
		.procname	= "softlockup_panic",
		.data		= &softlockup_panic,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
#ifdef CONFIG_SMP
	{
		.procname	= "softlockup_all_cpu_backtrace",
		.data		= &sysctl_softlockup_all_cpu_backtrace,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
#endif /* CONFIG_SMP */
#endif /* CONFIG_SOFTLOCKUP_DETECTOR */
#ifdef CONFIG_HARDLOCKUP_DETECTOR
	{
		.procname	= "hardlockup_panic",
		.data		= &hardlockup_panic,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
#ifdef CONFIG_SMP
	{
		.procname	= "hardlockup_all_cpu_backtrace",
		.data		= &sysctl_hardlockup_all_cpu_backtrace,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
#endif /* CONFIG_SMP */
#endif /* CONFIG_HARDLOCKUP_DETECTOR */
	{}
};

static struct ctl_table watchdog_hardlockup_sysctl[] = {
	{
		.procname	= "nmi_watchdog",
		.data		= &watchdog_hardlockup_user_enabled,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= proc_nmi_watchdog,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{}
};
static void __init watchdog_sysctl_init(void)
{
	register_sysctl_init("kernel", watchdog_sysctls);

	if (watchdog_hardlockup_available)
		watchdog_hardlockup_sysctl[0].mode = 0644;
	register_sysctl_init("kernel", watchdog_hardlockup_sysctl);
}

#else
#define watchdog_sysctl_init() do { } while (0)
#endif /* CONFIG_SYSCTL */
static void __init lockup_detector_delay_init(struct work_struct *work);
static bool allow_lockup_detector_init_retry __initdata;

static struct work_struct detector_work __initdata =
		__WORK_INITIALIZER(detector_work, lockup_detector_delay_init);
static void __init lockup_detector_delay_init(struct work_struct *work)
{
	int ret;

	ret = watchdog_hardlockup_probe();
	if (ret) {
		pr_info("Delayed init of the lockup detector failed: %d\n", ret);
		pr_info("Hard watchdog permanently disabled\n");
		return;
	}

	allow_lockup_detector_init_retry = false;

	watchdog_hardlockup_available = true;
	lockup_detector_setup();
}
/**
 * lockup_detector_retry_init - retry init lockup detector if possible.
 *
 * Retry hardlockup detector init. It is useful when it requires some
 * functionality that has to be initialized later on a particular
 * platform.
 */
void __init lockup_detector_retry_init(void)
{
	/* Must be called before late init calls */
	if (!allow_lockup_detector_init_retry)
		return;

	schedule_work(&detector_work);
}
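/*
 * Typical (illustrative) use: an architecture whose hardlockup detector
 * depends on the PMU can call lockup_detector_retry_init() from the code
 * that brings the PMU up, once the early probe in lockup_detector_init()
 * has failed and armed the retry path.
 */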
/*
 * Ensure that the optional delayed hardlockup init has completed before
 * the init code and memory are freed.
 */
static int __init lockup_detector_check(void)
{
	/* Prevent any later retry. */
	allow_lockup_detector_init_retry = false;

	/* Make sure no work is pending. */
	flush_work(&detector_work);

	watchdog_sysctl_init();

	return 0;
}
late_initcall_sync(lockup_detector_check);
void __init lockup_detector_init(void)
{
	if (tick_nohz_full_enabled())
		pr_info("Disabling watchdog on nohz_full cores by default\n");

	cpumask_copy(&watchdog_cpumask,
		     housekeeping_cpumask(HK_TYPE_TIMER));

	if (!watchdog_hardlockup_probe())
		watchdog_hardlockup_available = true;
	else
		allow_lockup_detector_init_retry = true;

	lockup_detector_setup();
}