/* Pseudo NMI support on sparc64 systems.
 *
 * Copyright (C) 2009 David S. Miller <davem@davemloft.net>
 *
 * The NMI watchdog support and infrastructure is based almost
 * entirely upon the x86 NMI support code.
 */
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/nmi.h>
#include <linux/export.h>
#include <linux/kprobes.h>
#include <linux/kernel_stat.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/kdebug.h>
#include <linux/delay.h>
#include <linux/smp.h>

#include <asm/perf_event.h>
#include <asm/ptrace.h>
#include <asm/pcr.h>

#include "kstack.h"

/* We don't have a real NMI on sparc64, but we can fake one
 * up using profiling counter overflow interrupts and interrupt
 * levels.
 *
 * The profile overflow interrupts at level 15, so we use
 * level 14 as our IRQ off level.
 */

static int panic_on_timeout;

/* nmi_active:
 * >0: the NMI watchdog is active, but can be disabled
 * <0: the NMI watchdog has not been set up, and cannot be enabled
 *  0: the NMI watchdog is disabled, but can be enabled
 */
atomic_t nmi_active = ATOMIC_INIT(0);		/* oprofile uses this */
EXPORT_SYMBOL(nmi_active);

static unsigned int nmi_hz = HZ;
static DEFINE_PER_CPU(short, wd_enabled);
static int endflag __initdata;

static DEFINE_PER_CPU(unsigned int, last_irq_sum);
static DEFINE_PER_CPU(long, alert_counter);
static DEFINE_PER_CPU(int, nmi_touch);

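/* Reset the stall detector on every present cpu: each one clears its
 * alert_counter the next time its perfctr interrupt fires.
 */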
void touch_nmi_watchdog(void)
{
	if (atomic_read(&nmi_active)) {
		int cpu;

		for_each_present_cpu(cpu) {
			if (per_cpu(nmi_touch, cpu) != 1)
				per_cpu(nmi_touch, cpu) = 1;
		}
	}

	touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);

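/* Report a hard lockup: let notifier chain clients veto first, then
 * either panic or just WARN depending on configuration.
 */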
static void die_nmi(const char *str, struct pt_regs *regs, int do_panic)
{
	int this_cpu = smp_processor_id();

	if (notify_die(DIE_NMIWATCHDOG, str, regs, 0,
		       pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP)
		return;

	if (do_panic || panic_on_oops)
		panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
	else
		WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);
}

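/* Level 15 profile counter overflow interrupt, our pseudo-NMI.  If
 * the per-cpu timer interrupt count (irq0_irqs) stops advancing for
 * 30 seconds worth of overflows, the cpu is declared locked up.
 */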
notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
{
	unsigned int sum, touched = 0;
	void *orig_sp;

	clear_softint(1 << irq);

	local_cpu_data().__nmi_count++;

	nmi_enter();

	orig_sp = set_hardirq_stack();

	if (notify_die(DIE_NMI, "nmi", regs, 0,
		       pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP)
		touched = 1;
	else
		pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable);

	sum = local_cpu_data().irq0_irqs;
	if (__get_cpu_var(nmi_touch)) {
		__get_cpu_var(nmi_touch) = 0;
		touched = 1;
	}
	if (!touched && __get_cpu_var(last_irq_sum) == sum) {
		__this_cpu_inc(alert_counter);
		if (__this_cpu_read(alert_counter) == 30 * nmi_hz)
			die_nmi("BUG: NMI Watchdog detected LOCKUP",
				regs, panic_on_timeout);
	} else {
		__get_cpu_var(last_irq_sum) = sum;
		__this_cpu_write(alert_counter, 0);
	}
	if (__get_cpu_var(wd_enabled)) {
		pcr_ops->write_pic(0, pcr_ops->nmi_picl_value(nmi_hz));
		pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_enable);
	}

	restore_hardirq_stack(orig_sp);

	nmi_exit();
}

static inline unsigned int get_nmi_count(int cpu)
{
	return cpu_data(cpu).__nmi_count;
}

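/* Keep the other cpus spinning with interrupts enabled so the test in
 * check_nmi_watchdog() can observe their NMI counts advancing.
 */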
static __init void nmi_cpu_busy(void *data)
{
	local_irq_enable_in_hardirq();
	while (endflag == 0)
		mb();
}

static void report_broken_nmi(int cpu, unsigned int *prev_nmi_count)
{
	printk(KERN_CONT "\n");

	printk(KERN_WARNING
		"WARNING: CPU#%d: NMI appears to be stuck (%u->%u)!\n",
		cpu, prev_nmi_count[cpu], get_nmi_count(cpu));

	printk(KERN_WARNING
		"Please report this to bugzilla.kernel.org,\n");
	printk(KERN_WARNING
		"and attach the output of the 'dmesg' command.\n");

	per_cpu(wd_enabled, cpu) = 0;
	atomic_dec(&nmi_active);
}

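/* Turn off the perf counter and mark the watchdog disabled on the
 * calling cpu.
 */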
void stop_nmi_watchdog(void *unused)
{
	pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable);
	__get_cpu_var(wd_enabled) = 0;
	atomic_dec(&nmi_active);
}

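/* Boot-time self test: put the other cpus into a busy loop, wait 20
 * watchdog ticks, and make sure each enabled cpu's NMI count actually
 * advanced.  Cpus whose counter is stuck get the watchdog disabled.
 */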
static int __init check_nmi_watchdog(void)
{
	unsigned int *prev_nmi_count;
	int cpu, err;

	if (!atomic_read(&nmi_active))
		return 0;

	prev_nmi_count = kmalloc(nr_cpu_ids * sizeof(unsigned int), GFP_KERNEL);
	if (!prev_nmi_count) {
		err = -ENOMEM;
		goto error;
	}

	printk(KERN_INFO "Testing NMI watchdog ... ");

	smp_call_function(nmi_cpu_busy, (void *)&endflag, 0);

	for_each_possible_cpu(cpu)
		prev_nmi_count[cpu] = get_nmi_count(cpu);
	local_irq_enable();
	mdelay((20 * 1000) / nmi_hz); /* wait 20 ticks */

	for_each_online_cpu(cpu) {
		if (!per_cpu(wd_enabled, cpu))
			continue;
		if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5)
			report_broken_nmi(cpu, prev_nmi_count);
	}
	endflag = 1;
	if (!atomic_read(&nmi_active)) {
		kfree(prev_nmi_count);
		atomic_set(&nmi_active, -1);
		err = -ENODEV;
		goto error;
	}
	printk(KERN_CONT "OK.\n");

	nmi_hz = 1;

	kfree(prev_nmi_count);
	return 0;
error:
	on_each_cpu(stop_nmi_watchdog, NULL, 1);
	return err;
}

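/* Enable the watchdog on the calling cpu: program the counter for the
 * first overflow period and switch it on.
 */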
void start_nmi_watchdog(void *unused)
{
	__get_cpu_var(wd_enabled) = 1;
	atomic_inc(&nmi_active);

	pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable);
	pcr_ops->write_pic(0, pcr_ops->nmi_picl_value(nmi_hz));

	pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_enable);
}

static void nmi_adjust_hz_one(void *unused)
{
	if (!__get_cpu_var(wd_enabled))
		return;

	pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable);
	pcr_ops->write_pic(0, pcr_ops->nmi_picl_value(nmi_hz));

	pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_enable);
}

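/* Change the watchdog frequency and reprogram the counter on every
 * cpu that has the watchdog enabled.
 */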
void nmi_adjust_hz(unsigned int new_hz)
{
	nmi_hz = new_hz;
	on_each_cpu(nmi_adjust_hz_one, NULL, 1);
}
EXPORT_SYMBOL_GPL(nmi_adjust_hz);

static int nmi_shutdown(struct notifier_block *nb, unsigned long cmd, void *p)
{
	on_each_cpu(stop_nmi_watchdog, NULL, 1);
	return 0;
}

static struct notifier_block nmi_reboot_notifier = {
	.notifier_call = nmi_shutdown,
};

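/* Bring the watchdog up on every cpu, verify that it actually ticks,
 * and make sure it gets stopped again across a reboot.
 */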
int __init nmi_init(void)
{
	int err;

	on_each_cpu(start_nmi_watchdog, NULL, 1);

	err = check_nmi_watchdog();
	if (!err) {
		err = register_reboot_notifier(&nmi_reboot_notifier);
		if (err) {
			on_each_cpu(stop_nmi_watchdog, NULL, 1);
			atomic_set(&nmi_active, -1);
		}
	}

	return err;
}

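/* "nmi_watchdog=panic" on the command line makes a detected lockup
 * panic the machine instead of just warning.
 */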
static int __init setup_nmi_watchdog(char *str)
{
	if (!strncmp(str, "panic", 5))
		panic_on_timeout = 1;

	return 0;
}
__setup("nmi_watchdog=", setup_nmi_watchdog);