/*
 * Machine check handler.
 *
 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
 * Rest from unknown author(s).
 * 2004 Andi Kleen. Rewrote most of it.
 * Copyright 2008 Intel Corporation
 */
#include <linux/thread_info.h>
#include <linux/capability.h>
#include <linux/miscdevice.h>
#include <linux/ratelimit.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/smp_lock.h>
#include <linux/kobject.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/sysdev.h>
#include <linux/ctype.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/poll.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/fs.h>

#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/mce.h>
#include <asm/msr.h>

#include "mce.h"
/* Handle unconfigured int18 (should never happen) */
static void unexpected_machine_check(struct pt_regs *regs, long error_code)
{
        printk(KERN_ERR "CPU#%d: Unexpected int18 (Machine Check).\n",
               smp_processor_id());
}

/* Call the installed machine check handler for this CPU setup. */
void (*machine_check_vector)(struct pt_regs *, long error_code) =
                                                unexpected_machine_check;
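/*
 * NOTE: the 32-bit trap code dispatches int18 through this pointer, so the
 * ancient P5/WinChip handlers and do_machine_check() below can share one
 * entry point; mcheck_init() repoints it once a real handler is selected.
 */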
int mce_disabled;

/* Nonzero while the machine check handler is running */
atomic_t mce_entry;

#ifdef CONFIG_X86_NEW_MCE

#define MISC_MCELOG_MINOR 227
/*
 * Tolerant levels:
 *   0: always panic on uncorrected errors, log corrected errors
 *   1: panic or SIGBUS on uncorrected errors, log corrected errors
 *   2: SIGBUS or log uncorrected errors (if possible), log corrected errors
 *   3: never panic or SIGBUS, log all errors (for testing only)
 */
static int tolerant = 1;
static int banks;
static u64 *bank;
static unsigned long notify_user;
static int rip_msr;
static int mce_bootlog = -1;
static atomic_t mce_events;

static char trigger[128];
static char *trigger_argv[2] = { trigger, NULL };

static unsigned long dont_init_banks;

static DECLARE_WAIT_QUEUE_HEAD(mce_wait);
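/*
 * Readers blocked in mce_read()/mce_poll() sleep on mce_wait until
 * mce_notify_user() observes the notify_user bit set by mce_log().
 */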
/* MCA banks polled by the periodic polling timer for corrected events */
DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
        [0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
};

static inline int skip_bank_init(int i)
{
        return i < BITS_PER_LONG && test_bit(i, &dont_init_banks);
}
/* Do initial initialization of a struct mce */
void mce_setup(struct mce *m)
{
        memset(m, 0, sizeof(struct mce));
        m->cpu = smp_processor_id();
        rdtscll(m->tsc);
}
/*
 * Lockless MCE logging infrastructure.
 * This avoids deadlocks on printk locks without having to break locks. Also
 * separates MCEs from kernel messages to avoid bogus bug reports.
 */

static struct mce_log mcelog = {
        MCE_LOG_SIGNATURE,
        MCE_LOG_LEN,
};
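/*
 * Writer protocol: reserve a slot by advancing mcelog.next with cmpxchg,
 * copy the record in, then set ->finished to publish it. Readers must
 * check ->finished before trusting an entry's contents.
 */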
void mce_log(struct mce *mce)
{
        unsigned next, entry;

        atomic_inc(&mce_events);
        mce->finished = 0;
        wmb();
        for (;;) {
                entry = rcu_dereference(mcelog.next);
                for (;;) {
                        /*
                         * When the buffer fills up discard new entries.
                         * Assume that the earlier errors are the more
                         * interesting ones:
                         */
                        if (entry >= MCE_LOG_LEN) {
                                set_bit(MCE_OVERFLOW,
                                        (unsigned long *)&mcelog.flags);
                                return;
                        }
                        /* Old left over entry. Skip: */
                        if (mcelog.entry[entry].finished) {
                                entry++;
                                continue;
                        }
                        break;
                }
                next = entry + 1;
                if (cmpxchg(&mcelog.next, entry, next) == entry)
                        break;
        }
        memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
        wmb();
        mcelog.entry[entry].finished = 1;
        wmb();

        mce->finished = 1;
        set_bit(0, &notify_user);
}
static void print_mce(struct mce *m)
{
        printk(KERN_EMERG "\n"
               KERN_EMERG "HARDWARE ERROR\n"
               KERN_EMERG
               "CPU %d: Machine Check Exception: %16Lx Bank %d: %016Lx\n",
               m->cpu, m->mcgstatus, m->bank, m->status);
        if (m->ip) {
                printk(KERN_EMERG "RIP%s %02x:<%016Lx> ",
                       !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
                       m->cs, m->ip);
                if (m->cs == __KERNEL_CS)
                        print_symbol("{%s}", m->ip);
                printk("\n");
        }
        printk(KERN_EMERG "TSC %llx ", m->tsc);
        if (m->addr)
                printk("ADDR %llx ", m->addr);
        if (m->misc)
                printk("MISC %llx ", m->misc);
        printk("\n");
        printk(KERN_EMERG "This is not a software problem!\n");
        printk(KERN_EMERG "Run through mcelog --ascii to decode "
               "and contact your hardware vendor\n");
}
static void mce_panic(char *msg, struct mce *backup, u64 start)
{
        int i;

        oops_begin();
        for (i = 0; i < MCE_LOG_LEN; i++) {
                u64 tsc = mcelog.entry[i].tsc;

                /* Skip entries logged before this machine check started: */
                if ((s64)(tsc - start) < 0)
                        continue;
                print_mce(&mcelog.entry[i]);
                if (backup && mcelog.entry[i].tsc == backup->tsc)
                        backup = NULL;
        }
        if (backup)
                print_mce(backup);
        panic(msg);
}
/* MSR access wrappers used for error injection */
static u64 mce_rdmsrl(u32 msr)
{
        u64 v;

        rdmsrl(msr, v);

        return v;
}

static void mce_wrmsrl(u32 msr, u64 v)
{
        wrmsrl(msr, v);
}

int mce_available(struct cpuinfo_x86 *c)
{
        if (mce_disabled)
                return 0;
        return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
}
static inline void mce_get_rip(struct mce *m, struct pt_regs *regs)
{
        if (regs && (m->mcgstatus & MCG_STATUS_RIPV)) {
                m->ip = regs->ip;
                m->cs = regs->cs;
        } else {
                m->ip = 0;
                m->cs = 0;
        }
        if (rip_msr) {
                /* Assume the RIP in the MSR is exact. Is this true? */
                m->mcgstatus |= MCG_STATUS_EIPV;
                m->ip = mce_rdmsrl(rip_msr);
                m->cs = 0;
        }
}
/*
 * Poll for corrected events or events that happened before reset.
 * Those are just logged through /dev/mcelog.
 *
 * This is executed in standard interrupt context.
 */
void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
{
        struct mce m;
        int i;

        mce_setup(&m);

        m.mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
        for (i = 0; i < banks; i++) {
                if (!bank[i] || !test_bit(i, *b))
                        continue;

                m.misc = 0;
                m.addr = 0;
                m.bank = i;

                m.status = mce_rdmsrl(MSR_IA32_MC0_STATUS + i*4);
                if (!(m.status & MCI_STATUS_VAL))
                        continue;

                /*
                 * Uncorrected events are handled by the exception handler
                 * when it is enabled. But when the exception is disabled log
                 * everything.
                 *
                 * TBD do the same check for MCI_STATUS_EN here?
                 */
                if ((m.status & MCI_STATUS_UC) && !(flags & MCP_UC))
                        continue;

                if (m.status & MCI_STATUS_MISCV)
                        m.misc = mce_rdmsrl(MSR_IA32_MC0_MISC + i*4);
                if (m.status & MCI_STATUS_ADDRV)
                        m.addr = mce_rdmsrl(MSR_IA32_MC0_ADDR + i*4);

                if (!(flags & MCP_TIMESTAMP))
                        m.tsc = 0;
                /*
                 * Don't get the IP here because it's unlikely to
                 * have anything to do with the actual error location.
                 */
                if (!(flags & MCP_DONTLOG)) {
                        mce_log(&m);
                        add_taint(TAINT_MACHINE_CHECK);
                }

                /*
                 * Clear state for this bank.
                 */
                mce_wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
        }

        /*
         * Don't clear MCG_STATUS here because it's only defined for
         * exceptions.
         */
}
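/*
 * Note: besides the periodic timer below, on Intel CPUs with CMCI the
 * corrected-error interrupt handler also feeds events through this poller.
 */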
/*
 * The actual machine check handler. This only handles real
 * exceptions when something got corrupted coming in through int 18.
 *
 * This is executed in NMI context not subject to normal locking rules. This
 * implies that most kernel services cannot be safely used. Don't even
 * think about putting a printk in there!
 */
void do_machine_check(struct pt_regs *regs, long error_code)
{
        struct mce m, panicm;
        int panicm_found = 0;
        u64 mcestart = 0;
        int i;
        /*
         * If no_way_out gets set, there is no safe way to recover from this
         * MCE. If tolerant is cranked up, we'll try anyway.
         */
        int no_way_out = 0;
        /*
         * If kill_it gets set, there might be a way to recover from this
         * error.
         */
        int kill_it = 0;
        DECLARE_BITMAP(toclear, MAX_NR_BANKS);

        atomic_inc(&mce_entry);

        if (notify_die(DIE_NMI, "machine check", regs, error_code,
                       18, SIGKILL) == NOTIFY_STOP)
                goto out2;
        if (!banks)
                goto out2;

        mce_setup(&m);

        m.mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);

        /* if the restart IP is not valid, we're done for */
        if (!(m.mcgstatus & MCG_STATUS_RIPV))
                no_way_out = 1;

        rdtscll(mcestart);
        barrier();

        for (i = 0; i < banks; i++) {
                __clear_bit(i, toclear);
                if (!bank[i])
                        continue;

                m.misc = 0;
                m.addr = 0;
                m.bank = i;

                m.status = mce_rdmsrl(MSR_IA32_MC0_STATUS + i*4);
                if ((m.status & MCI_STATUS_VAL) == 0)
                        continue;

                /*
                 * Corrected errors are handled by machine_check_poll().
                 * Leave them alone.
                 */
                if ((m.status & MCI_STATUS_UC) == 0)
                        continue;

                /*
                 * Set taint even when machine check was not enabled.
                 */
                add_taint(TAINT_MACHINE_CHECK);

                __set_bit(i, toclear);

                if (m.status & MCI_STATUS_EN) {
                        /* if PCC was set, there's no way out */
                        no_way_out |= !!(m.status & MCI_STATUS_PCC);
                        /*
                         * If this error was uncorrectable and there was
                         * an overflow, we're in trouble. If no overflow,
                         * we might get away with just killing a task.
                         */
                        if (m.status & MCI_STATUS_UC) {
                                if (tolerant < 1 || m.status & MCI_STATUS_OVER)
                                        no_way_out = 1;
                                kill_it = 1;
                        }
                } else {
                        /*
                         * Machine check event was not enabled. Clear, but
                         * ignore.
                         */
                        continue;
                }

                if (m.status & MCI_STATUS_MISCV)
                        m.misc = mce_rdmsrl(MSR_IA32_MC0_MISC + i*4);
                if (m.status & MCI_STATUS_ADDRV)
                        m.addr = mce_rdmsrl(MSR_IA32_MC0_ADDR + i*4);

                mce_get_rip(&m, regs);
                mce_log(&m);

                /*
                 * Did this bank cause the exception?
                 *
                 * Assume that the bank with uncorrectable errors did it,
                 * and that there is only a single one:
                 */
                if ((m.status & MCI_STATUS_UC) &&
                    (m.status & MCI_STATUS_EN)) {
                        panicm = m;
                        panicm_found = 1;
                }
        }

        /*
         * If we didn't find an uncorrectable error, pick
         * the last one (shouldn't happen, just being safe).
         */
        if (!panicm_found)
                panicm = m;

        /*
         * If we have decided that we just CAN'T continue, and the user
         * has not set tolerant to an insane level, give up and die.
         */
        if (no_way_out && tolerant < 3)
                mce_panic("Machine check", &panicm, mcestart);

        /*
         * If the error seems to be unrecoverable, something should be
         * done. Try to kill as little as possible. If we can kill just
         * one task, do that. If the user has set the tolerance very
         * high, don't try to do anything at all.
         */
        if (kill_it && tolerant < 3) {
                int user_space = 0;

                /*
                 * If the EIPV bit is set, it means the saved IP is the
                 * instruction which caused the MCE.
                 */
                if (m.mcgstatus & MCG_STATUS_EIPV)
                        user_space = panicm.ip && (panicm.cs & 3);

                /*
                 * If we know that the error was in user space, send a
                 * SIGBUS. Otherwise, panic if tolerance is low.
                 *
                 * force_sig() takes an awful lot of locks and has a slight
                 * risk of deadlocking.
                 */
                if (user_space) {
                        force_sig(SIGBUS, current);
                } else if (panic_on_oops || tolerant < 2) {
                        mce_panic("Uncorrected machine check",
                                  &panicm, mcestart);
                }
        }

        /* notify userspace ASAP */
        set_thread_flag(TIF_MCE_NOTIFY);

        /* the last thing we do is clear state */
        for (i = 0; i < banks; i++) {
                if (test_bit(i, toclear))
                        mce_wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
        }
        mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
 out2:
        atomic_dec(&mce_entry);
}
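/*
 * TIF_MCE_NOTIFY set above is handled on the return-to-user path, where
 * mce_notify_user() can safely wake up readers and fire the trigger; none
 * of that is possible directly from this NMI-like context.
 */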
#ifdef CONFIG_X86_MCE_INTEL
/**
 * mce_log_therm_throt_event - Logs the thermal throttling event to mcelog
 * @status: Event status information
 *
 * This function should be called by the thermal interrupt after the
 * event has been processed and the decision was made to log the event
 * further.
 *
 * The status parameter will be saved to the 'status' field of 'struct mce'
 * and historically has been the register value of the
 * MSR_IA32_THERMAL_STATUS (Intel) msr.
 */
void mce_log_therm_throt_event(__u64 status)
{
        struct mce m;

        mce_setup(&m);
        m.bank = MCE_THERMAL_BANK;
        m.status = status;
        mce_log(&m);
}
#endif /* CONFIG_X86_MCE_INTEL */
/*
 * Periodic polling timer for "silent" machine check errors. If the
 * poller finds an MCE, poll 2x faster. When the poller finds no more
 * errors, poll 2x slower (up to check_interval seconds).
 */
static int check_interval = 5 * 60; /* 5 minutes */

static DEFINE_PER_CPU(int, next_interval); /* in jiffies */
static DEFINE_PER_CPU(struct timer_list, mce_timer);

static void mcheck_timer(unsigned long data)
{
        struct timer_list *t = &per_cpu(mce_timer, data);
        int *n;

        WARN_ON(smp_processor_id() != data);

        if (mce_available(&current_cpu_data)) {
                machine_check_poll(MCP_TIMESTAMP,
                                   &__get_cpu_var(mce_poll_banks));
        }

        /*
         * Alert userspace if needed. If we logged an MCE, reduce the
         * polling interval, otherwise increase the polling interval.
         */
        n = &__get_cpu_var(next_interval);
        if (mce_notify_user()) {
                *n = max(*n/2, HZ/100);
        } else {
                *n = min(*n*2, (int)round_jiffies_relative(check_interval*HZ));
        }

        t->expires = jiffies + *n;
        add_timer(t);
}
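/*
 * Example (HZ=1000, check_interval=300): a burst of corrected errors
 * halves the interval from 300s toward the HZ/100 = 10ms floor; once the
 * machine is quiet again it doubles back up to the 300s ceiling.
 */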
static void mce_do_trigger(struct work_struct *work)
{
        call_usermodehelper(trigger, trigger_argv, NULL, UMH_NO_WAIT);
}

static DECLARE_WORK(mce_trigger_work, mce_do_trigger);

/*
 * Notify the user(s) about new machine check events.
 * Can be called from interrupt context, but not from machine check/NMI
 * context.
 */
int mce_notify_user(void)
{
        /* Not more than two messages every minute */
        static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);

        clear_thread_flag(TIF_MCE_NOTIFY);

        if (test_and_clear_bit(0, &notify_user)) {
                wake_up_interruptible(&mce_wait);

                /*
                 * There is no risk of missing notifications because
                 * work_pending is always cleared before the function is
                 * executed.
                 */
                if (trigger[0] && !work_pending(&mce_trigger_work))
                        schedule_work(&mce_trigger_work);

                if (__ratelimit(&ratelimit))
                        printk(KERN_INFO "Machine check events logged\n");

                return 1;
        }
        return 0;
}
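/*
 * The trigger is an optional user helper run on new events, e.g.
 * (the helper path below is only an example, any executable works):
 *
 *      echo /usr/local/bin/mce-handler > \
 *              /sys/devices/system/machinecheck/machinecheck0/trigger
 */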
/*
 * Initialize Machine Checks for a CPU.
 */
static int mce_cap_init(void)
{
        unsigned b;
        u64 cap;

        rdmsrl(MSR_IA32_MCG_CAP, cap);

        b = cap & MCG_BANKCNT_MASK;
        printk(KERN_INFO "mce: CPU supports %d MCE banks\n", b);

        if (b > MAX_NR_BANKS) {
                printk(KERN_WARNING
                       "MCE: Using only %u machine check banks out of %u\n",
                       MAX_NR_BANKS, b);
                b = MAX_NR_BANKS;
        }

        /* Don't support asymmetric configurations today */
        WARN_ON(banks != 0 && b != banks);
        banks = b;

        bank = kmalloc(banks * sizeof(u64), GFP_KERNEL);
        if (!bank)
                return -ENOMEM;

        memset(bank, 0xff, banks * sizeof(u64));

        /* Use accurate RIP reporting if available. */
        if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
                rip_msr = MSR_IA32_MCG_EIP;

        return 0;
}
static void mce_init(void *dummy)
{
        mce_banks_t all_banks;
        u64 cap;
        int i;

        /*
         * Log the machine checks left over from the previous reset.
         */
        bitmap_fill(all_banks, MAX_NR_BANKS);
        machine_check_poll(MCP_UC|(!mce_bootlog ? MCP_DONTLOG : 0), &all_banks);

        set_in_cr4(X86_CR4_MCE);

        rdmsrl(MSR_IA32_MCG_CAP, cap);
        if (cap & MCG_CTL_P)
                wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);

        for (i = 0; i < banks; i++) {
                if (skip_bank_init(i))
                        continue;
                wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]);
                wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
        }
}
/* Add per CPU specific workarounds here */
static void mce_cpu_quirks(struct cpuinfo_x86 *c)
{
        /* This should be disabled by the BIOS, but isn't always */
        if (c->x86_vendor == X86_VENDOR_AMD) {
                if (c->x86 == 15 && banks > 4) {
                        /*
                         * disable GART TBL walk error reporting, which
                         * trips off incorrectly with the IOMMU & 3ware
                         * & Cerberus:
                         */
                        clear_bit(10, (unsigned long *)&bank[4]);
                }
                if (c->x86 <= 17 && mce_bootlog < 0) {
                        /*
                         * Lots of broken BIOS around that don't clear them
                         * by default and leave crap in there. Don't log:
                         */
                        mce_bootlog = 0;
                }
                /*
                 * Various K7s with broken bank 0 around. Always disable
                 * by default.
                 */
                if (c->x86 == 6 && banks > 0)
                        bank[0] = 0;
        }

        if (c->x86_vendor == X86_VENDOR_INTEL) {
                /*
                 * SDM documents that on family 6 bank 0 should not be written
                 * because it aliases to another special BIOS controlled
                 * register.
                 * But it's not aliased anymore on model 0x1a+
                 * Don't ignore bank 0 completely because there could be a
                 * valid event later, merely don't write CTL0.
                 */
                if (c->x86 == 6 && c->x86_model < 0x1A)
                        __set_bit(0, &dont_init_banks);
        }
}
static void __cpuinit mce_ancient_init(struct cpuinfo_x86 *c)
{
        if (c->x86 != 5)
                return;
        switch (c->x86_vendor) {
        case X86_VENDOR_INTEL:
                if (mce_p5_enabled())
                        intel_p5_mcheck_init(c);
                break;
        case X86_VENDOR_CENTAUR:
                winchip_mcheck_init(c);
                break;
        }
}
static void mce_cpu_features(struct cpuinfo_x86 *c)
{
        switch (c->x86_vendor) {
        case X86_VENDOR_INTEL:
                mce_intel_feature_init(c);
                break;
        case X86_VENDOR_AMD:
                mce_amd_feature_init(c);
                break;
        default:
                break;
        }
}
static void mce_init_timer(void)
{
        struct timer_list *t = &__get_cpu_var(mce_timer);
        int *n = &__get_cpu_var(next_interval);

        *n = check_interval * HZ;
        if (!*n)
                return;
        setup_timer(t, mcheck_timer, smp_processor_id());
        t->expires = round_jiffies(jiffies + *n);
        add_timer(t);
}
/*
 * Called for each booted CPU to set up machine checks.
 * Must be called with preempt off:
 */
void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
{
        if (mce_disabled)
                return;

        mce_ancient_init(c);

        if (!mce_available(c))
                return;

        if (mce_cap_init() < 0) {
                mce_disabled = 1;
                return;
        }
        mce_cpu_quirks(c);

        machine_check_vector = do_machine_check;

        mce_init(NULL);
        mce_cpu_features(c);
        mce_init_timer();
}
/*
 * Character device to read and clear the MCE log.
 */

static DEFINE_SPINLOCK(mce_state_lock);
static int open_count;          /* #times opened */
static int open_exclu;          /* already open exclusive? */

static int mce_open(struct inode *inode, struct file *file)
{
        spin_lock(&mce_state_lock);

        if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
                spin_unlock(&mce_state_lock);

                return -EBUSY;
        }

        if (file->f_flags & O_EXCL)
                open_exclu = 1;
        open_count++;

        spin_unlock(&mce_state_lock);

        return nonseekable_open(inode, file);
}

static int mce_release(struct inode *inode, struct file *file)
{
        spin_lock(&mce_state_lock);

        open_count--;
        open_exclu = 0;

        spin_unlock(&mce_state_lock);

        return 0;
}
static void collect_tscs(void *data)
{
        unsigned long *cpu_tsc = (unsigned long *)data;

        rdtscll(cpu_tsc[smp_processor_id()]);
}
static DEFINE_MUTEX(mce_read_mutex);

static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
                        loff_t *off)
{
        char __user *buf = ubuf;
        unsigned long *cpu_tsc;
        unsigned prev, next;
        int i, err;

        cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL);
        if (!cpu_tsc)
                return -ENOMEM;

        mutex_lock(&mce_read_mutex);
        next = rcu_dereference(mcelog.next);

        /* Only supports full reads right now */
        if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) {
                mutex_unlock(&mce_read_mutex);
                kfree(cpu_tsc);

                return -EINVAL;
        }

        err = 0;
        prev = 0;
        do {
                for (i = prev; i < next; i++) {
                        unsigned long start = jiffies;

                        while (!mcelog.entry[i].finished) {
                                if (time_after_eq(jiffies, start + 2)) {
                                        memset(mcelog.entry + i, 0,
                                               sizeof(struct mce));
                                        goto timeout;
                                }
                                cpu_relax();
                        }
                        smp_rmb();
                        err |= copy_to_user(buf, mcelog.entry + i,
                                            sizeof(struct mce));
                        buf += sizeof(struct mce);
timeout:
                        ;
                }

                memset(mcelog.entry + prev, 0,
                       (next - prev) * sizeof(struct mce));
                prev = next;
                next = cmpxchg(&mcelog.next, prev, 0);
        } while (next != prev);

        synchronize_sched();

        /*
         * Collect entries that were still getting written before the
         * synchronize.
         */
        on_each_cpu(collect_tscs, cpu_tsc, 1);

        for (i = next; i < MCE_LOG_LEN; i++) {
                if (mcelog.entry[i].finished &&
                    mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) {
                        err |= copy_to_user(buf, mcelog.entry+i,
                                            sizeof(struct mce));
                        smp_rmb();
                        buf += sizeof(struct mce);
                        memset(&mcelog.entry[i], 0, sizeof(struct mce));
                }
        }
        mutex_unlock(&mce_read_mutex);
        kfree(cpu_tsc);

        return err ? -EFAULT : buf - ubuf;
}
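/*
 * The read above is two-phase: first drain everything up to mcelog.next
 * and reset the index, then, after the TSC snapshot from collect_tscs(),
 * sweep once more for records that were still being written when the
 * index was reset (entry TSC older than its CPU's snapshot).
 */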
static unsigned int mce_poll(struct file *file, poll_table *wait)
{
        poll_wait(file, &mce_wait, wait);
        if (rcu_dereference(mcelog.next))
                return POLLIN | POLLRDNORM;
        return 0;
}
static long mce_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
        int __user *p = (int __user *)arg;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        switch (cmd) {
        case MCE_GET_RECORD_LEN:
                return put_user(sizeof(struct mce), p);
        case MCE_GET_LOG_LEN:
                return put_user(MCE_LOG_LEN, p);
        case MCE_GETCLEAR_FLAGS: {
                unsigned flags;

                do {
                        flags = mcelog.flags;
                } while (cmpxchg(&mcelog.flags, flags, 0) != flags);

                return put_user(flags, p);
        }
        default:
                return -ENOTTY;
        }
}
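/*
 * Userspace sketch of the intended calling sequence (illustrative only;
 * a real consumer such as mcelog adds error handling):
 *
 *      int fd = open("/dev/mcelog", O_RDONLY);
 *      int recsz, loglen;
 *      ioctl(fd, MCE_GET_RECORD_LEN, &recsz);
 *      ioctl(fd, MCE_GET_LOG_LEN, &loglen);
 *      char buf[recsz * loglen];       // full-buffer reads only, see mce_read()
 *      ssize_t n = read(fd, buf, sizeof(buf));
 */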
static const struct file_operations mce_chrdev_ops = {
        .open                   = mce_open,
        .release                = mce_release,
        .read                   = mce_read,
        .poll                   = mce_poll,
        .unlocked_ioctl         = mce_ioctl,
};

static struct miscdevice mce_log_device = {
        MISC_MCELOG_MINOR,
        "mcelog",
        &mce_chrdev_ops,
};
/*
 * mce=off disables machine check
 * mce=TOLERANCELEVEL (number, see above)
 * mce=bootlog Log MCEs from before booting. Disabled by default on AMD.
 * mce=nobootlog Don't log MCEs from before booting.
 */
static int __init mcheck_enable(char *str)
{
        if (!strcmp(str, "off"))
                mce_disabled = 1;
        else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
                mce_bootlog = (str[0] == 'b');
        else if (isdigit(str[0]))
                get_option(&str, &tolerant);
        else {
                printk(KERN_INFO "mce argument %s ignored. Please use /sys\n",
                       str);
                return 0;
        }
        return 1;
}
__setup("mce", mcheck_enable);
/*
 * Disable machine checks on suspend and shutdown. We can't really handle
 * them later.
 */
static int mce_disable(void)
{
        int i;

        for (i = 0; i < banks; i++) {
                if (!skip_bank_init(i))
                        wrmsrl(MSR_IA32_MC0_CTL + i*4, 0);
        }
        return 0;
}

static int mce_suspend(struct sys_device *dev, pm_message_t state)
{
        return mce_disable();
}

static int mce_shutdown(struct sys_device *dev)
{
        return mce_disable();
}
/*
 * On resume clear all MCE state. Don't want to see leftovers from the BIOS.
 * Only one CPU is active at this time, the others get re-added later using
 * CPU hotplug:
 */
static int mce_resume(struct sys_device *dev)
{
        mce_init(NULL);
        mce_cpu_features(&current_cpu_data);

        return 0;
}
static void mce_cpu_restart(void *data)
{
        del_timer_sync(&__get_cpu_var(mce_timer));
        if (mce_available(&current_cpu_data))
                mce_init(NULL);
        mce_init_timer();
}

/* Reinit MCEs after user configuration changes */
static void mce_restart(void)
{
        on_each_cpu(mce_cpu_restart, NULL, 1);
}
static struct sysdev_class mce_sysclass = {
        .suspend        = mce_suspend,
        .shutdown       = mce_shutdown,
        .resume         = mce_resume,
        .name           = "machinecheck",
};

DEFINE_PER_CPU(struct sys_device, mce_dev);

__cpuinitdata
void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
/* Why are there no generic functions for this? */
#define ACCESSOR(name, var, start) \
        static ssize_t show_ ## name(struct sys_device *s,              \
                                     struct sysdev_attribute *attr,     \
                                     char *buf) {                       \
                return sprintf(buf, "%Lx\n", (u64)var);                 \
        }                                                               \
        static ssize_t set_ ## name(struct sys_device *s,               \
                                    struct sysdev_attribute *attr,      \
                                    const char *buf, size_t siz) {      \
                char *end;                                              \
                u64 new = simple_strtoull(buf, &end, 0);                \
                                                                        \
                if (end == buf)                                         \
                        return -EINVAL;                                 \
                var = new;                                              \
                start;                                                  \
                                                                        \
                return end-buf;                                         \
        }                                                               \
        static SYSDEV_ATTR(name, 0644, show_ ## name, set_ ## name);
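/*
 * ACCESSOR(check_interval, check_interval, mce_restart()) further down
 * expands to show_check_interval()/set_check_interval() and the
 * attr_check_interval sysdev attribute, so a write to that sysfs file
 * also restarts the per-CPU polling timers.
 */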
static struct sysdev_attribute *bank_attrs;

static ssize_t show_bank(struct sys_device *s, struct sysdev_attribute *attr,
                         char *buf)
{
        u64 b = bank[attr - bank_attrs];

        return sprintf(buf, "%llx\n", b);
}

static ssize_t set_bank(struct sys_device *s, struct sysdev_attribute *attr,
                        const char *buf, size_t siz)
{
        char *end;
        u64 new = simple_strtoull(buf, &end, 0);

        if (end == buf)
                return -EINVAL;

        bank[attr - bank_attrs] = new;
        mce_restart();

        return end-buf;
}
static ssize_t
show_trigger(struct sys_device *s, struct sysdev_attribute *attr, char *buf)
{
        strcpy(buf, trigger);
        strcat(buf, "\n");
        return strlen(trigger) + 1;
}

static ssize_t set_trigger(struct sys_device *s, struct sysdev_attribute *attr,
                           const char *buf, size_t siz)
{
        char *p;
        int len;

        strncpy(trigger, buf, sizeof(trigger));
        trigger[sizeof(trigger)-1] = 0;
        len = strlen(trigger);
        p = strchr(trigger, '\n');

        if (p)
                *p = 0;

        return len;
}
static SYSDEV_ATTR(trigger, 0644, show_trigger, set_trigger);
static SYSDEV_INT_ATTR(tolerant, 0644, tolerant);

ACCESSOR(check_interval, check_interval, mce_restart())

static struct sysdev_attribute *mce_attrs[] = {
        &attr_tolerant.attr, &attr_check_interval, &attr_trigger,
        NULL
};

static cpumask_var_t mce_dev_initialized;
/* Per cpu sysdev init. All of the cpus still share the same ctrl bank: */
static __cpuinit int mce_create_device(unsigned int cpu)
{
        int err;
        int i;

        if (!mce_available(&boot_cpu_data))
                return -EIO;

        memset(&per_cpu(mce_dev, cpu).kobj, 0, sizeof(struct kobject));
        per_cpu(mce_dev, cpu).id        = cpu;
        per_cpu(mce_dev, cpu).cls       = &mce_sysclass;

        err = sysdev_register(&per_cpu(mce_dev, cpu));
        if (err)
                return err;

        for (i = 0; mce_attrs[i]; i++) {
                err = sysdev_create_file(&per_cpu(mce_dev, cpu), mce_attrs[i]);
                if (err)
                        goto error;
        }
        for (i = 0; i < banks; i++) {
                err = sysdev_create_file(&per_cpu(mce_dev, cpu),
                                         &bank_attrs[i]);
                if (err)
                        goto error2;
        }
        cpumask_set_cpu(cpu, mce_dev_initialized);

        return 0;
error2:
        while (--i >= 0)
                sysdev_remove_file(&per_cpu(mce_dev, cpu), &bank_attrs[i]);
error:
        while (--i >= 0)
                sysdev_remove_file(&per_cpu(mce_dev, cpu), mce_attrs[i]);

        sysdev_unregister(&per_cpu(mce_dev, cpu));

        return err;
}
static __cpuinit void mce_remove_device(unsigned int cpu)
{
        int i;

        if (!cpumask_test_cpu(cpu, mce_dev_initialized))
                return;

        for (i = 0; mce_attrs[i]; i++)
                sysdev_remove_file(&per_cpu(mce_dev, cpu), mce_attrs[i]);

        for (i = 0; i < banks; i++)
                sysdev_remove_file(&per_cpu(mce_dev, cpu), &bank_attrs[i]);

        sysdev_unregister(&per_cpu(mce_dev, cpu));
        cpumask_clear_cpu(cpu, mce_dev_initialized);
}
/* Make sure there are no machine checks on offlined CPUs. */
static void mce_disable_cpu(void *h)
{
        unsigned long action = *(unsigned long *)h;
        int i;

        if (!mce_available(&current_cpu_data))
                return;
        if (!(action & CPU_TASKS_FROZEN))
                cmci_clear();
        for (i = 0; i < banks; i++) {
                if (!skip_bank_init(i))
                        wrmsrl(MSR_IA32_MC0_CTL + i*4, 0);
        }
}
static void mce_reenable_cpu(void *h)
{
        unsigned long action = *(unsigned long *)h;
        int i;

        if (!mce_available(&current_cpu_data))
                return;

        if (!(action & CPU_TASKS_FROZEN))
                cmci_reenable();
        for (i = 0; i < banks; i++) {
                if (!skip_bank_init(i))
                        wrmsrl(MSR_IA32_MC0_CTL + i*4, bank[i]);
        }
}
/* Get notified when a cpu comes on/off. Be hotplug friendly. */
static int __cpuinit
mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
        struct timer_list *t = &per_cpu(mce_timer, cpu);

        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                mce_create_device(cpu);
                if (threshold_cpu_callback)
                        threshold_cpu_callback(action, cpu);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                if (threshold_cpu_callback)
                        threshold_cpu_callback(action, cpu);
                mce_remove_device(cpu);
                break;
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
                del_timer_sync(t);
                smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
                break;
        case CPU_DOWN_FAILED:
        case CPU_DOWN_FAILED_FROZEN:
                t->expires = round_jiffies(jiffies +
                                           per_cpu(next_interval, cpu));
                add_timer_on(t, cpu);
                smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
                break;
        case CPU_POST_DEAD:
                /* intentionally ignoring frozen here */
                cmci_rediscover(cpu);
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block mce_cpu_notifier __cpuinitdata = {
        .notifier_call = mce_cpu_callback,
};
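/*
 * Ordering matters above: banks are disabled and the timer stopped in
 * DOWN_PREPARE, while the CPU can still service the cross-call; CMCI
 * bank ownership is redistributed only after the CPU is fully dead.
 */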
static __init int mce_init_banks(void)
{
        int i;

        bank_attrs = kzalloc(sizeof(struct sysdev_attribute) * banks,
                             GFP_KERNEL);
        if (!bank_attrs)
                return -ENOMEM;

        for (i = 0; i < banks; i++) {
                struct sysdev_attribute *a = &bank_attrs[i];

                a->attr.name    = kasprintf(GFP_KERNEL, "bank%d", i);
                if (!a->attr.name)
                        goto nomem;

                a->attr.mode    = 0644;
                a->show         = show_bank;
                a->store        = set_bank;
        }
        return 0;

nomem:
        while (--i >= 0)
                kfree(bank_attrs[i].attr.name);
        kfree(bank_attrs);
        bank_attrs = NULL;

        return -ENOMEM;
}
static __init int mce_init_device(void)
{
        int err;
        int i = 0;

        if (!mce_available(&boot_cpu_data))
                return -EIO;

        if (!alloc_cpumask_var(&mce_dev_initialized, GFP_KERNEL))
                return -ENOMEM;
        cpumask_clear(mce_dev_initialized);

        err = mce_init_banks();
        if (err)
                return err;

        err = sysdev_class_register(&mce_sysclass);
        if (err)
                return err;

        for_each_online_cpu(i) {
                err = mce_create_device(i);
                if (err)
                        return err;
        }

        register_hotcpu_notifier(&mce_cpu_notifier);
        misc_register(&mce_log_device);

        return err;
}

device_initcall(mce_init_device);
#else /* CONFIG_X86_OLD_MCE: */

int nr_mce_banks;
EXPORT_SYMBOL_GPL(nr_mce_banks);        /* non-fatal.o */

/* This has to be run for each processor */
void mcheck_init(struct cpuinfo_x86 *c)
{
        if (mce_disabled == 1)
                return;

        switch (c->x86_vendor) {
        case X86_VENDOR_AMD:
                amd_mcheck_init(c);
                break;

        case X86_VENDOR_INTEL:
                if (c->x86 == 5)
                        intel_p5_mcheck_init(c);
                if (c->x86 == 6)
                        intel_p6_mcheck_init(c);
                if (c->x86 == 15)
                        intel_p4_mcheck_init(c);
                break;

        case X86_VENDOR_CENTAUR:
                if (c->x86 == 5)
                        winchip_mcheck_init(c);
                break;

        default:
                break;
        }
        printk(KERN_INFO "mce: CPU supports %d MCE banks\n", nr_mce_banks);
}

static int __init mcheck_enable(char *str)
{
        mce_disabled = -1;
        return 1;
}

__setup("mce", mcheck_enable);

#endif /* CONFIG_X86_OLD_MCE */
/*
 * Old style boot options parsing. Only for compatibility.
 */
static int __init mcheck_disable(char *str)
{
        mce_disabled = 1;
        return 1;
}
__setup("nomce", mcheck_disable);