/*
 * Machine check handler.
 *
 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
 * Rest from unknown author(s).
 * 2004 Andi Kleen. Rewrote most of it.
 * Copyright 2008 Intel Corporation
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <linux/capability.h>
#include <linux/miscdevice.h>
#include <linux/ratelimit.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/kobject.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/syscore_ops.h>
#include <linux/delay.h>
#include <linux/ctype.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/debugfs.h>
#include <linux/irq_work.h>
#include <linux/export.h>

#include <asm/processor.h>
#include <asm/traps.h>
#include <asm/tlbflush.h>

#include "mce-internal.h"

static DEFINE_MUTEX(mce_chrdev_read_mutex);
#define mce_log_get_idx_check(p) \
({ \
	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
			 !lockdep_is_held(&mce_chrdev_read_mutex), \
			 "suspicious mce_log_get_idx_check() usage"); \
	smp_load_acquire(&(p)); \
})
#define CREATE_TRACE_POINTS
#include <trace/events/mce.h>
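/*
 * Note: CREATE_TRACE_POINTS must be defined in exactly one translation
 * unit before including the trace header, so the mce_record tracepoint
 * body is instantiated here; all other users of <trace/events/mce.h>
 * only get the declarations.
 */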
#define SPINUNIT	100	/* 100ns */

DEFINE_PER_CPU(unsigned, mce_exception_count);
struct mce_bank *mce_banks __read_mostly;
struct mce_vendor_flags mce_flags __read_mostly;

struct mca_config mca_cfg __read_mostly = {
	.bootlog  = -1,
	/*
	 * Tolerant levels:
	 * 0: always panic on uncorrected errors, log corrected errors
	 * 1: panic or SIGBUS on uncorrected errors, log corrected errors
	 * 2: SIGBUS or log uncorrected errors (if possible), log corr. errors
	 * 3: never panic or SIGBUS, log all errors (for testing only)
	 */
	.tolerant = 1,
	.monarch_timeout = -1
};
/* User mode helper program triggered by machine check event */
static unsigned long mce_need_notify;
static char mce_helper[128];
static char *mce_helper_argv[2] = { mce_helper, NULL };

static DECLARE_WAIT_QUEUE_HEAD(mce_chrdev_wait);

static DEFINE_PER_CPU(struct mce, mces_seen);
static int cpu_missing;
/*
 * MCA banks polled by the periodic polling timer for corrected events.
 * With Intel CMCI, this only has MCA banks which do not support CMCI (if any).
 */
DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
	[0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
};
/*
 * MCA banks controlled through firmware first for corrected errors.
 * This is a global list of banks for which we won't enable CMCI and we
 * won't poll. Firmware controls these banks and is responsible for
 * reporting corrected errors through GHES. Uncorrected/recoverable
 * errors are still notified through a machine check.
 */
mce_banks_t mce_banks_ce_disabled;
static struct work_struct mce_work;
static struct irq_work mce_irq_work;

static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
/*
 * CPU/chipset specific EDAC code can register a notifier call here to print
 * MCE errors in a human-readable form.
 */
ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);
/* Do initial initialization of a struct mce */
void mce_setup(struct mce *m)
{
	memset(m, 0, sizeof(struct mce));
	m->cpu = m->extcpu = smp_processor_id();
	/* We hope get_seconds stays lockless */
	m->time = get_seconds();
	m->cpuvendor = boot_cpu_data.x86_vendor;
	m->cpuid = cpuid_eax(1);
	m->socketid = cpu_data(m->extcpu).phys_proc_id;
	m->apicid = cpu_data(m->extcpu).initial_apicid;
	rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);
}
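/*
 * A minimal usage sketch (illustrative, mirroring callers such as
 * mce_log_therm_throt_event() below): seed the record, fill in the
 * per-bank fields, then hand it to mce_log():
 *
 *	struct mce m;
 *
 *	mce_setup(&m);
 *	m.bank   = bank;
 *	m.status = status;
 *	mce_log(&m);
 */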
DEFINE_PER_CPU(struct mce, injectm);
EXPORT_PER_CPU_SYMBOL_GPL(injectm);
/*
 * Lockless MCE logging infrastructure.
 * This avoids deadlocks on printk locks without having to break locks. It
 * also keeps MCEs separate from kernel messages to avoid bogus bug reports.
 */
static struct mce_log mcelog = {
	.signature	= MCE_LOG_SIGNATURE,
	.len		= MCE_LOG_LEN,
	.recordlen	= sizeof(struct mce),
};
void mce_log(struct mce *mce)
{
	unsigned next, entry;

	/* Emit the trace record: */
	trace_mce_record(mce);

	if (!mce_gen_pool_add(mce))
		irq_work_queue(&mce_irq_work);

	wmb();
	for (;;) {
		entry = mce_log_get_idx_check(mcelog.next);
		for (;;) {
			/*
			 * When the buffer fills up discard new entries.
			 * Assume that the earlier errors are the more
			 * interesting ones:
			 */
			if (entry >= MCE_LOG_LEN) {
				set_bit(MCE_OVERFLOW,
					(unsigned long *)&mcelog.flags);
				return;
			}
			/* Old left over entry. Skip: */
			if (mcelog.entry[entry].finished) {
				entry++;
				continue;
			}
			break;
		}
		smp_rmb();
		next = entry + 1;
		if (cmpxchg(&mcelog.next, entry, next) == entry)
			break;
	}
	memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
	wmb();
	mcelog.entry[entry].finished = 1;
	wmb();

	set_bit(0, &mce_need_notify);
}
void mce_inject_log(struct mce *m)
{
	mutex_lock(&mce_chrdev_read_mutex);
	mce_log(m);
	mutex_unlock(&mce_chrdev_read_mutex);
}
EXPORT_SYMBOL_GPL(mce_inject_log);
static struct notifier_block mce_srao_nb;

void mce_register_decode_chain(struct notifier_block *nb)
{
	/* Ensure SRAO notifier has the highest priority in the decode chain. */
	if (nb != &mce_srao_nb && nb->priority == INT_MAX)
		nb->priority -= 1;

	atomic_notifier_chain_register(&x86_mce_decoder_chain, nb);
}
EXPORT_SYMBOL_GPL(mce_register_decode_chain);

void mce_unregister_decode_chain(struct notifier_block *nb)
{
	atomic_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
}
EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);
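/*
 * A minimal sketch of a decode-chain client (names hypothetical, not
 * part of this file); EDAC/vendor code hooks the chain like this and
 * may return NOTIFY_STOP once it has fully decoded the event:
 *
 *	static int my_decoder(struct notifier_block *nb, unsigned long val,
 *			      void *data)
 *	{
 *		struct mce *m = data;
 *
 *		pr_info("bank %d status %llx\n", m->bank, m->status);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = { .notifier_call = my_decoder };
 *	mce_register_decode_chain(&my_nb);
 */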
static inline u32 ctl_reg(int bank)
{
	return MSR_IA32_MCx_CTL(bank);
}

static inline u32 status_reg(int bank)
{
	return MSR_IA32_MCx_STATUS(bank);
}

static inline u32 addr_reg(int bank)
{
	return MSR_IA32_MCx_ADDR(bank);
}

static inline u32 misc_reg(int bank)
{
	return MSR_IA32_MCx_MISC(bank);
}

static inline u32 smca_ctl_reg(int bank)
{
	return MSR_AMD64_SMCA_MCx_CTL(bank);
}

static inline u32 smca_status_reg(int bank)
{
	return MSR_AMD64_SMCA_MCx_STATUS(bank);
}

static inline u32 smca_addr_reg(int bank)
{
	return MSR_AMD64_SMCA_MCx_ADDR(bank);
}

static inline u32 smca_misc_reg(int bank)
{
	return MSR_AMD64_SMCA_MCx_MISC(bank);
}
struct mca_msr_regs msr_ops = {
	.ctl	= ctl_reg,
	.status	= status_reg,
	.addr	= addr_reg,
	.misc	= misc_reg
};
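/*
 * msr_ops defaults to the legacy MSR layout above;
 * __mcheck_cpu_init_vendor() repoints all four accessors at the
 * smca_*_reg() variants on Scalable MCA capable AMD parts, so the rest
 * of this file stays layout-agnostic by always going through msr_ops.
 */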
static void print_mce(struct mce *m)
{
	int ret = 0;

	pr_emerg(HW_ERR "CPU %d: Machine Check Exception: %Lx Bank %d: %016Lx\n",
	       m->extcpu, m->mcgstatus, m->bank, m->status);

	if (m->ip) {
		pr_emerg(HW_ERR "RIP%s %02x:<%016Lx> ",
			!(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
				m->cs, m->ip);

		if (m->cs == __KERNEL_CS)
			print_symbol("{%s}", m->ip);
		pr_cont("\n");
	}

	pr_emerg(HW_ERR "TSC %llx ", m->tsc);
	if (m->addr)
		pr_cont("ADDR %llx ", m->addr);
	if (m->misc)
		pr_cont("MISC %llx ", m->misc);

	pr_cont("\n");
	/*
	 * Note this output is parsed by external tools and old fields
	 * should not be changed.
	 */
	pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
		m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
		cpu_data(m->extcpu).microcode);

	/*
	 * Print out human-readable details about the MCE error,
	 * (if the CPU has an implementation for that)
	 */
	ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
	if (ret == NOTIFY_STOP)
		return;

	pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
}
#define PANIC_TIMEOUT 5 /* 5 seconds */

static atomic_t mce_panicked;

static int fake_panic;
static atomic_t mce_fake_panicked;
/* Panic in progress. Enable interrupts and wait for final IPI */
static void wait_for_panic(void)
{
	long timeout = PANIC_TIMEOUT*USEC_PER_SEC;

	preempt_disable();
	local_irq_enable();
	while (timeout-- > 0)
		udelay(1);
	if (panic_timeout == 0)
		panic_timeout = mca_cfg.panic_timeout;
	panic("Panicking machine check CPU died");
}
static void mce_panic(const char *msg, struct mce *final, char *exp)
{
	int apei_err = 0;
	struct llist_node *pending;
	struct mce_evt_llist *l;

	if (!fake_panic) {
		/*
		 * Make sure only one CPU runs in machine check panic
		 */
		if (atomic_inc_return(&mce_panicked) > 1)
			wait_for_panic();
		barrier();

		bust_spinlocks(1);
		console_verbose();
	} else {
		/* Don't log too much for fake panic */
		if (atomic_inc_return(&mce_fake_panicked) > 1)
			return;
	}
	pending = mce_gen_pool_prepare_records();
	/* First print corrected ones that are still unlogged */
	llist_for_each_entry(l, pending, llnode) {
		struct mce *m = &l->mce;
		if (!(m->status & MCI_STATUS_UC)) {
			print_mce(m);
			if (!apei_err)
				apei_err = apei_write_mce(m);
		}
	}
	/* Now print uncorrected but with the final one last */
	llist_for_each_entry(l, pending, llnode) {
		struct mce *m = &l->mce;
		if (!(m->status & MCI_STATUS_UC))
			continue;
		if (!final || mce_cmp(m, final)) {
			print_mce(m);
			if (!apei_err)
				apei_err = apei_write_mce(m);
		}
	}
	if (final) {
		print_mce(final);
		if (!apei_err)
			apei_err = apei_write_mce(final);
	}
	if (cpu_missing)
		pr_emerg(HW_ERR "Some CPUs didn't answer in synchronization\n");
	if (exp)
		pr_emerg(HW_ERR "Machine check: %s\n", exp);
	if (!fake_panic) {
		if (panic_timeout == 0)
			panic_timeout = mca_cfg.panic_timeout;
		panic(msg);
	} else
		pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
}
/* Support code for software error injection */

static int msr_to_offset(u32 msr)
{
	unsigned bank = __this_cpu_read(injectm.bank);

	if (msr == mca_cfg.rip_msr)
		return offsetof(struct mce, ip);
	if (msr == msr_ops.status(bank))
		return offsetof(struct mce, status);
	if (msr == msr_ops.addr(bank))
		return offsetof(struct mce, addr);
	if (msr == msr_ops.misc(bank))
		return offsetof(struct mce, misc);
	if (msr == MSR_IA32_MCG_STATUS)
		return offsetof(struct mce, mcgstatus);
	return -1;
}
/* MSR access wrappers used for error injection */
static u64 mce_rdmsrl(u32 msr)
{
	u64 v;

	if (__this_cpu_read(injectm.finished)) {
		int offset = msr_to_offset(msr);

		if (offset < 0)
			return 0;
		return *(u64 *)((char *)this_cpu_ptr(&injectm) + offset);
	}

	if (rdmsrl_safe(msr, &v)) {
		WARN_ONCE(1, "mce: Unable to read MSR 0x%x!\n", msr);
		/*
		 * Return zero in case the access faulted. This should
		 * not happen normally but can happen if the CPU does
		 * something weird, or if the code is buggy.
		 */
		v = 0;
	}

	return v;
}
static void mce_wrmsrl(u32 msr, u64 v)
{
	if (__this_cpu_read(injectm.finished)) {
		int offset = msr_to_offset(msr);

		if (offset >= 0)
			*(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v;
		return;
	}
	wrmsrl(msr, v);
}
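/*
 * Sketch of how the injection hooks above are driven (illustrative
 * only; the real producer lives in mce-inject.c): fill the per-CPU
 * injectm record and set ->finished, after which the wrappers read and
 * write struct fields instead of hardware MSRs:
 *
 *	struct mce *i = this_cpu_ptr(&injectm);
 *
 *	*i = *fake_mce;
 *	i->finished = 1;
 *	// mce_rdmsrl(msr_ops.status(i->bank)) now returns fake_mce->status
 */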
/*
 * Collect all global (w.r.t. this processor) status about this machine
 * check into our "mce" struct so that we can use it later to assess
 * the severity of the problem as we read per-bank specific details.
 */
static inline void mce_gather_info(struct mce *m, struct pt_regs *regs)
{
	mce_setup(m);

	m->mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
	if (regs) {
		/*
		 * Get the address of the instruction at the time of
		 * the machine check error.
		 */
		if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) {
			m->ip = regs->ip;
			m->cs = regs->cs;

			/*
			 * When in VM86 mode make the cs look like ring 3
			 * always. This is a lie, but it's better than passing
			 * the additional vm86 bit around everywhere.
			 */
			if (v8086_mode(regs))
				m->cs |= 3;
		}
		/* Use accurate RIP reporting if available. */
		if (mca_cfg.rip_msr)
			m->ip = mce_rdmsrl(mca_cfg.rip_msr);
	}
}
int mce_available(struct cpuinfo_x86 *c)
{
	if (mca_cfg.disabled)
		return 0;
	return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
}
static void mce_schedule_work(void)
{
	if (!mce_gen_pool_empty() && keventd_up())
		schedule_work(&mce_work);
}

static void mce_irq_work_cb(struct irq_work *entry)
{
	mce_notify_irq();
	mce_schedule_work();
}
static void mce_report_event(struct pt_regs *regs)
{
	if (regs->flags & (X86_VM_MASK|X86_EFLAGS_IF)) {
		mce_notify_irq();
		/*
		 * Triggering the work queue here is just an insurance
		 * policy in case the syscall exit notify handler
		 * doesn't run soon enough or ends up running on the
		 * wrong CPU (can happen when audit sleeps)
		 */
		mce_schedule_work();
		return;
	}

	irq_work_queue(&mce_irq_work);
}
/*
 * Check if the address reported by the CPU is in a format we can parse.
 * It would be possible to add code for most other cases, but all would
 * be somewhat complicated (e.g. segment offset would require an instruction
 * parser). So only support physical addresses up to page granularity for now.
 */
static int mce_usable_address(struct mce *m)
{
	if (!(m->status & MCI_STATUS_MISCV) || !(m->status & MCI_STATUS_ADDRV))
		return 0;

	/* Checks after this one are Intel-specific: */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return 1;

	if (MCI_MISC_ADDR_LSB(m->misc) > PAGE_SHIFT)
		return 0;
	if (MCI_MISC_ADDR_MODE(m->misc) != MCI_MISC_ADDR_PHYS)
		return 0;

	return 1;
}
static int srao_decode_notifier(struct notifier_block *nb, unsigned long val,
				void *data)
{
	struct mce *mce = (struct mce *)data;
	unsigned long pfn;

	if (!mce)
		return NOTIFY_DONE;

	if (mce_usable_address(mce) && (mce->severity == MCE_AO_SEVERITY)) {
		pfn = mce->addr >> PAGE_SHIFT;
		memory_failure(pfn, MCE_VECTOR, 0);
	}

	return NOTIFY_OK;
}
static struct notifier_block mce_srao_nb = {
	.notifier_call	= srao_decode_notifier,
	.priority	= INT_MAX,
};
/*
 * Read ADDR and MISC registers.
 */
static void mce_read_aux(struct mce *m, int i)
{
	if (m->status & MCI_STATUS_MISCV)
		m->misc = mce_rdmsrl(msr_ops.misc(i));
	if (m->status & MCI_STATUS_ADDRV) {
		m->addr = mce_rdmsrl(msr_ops.addr(i));

		/*
		 * Mask the reported address by the reported granularity.
		 */
		if (mca_cfg.ser && (m->status & MCI_STATUS_MISCV)) {
			u8 shift = MCI_MISC_ADDR_LSB(m->misc);
			m->addr >>= shift;
			m->addr <<= shift;
		}
	}
}
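/*
 * Worked example (illustrative): if MISC reports an address LSB of 12,
 * the error address is only valid at 4K granularity, so the two shifts
 * round e.g. 0x12345678 down to 0x12345000.
 */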
static bool memory_error(struct mce *m)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86_vendor == X86_VENDOR_AMD) {
		/* ErrCodeExt[20:16] */
		u8 xec = (m->status >> 16) & 0x1f;

		return (xec == 0x0 || xec == 0x8);
	} else if (c->x86_vendor == X86_VENDOR_INTEL) {
		/*
		 * Intel SDM Volume 3B - 15.9.2 Compound Error Codes
		 *
		 * Bit 7 of the MCACOD field of IA32_MCi_STATUS is used for
		 * indicating a memory error. Bit 8 is used for indicating a
		 * cache hierarchy error. The combination of bit 2 and bit 3
		 * is used for indicating a `generic' cache hierarchy error.
		 * But we can't just blindly check the above bits, because if
		 * bit 11 is set, then it is a bus/interconnect error - and
		 * either way the above bits just give more detail on what
		 * bus/interconnect error happened. Note that bit 12 can be
		 * ignored, as it's the "filter" bit.
		 */
		return (m->status & 0xef80) == BIT(7) ||
		       (m->status & 0xef00) == BIT(8) ||
		       (m->status & 0xeffc) == 0xc;
	}

	return false;
}
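/*
 * Worked example (illustrative): a compound memory error code such as
 * MCACOD 0x009f has the form 0000 0000 1MMM CCCC, so status & 0xef80
 * == BIT(7) and the first test fires. A bus/interconnect code like
 * 0x0e0b has bit 11 set, fails all three masks and is correctly not
 * treated as a memory error.
 */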
DEFINE_PER_CPU(unsigned, mce_poll_count);

/*
 * Poll for corrected events or events that happened before reset.
 * Those are just logged through /dev/mcelog.
 *
 * This is executed in standard interrupt context.
 *
 * Note: spec recommends to panic for fatal unsignalled
 * errors here. However this would be quite problematic --
 * we would need to reimplement the Monarch handling and
 * it would mess up the exclusion between exception handler
 * and poll handler -- so we skip this for now.
 * These cases should not happen anyway, or only when the CPU
 * is already totally confused. In this case it's likely it will
 * not fully execute the machine check handler either.
 */
bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
{
	bool error_seen = false;
	struct mce m;
	int severity;
	int i;

	this_cpu_inc(mce_poll_count);

	mce_gather_info(&m, NULL);

	for (i = 0; i < mca_cfg.banks; i++) {
		if (!mce_banks[i].ctl || !test_bit(i, *b))
			continue;

		m.misc = 0;
		m.addr = 0;
		m.bank = i;

		barrier();
		m.status = mce_rdmsrl(msr_ops.status(i));
		if (!(m.status & MCI_STATUS_VAL))
			continue;

		/*
		 * Uncorrected or signalled events are handled by the exception
		 * handler when it is enabled, so don't process those here.
		 *
		 * TBD do the same check for MCI_STATUS_EN here?
		 */
		if (!(flags & MCP_UC) &&
		    (m.status & (mca_cfg.ser ? MCI_STATUS_S : MCI_STATUS_UC)))
			continue;

		error_seen = true;

		mce_read_aux(&m, i);

		if (!(flags & MCP_TIMESTAMP))
			m.tsc = 0;

		severity = mce_severity(&m, mca_cfg.tolerant, NULL, false);

		if (severity == MCE_DEFERRED_SEVERITY && memory_error(&m))
			if (m.status & MCI_STATUS_ADDRV)
				m.severity = severity;

		/*
		 * Don't get the IP here because it's unlikely to
		 * have anything to do with the actual error location.
		 */
		if (!(flags & MCP_DONTLOG) && !mca_cfg.dont_log_ce)
			mce_log(&m);
		else if (mce_usable_address(&m)) {
			/*
			 * Although we skipped logging this, we still want
			 * to take action. Add to the pool so the registered
			 * notifiers will see it.
			 */
			if (!mce_gen_pool_add(&m))
				mce_schedule_work();
		}

		/*
		 * Clear state for this bank.
		 */
		mce_wrmsrl(msr_ops.status(i), 0);
	}

	/*
	 * Don't clear MCG_STATUS here because it's only defined for
	 * exceptions.
	 */

	sync_core();

	return error_seen;
}
EXPORT_SYMBOL_GPL(machine_check_poll);
/*
 * Do a quick check if any of the events requires a panic.
 * This decides if we keep the events around or clear them.
 */
static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
			  struct pt_regs *regs)
{
	int i, ret = 0;
	char *tmp;

	for (i = 0; i < mca_cfg.banks; i++) {
		m->status = mce_rdmsrl(msr_ops.status(i));
		if (m->status & MCI_STATUS_VAL) {
			__set_bit(i, validp);
			if (quirk_no_way_out)
				quirk_no_way_out(i, m, regs);
		}

		if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
			*msg = tmp;
			ret = 1;
		}
	}
	return ret;
}
/*
 * Variable to establish order between CPUs while scanning.
 * Each CPU spins initially until executing equals its number.
 */
static atomic_t mce_executing;

/*
 * Defines order of CPUs on entry. First CPU becomes Monarch.
 */
static atomic_t mce_callin;
/*
 * Check if a timeout waiting for other CPUs happened.
 */
static int mce_timed_out(u64 *t, const char *msg)
{
	/*
	 * The others already did panic for some reason.
	 * Bail out like in a timeout.
	 * rmb() to tell the compiler that system_state
	 * might have been modified by someone else.
	 */
	rmb();
	if (atomic_read(&mce_panicked))
		wait_for_panic();
	if (!mca_cfg.monarch_timeout)
		goto out;
	if ((s64)*t < SPINUNIT) {
		if (mca_cfg.tolerant <= 1)
			mce_panic(msg, NULL, NULL);
		cpu_missing = 1;
		return 1;
	}
	*t -= SPINUNIT;

out:
	touch_nmi_watchdog();
	return 0;
}
/*
 * The Monarch's reign. The Monarch is the CPU who entered
 * the machine check handler first. It waits for the others to
 * raise the exception too and then grades them. When any
 * error is fatal, panic. Only then let the others continue.
 *
 * The other CPUs entering the MCE handler will be controlled by the
 * Monarch. They are called Subjects.
 *
 * This way we prevent any potential data corruption in an unrecoverable case
 * and also make sure that all CPUs' errors are always examined.
 *
 * Also this detects the case of a machine check event coming from outer
 * space (not detected by any CPUs). In this case some external agent wants
 * us to shut down, so panic too.
 *
 * The other CPUs might still decide to panic if the handler happens
 * in an unrecoverable place, but in this case the system is in a semi-stable
 * state and won't corrupt anything by itself. It's ok to let the others
 * continue for a bit first.
 *
 * All the spin loops have timeouts; when a timeout happens a CPU
 * typically elects itself to be Monarch.
 */
static void mce_reign(void)
{
	int cpu;
	struct mce *m = NULL;
	int global_worst = 0;
	char *msg = NULL;
	char *nmsg = NULL;

	/*
	 * This CPU is the Monarch and the other CPUs have run
	 * through their handlers.
	 * Grade the severity of the errors of all the CPUs.
	 */
	for_each_possible_cpu(cpu) {
		int severity = mce_severity(&per_cpu(mces_seen, cpu),
					    mca_cfg.tolerant,
					    &nmsg, true);
		if (severity > global_worst) {
			msg = nmsg;
			global_worst = severity;
			m = &per_cpu(mces_seen, cpu);
		}
	}

	/*
	 * Cannot recover? Panic here then.
	 * This dumps all the mces in the log buffer and stops the
	 * other CPUs.
	 */
	if (m && global_worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3)
		mce_panic("Fatal machine check", m, msg);

	/*
	 * For UC somewhere we let the CPU who detects it handle it.
	 * We must also let the others continue, otherwise the handling
	 * CPU could deadlock on a lock.
	 */

	/*
	 * No machine check event found. Must be some external
	 * source or one CPU is hung. Panic.
	 */
	if (global_worst <= MCE_KEEP_SEVERITY && mca_cfg.tolerant < 3)
		mce_panic("Fatal machine check from unknown source", NULL, NULL);

	/*
	 * Now clear all the mces_seen so that they don't reappear on
	 * the next mce.
	 */
	for_each_possible_cpu(cpu)
		memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce));
}
static atomic_t global_nwo;

/*
 * Start of Monarch synchronization. This waits until all CPUs have
 * entered the exception handler and then determines if any of them
 * saw a fatal event that requires panic. Then it executes them
 * in the entry order.
 *
 * TBD double check parallel CPU hotunplug
 */
static int mce_start(int *no_way_out)
{
	int order;
	int cpus = num_online_cpus();
	u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;

	if (!timeout)
		return -1;

	atomic_add(*no_way_out, &global_nwo);
	/*
	 * Rely on the implied barrier below, such that global_nwo
	 * is updated before mce_callin.
	 */
	order = atomic_inc_return(&mce_callin);

	while (atomic_read(&mce_callin) != cpus) {
		if (mce_timed_out(&timeout,
				  "Timeout: Not all CPUs entered broadcast exception handler")) {
			atomic_set(&global_nwo, 0);
			return -1;
		}
		ndelay(SPINUNIT);
	}

	/*
	 * mce_callin should be read before global_nwo
	 */
	smp_rmb();

	if (order == 1) {
		/*
		 * Monarch: Starts executing now, the others wait.
		 */
		atomic_set(&mce_executing, 1);
	} else {
		/*
		 * Subject: Now start the scanning loop one by one in
		 * the original callin order.
		 * This way when there are any shared banks it will be
		 * only seen by one CPU before cleared, avoiding duplicates.
		 */
		while (atomic_read(&mce_executing) < order) {
			if (mce_timed_out(&timeout,
					  "Timeout: Subject CPUs unable to finish machine check processing")) {
				atomic_set(&global_nwo, 0);
				return -1;
			}
			ndelay(SPINUNIT);
		}
	}

	/*
	 * Cache the global no_way_out state.
	 */
	*no_way_out = atomic_read(&global_nwo);

	return order;
}
/*
 * Synchronize between CPUs after main scanning loop.
 * This invokes the bulk of the Monarch processing.
 */
static int mce_end(int order)
{
	int ret = -1;
	u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;

	if (!timeout)
		goto reset;
	if (order < 0)
		goto reset;

	/*
	 * Allow others to run.
	 */
	atomic_inc(&mce_executing);

	if (order == 1) {
		/* CHECKME: Can this race with a parallel hotplug? */
		int cpus = num_online_cpus();

		/*
		 * Monarch: Wait for everyone to go through their scanning
		 * loops.
		 */
		while (atomic_read(&mce_executing) <= cpus) {
			if (mce_timed_out(&timeout,
					  "Timeout: Monarch CPU unable to finish machine check processing"))
				goto reset;
			ndelay(SPINUNIT);
		}

		mce_reign();
		barrier();
		ret = 0;
	} else {
		/*
		 * Subject: Wait for Monarch to finish.
		 */
		while (atomic_read(&mce_executing) != 0) {
			if (mce_timed_out(&timeout,
					  "Timeout: Monarch CPU did not finish machine check processing"))
				goto reset;
			ndelay(SPINUNIT);
		}

		/*
		 * Don't reset anything. That's done by the Monarch.
		 */
		return 0;
	}

	/*
	 * Reset all global state.
	 */
reset:
	atomic_set(&global_nwo, 0);
	atomic_set(&mce_callin, 0);
	barrier();

	/*
	 * Let others run again.
	 */
	atomic_set(&mce_executing, 0);
	return ret;
}
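/*
 * Rendezvous walk-through (illustrative, 4 CPUs): every CPU bumps
 * mce_callin in mce_start() and receives order 1..4; order 1 becomes
 * Monarch and sets mce_executing to 1. Each Subject spins until
 * mce_executing reaches its own order, scans its banks, and its
 * mce_end() increments mce_executing to release the next Subject.
 * Once the count passes num_online_cpus(), the Monarch runs
 * mce_reign() and resets the counters, releasing everyone.
 */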
static void mce_clear_state(unsigned long *toclear)
{
	int i;

	for (i = 0; i < mca_cfg.banks; i++) {
		if (test_bit(i, toclear))
			mce_wrmsrl(msr_ops.status(i), 0);
	}
}
static int do_memory_failure(struct mce *m)
{
	int flags = MF_ACTION_REQUIRED;
	int ret;

	pr_err("Uncorrected hardware memory error in user-access at %llx", m->addr);
	if (!(m->mcgstatus & MCG_STATUS_RIPV))
		flags |= MF_MUST_KILL;
	ret = memory_failure(m->addr >> PAGE_SHIFT, MCE_VECTOR, flags);
	if (ret)
		pr_err("Memory error not recovered");
	return ret;
}
/*
 * The actual machine check handler. This only handles real
 * exceptions when something got corrupted coming in through int 18.
 *
 * This is executed in NMI context not subject to normal locking rules. This
 * implies that most kernel services cannot be safely used. Don't even
 * think about putting a printk in there!
 *
 * On Intel systems this is entered on all CPUs in parallel through
 * MCE broadcast. However some CPUs might be broken beyond repair,
 * so be always careful when synchronizing with others.
 */
void do_machine_check(struct pt_regs *regs, long error_code)
{
	struct mca_config *cfg = &mca_cfg;
	struct mce m, *final;
	int i;
	int worst = 0;
	int severity;

	/*
	 * Establish sequential order between the CPUs entering the machine
	 * check handler.
	 */
	int order = -1;
	/*
	 * If no_way_out gets set, there is no safe way to recover from this
	 * MCE. If mca_cfg.tolerant is cranked up, we'll try anyway.
	 */
	int no_way_out = 0;
	/*
	 * If kill_it gets set, there might be a way to recover from this
	 * error.
	 */
	int kill_it = 0;
	DECLARE_BITMAP(toclear, MAX_NR_BANKS);
	DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
	char *msg = "Unknown";

	/*
	 * MCEs are always local on AMD. Same is determined by MCG_STATUS_LMCES
	 * on Intel.
	 */
	int lmce = 1;

	/* If this CPU is offline, just bail out. */
	if (cpu_is_offline(smp_processor_id())) {
		u64 mcgstatus;

		mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
		if (mcgstatus & MCG_STATUS_RIPV) {
			mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
			return;
		}
	}

	ist_enter(regs);

	this_cpu_inc(mce_exception_count);

	if (!cfg->banks)
		goto out;

	mce_gather_info(&m, regs);

	final = this_cpu_ptr(&mces_seen);
	*final = m;

	memset(valid_banks, 0, sizeof(valid_banks));
	no_way_out = mce_no_way_out(&m, &msg, valid_banks, regs);

	barrier();

	/*
	 * When no restart IP might need to kill or panic.
	 * Assume the worst for now, but if we find the
	 * severity is MCE_AR_SEVERITY we have other options.
	 */
	if (!(m.mcgstatus & MCG_STATUS_RIPV))
		kill_it = 1;

	/*
	 * Check if this MCE is signaled to only this logical processor,
	 * on Intel only.
	 */
	if (m.cpuvendor == X86_VENDOR_INTEL)
		lmce = m.mcgstatus & MCG_STATUS_LMCES;

	/*
	 * Go through all banks in exclusion of the other CPUs. This way we
	 * don't report duplicated events on shared banks because the first one
	 * to see it will clear it. If this is a Local MCE, then no need to
	 * perform rendezvous.
	 */
	if (!lmce)
		order = mce_start(&no_way_out);

	for (i = 0; i < cfg->banks; i++) {
		__clear_bit(i, toclear);
		if (!test_bit(i, valid_banks))
			continue;
		if (!mce_banks[i].ctl)
			continue;

		m.misc = 0;
		m.addr = 0;
		m.bank = i;

		m.status = mce_rdmsrl(msr_ops.status(i));
		if ((m.status & MCI_STATUS_VAL) == 0)
			continue;

		/*
		 * Non uncorrected or non signaled errors are handled by
		 * machine_check_poll. Leave them alone, unless this panics.
		 */
		if (!(m.status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
			!no_way_out)
			continue;

		/*
		 * Set taint even when machine check was not enabled.
		 */
		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);

		severity = mce_severity(&m, cfg->tolerant, NULL, true);

		/*
		 * When machine check was for corrected/deferred handler don't
		 * touch, unless we're panicking.
		 */
		if ((severity == MCE_KEEP_SEVERITY ||
		     severity == MCE_UCNA_SEVERITY) && !no_way_out)
			continue;
		__set_bit(i, toclear);
		if (severity == MCE_NO_SEVERITY) {
			/*
			 * Machine check event was not enabled. Clear, but
			 * ignore.
			 */
			continue;
		}

		mce_read_aux(&m, i);

		/* assuming valid severity level != 0 */
		m.severity = severity;

		mce_log(&m);

		if (severity > worst) {
			*final = m;
			worst = severity;
		}
	}

	/* mce_clear_state will clear *final, save locally for use later */
	m = *final;

	if (!no_way_out)
		mce_clear_state(toclear);

	/*
	 * Do most of the synchronization with other CPUs.
	 * When there's any problem use only local no_way_out state.
	 */
	if (!lmce) {
		if (mce_end(order) < 0)
			no_way_out = worst >= MCE_PANIC_SEVERITY;
	} else {
		/*
		 * Local MCE skipped calling mce_reign()
		 * If we found a fatal error, we need to panic here.
		 */
		if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3)
			mce_panic("Machine check from unknown source",
				NULL, NULL);
	}

	/*
	 * If tolerant is at an insane level we drop requests to kill
	 * processes and continue even when there is no way out.
	 */
	if (cfg->tolerant == 3)
		kill_it = 0;
	else if (no_way_out)
		mce_panic("Fatal machine check on current CPU", &m, msg);

	if (worst > 0)
		mce_report_event(regs);
	mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
out:
	sync_core();

	if (worst != MCE_AR_SEVERITY && !kill_it)
		goto out_ist;

	/* Fault was in user mode and we need to take some action */
	if ((m.cs & 3) == 3) {
		ist_begin_non_atomic(regs);
		local_irq_enable();

		if (kill_it || do_memory_failure(&m))
			force_sig(SIGBUS, current);
		local_irq_disable();
		ist_end_non_atomic();
	} else {
		if (!fixup_exception(regs, X86_TRAP_MC))
			mce_panic("Failed kernel mode recovery", &m, NULL);
	}

out_ist:
	ist_exit(regs);
}
EXPORT_SYMBOL_GPL(do_machine_check);
#ifndef CONFIG_MEMORY_FAILURE
int memory_failure(unsigned long pfn, int vector, int flags)
{
	/* mce_severity() should not hand us an ACTION_REQUIRED error */
	BUG_ON(flags & MF_ACTION_REQUIRED);
	pr_err("Uncorrected memory error in page 0x%lx ignored\n"
	       "Rebuild kernel with CONFIG_MEMORY_FAILURE=y for smarter handling\n",
	       pfn);

	return 0;
}
#endif
/*
 * Action optional processing happens here (picking up
 * from the list of faulting pages that do_machine_check()
 * placed into the genpool).
 */
static void mce_process_work(struct work_struct *dummy)
{
	mce_gen_pool_process();
}
#ifdef CONFIG_X86_MCE_INTEL
/**
 * mce_log_therm_throt_event - Logs the thermal throttling event to mcelog
 * @status: Event status information
 *
 * This function should be called by the thermal interrupt after the
 * event has been processed and the decision was made to log the event
 * further.
 *
 * The status parameter will be saved to the 'status' field of 'struct mce'
 * and historically has been the register value of the
 * MSR_IA32_THERMAL_STATUS (Intel) msr.
 */
void mce_log_therm_throt_event(__u64 status)
{
	struct mce m;

	mce_setup(&m);
	m.bank = MCE_THERMAL_BANK;
	m.status = status;
	mce_log(&m);
}
#endif /* CONFIG_X86_MCE_INTEL */
/*
 * Periodic polling timer for "silent" machine check errors. If the
 * poller finds an MCE, poll 2x faster. When the poller finds no more
 * errors, poll 2x slower (up to check_interval seconds).
 */
static unsigned long check_interval = INITIAL_CHECK_INTERVAL;

static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
static DEFINE_PER_CPU(struct timer_list, mce_timer);

static unsigned long mce_adjust_timer_default(unsigned long interval)
{
	return interval;
}

static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
static void __restart_timer(struct timer_list *t, unsigned long interval)
{
	unsigned long when = jiffies + interval;
	unsigned long flags;

	local_irq_save(flags);

	if (timer_pending(t)) {
		if (time_before(when, t->expires))
			mod_timer(t, when);
	} else {
		t->expires = round_jiffies(when);
		add_timer_on(t, smp_processor_id());
	}

	local_irq_restore(flags);
}
static void mce_timer_fn(unsigned long data)
{
	struct timer_list *t = this_cpu_ptr(&mce_timer);
	int cpu = smp_processor_id();
	unsigned long iv;

	WARN_ON(cpu != data);

	iv = __this_cpu_read(mce_next_interval);

	if (mce_available(this_cpu_ptr(&cpu_info))) {
		machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_poll_banks));

		if (mce_intel_cmci_poll()) {
			iv = mce_adjust_timer(iv);
			goto done;
		}
	}

	/*
	 * Alert userspace if needed. If we logged an MCE, reduce the polling
	 * interval, otherwise increase the polling interval.
	 */
	if (mce_notify_irq())
		iv = max(iv / 2, (unsigned long) HZ/100);
	else
		iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));

done:
	__this_cpu_write(mce_next_interval, iv);
	__restart_timer(t, iv);
}
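/*
 * Example of the adaptive backoff (illustrative, assuming the default
 * 5 minute check_interval and HZ=1000): each logged event halves the
 * polling interval down to the HZ/100 (10ms) floor, while each quiet
 * pass doubles it back up toward the 5 minute ceiling.
 */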
/*
 * Ensure that the timer is firing in @interval from now.
 */
void mce_timer_kick(unsigned long interval)
{
	struct timer_list *t = this_cpu_ptr(&mce_timer);
	unsigned long iv = __this_cpu_read(mce_next_interval);

	__restart_timer(t, interval);

	if (interval < iv)
		__this_cpu_write(mce_next_interval, interval);
}
/* Must not be called in IRQ context where del_timer_sync() can deadlock */
static void mce_timer_delete_all(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		del_timer_sync(&per_cpu(mce_timer, cpu));
}
static void mce_do_trigger(struct work_struct *work)
{
	call_usermodehelper(mce_helper, mce_helper_argv, NULL, UMH_NO_WAIT);
}

static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
/*
 * Notify the user(s) about new machine check events.
 * Can be called from interrupt context, but not from machine check/NMI
 * context.
 */
int mce_notify_irq(void)
{
	/* Not more than two messages every minute */
	static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);

	if (test_and_clear_bit(0, &mce_need_notify)) {
		/* wake processes polling /dev/mcelog */
		wake_up_interruptible(&mce_chrdev_wait);

		if (mce_helper[0])
			schedule_work(&mce_trigger_work);

		if (__ratelimit(&ratelimit))
			pr_info(HW_ERR "Machine check events logged\n");

		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(mce_notify_irq);
static int __mcheck_cpu_mce_banks_init(void)
{
	int i;
	u8 num_banks = mca_cfg.banks;

	mce_banks = kzalloc(num_banks * sizeof(struct mce_bank), GFP_KERNEL);
	if (!mce_banks)
		return -ENOMEM;

	for (i = 0; i < num_banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		b->ctl = -1ULL;
		b->init = 1;
	}
	return 0;
}
/*
 * Initialize Machine Checks for a CPU.
 */
static int __mcheck_cpu_cap_init(void)
{
	unsigned b;
	u64 cap;

	rdmsrl(MSR_IA32_MCG_CAP, cap);

	b = cap & MCG_BANKCNT_MASK;
	if (!mca_cfg.banks)
		pr_info("CPU supports %d MCE banks\n", b);

	if (b > MAX_NR_BANKS) {
		pr_warn("Using only %u machine check banks out of %u\n",
			MAX_NR_BANKS, b);
		b = MAX_NR_BANKS;
	}

	/* Don't support asymmetric configurations today */
	WARN_ON(mca_cfg.banks != 0 && b != mca_cfg.banks);
	mca_cfg.banks = b;

	if (!mce_banks) {
		int err = __mcheck_cpu_mce_banks_init();

		if (err)
			return err;
	}

	/* Use accurate RIP reporting if available. */
	if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
		mca_cfg.rip_msr = MSR_IA32_MCG_EIP;

	if (cap & MCG_SER_P)
		mca_cfg.ser = true;

	return 0;
}
static void __mcheck_cpu_init_generic(void)
{
	enum mcp_flags m_fl = 0;
	mce_banks_t all_banks;
	u64 cap;

	if (!mca_cfg.bootlog)
		m_fl = MCP_DONTLOG;

	/*
	 * Log the machine checks left over from the previous reset.
	 */
	bitmap_fill(all_banks, MAX_NR_BANKS);
	machine_check_poll(MCP_UC | m_fl, &all_banks);

	cr4_set_bits(X86_CR4_MCE);

	rdmsrl(MSR_IA32_MCG_CAP, cap);
	if (cap & MCG_CTL_P)
		wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
}
static void __mcheck_cpu_init_clear_banks(void)
{
	int i;

	for (i = 0; i < mca_cfg.banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		if (!b->init)
			continue;
		wrmsrl(msr_ops.ctl(i), b->ctl);
		wrmsrl(msr_ops.status(i), 0);
	}
}
/*
 * During IFU recovery Sandy Bridge -EP4S processors set the RIPV and
 * EIPV bits in MCG_STATUS to zero on the affected logical processor (SDM
 * Vol 3B Table 15-20). But this confuses both the code that determines
 * whether the machine check occurred in kernel or user mode, and also
 * the severity assessment code. Pretend that EIPV was set, and take the
 * ip/cs values from the pt_regs that mce_gather_info() ignored earlier.
 */
static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs)
{
	if (bank != 0)
		return;
	if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) != 0)
		return;
	if ((m->status & (MCI_STATUS_OVER|MCI_STATUS_UC|
		MCI_STATUS_EN|MCI_STATUS_MISCV|MCI_STATUS_ADDRV|
		MCI_STATUS_PCC|MCI_STATUS_S|MCI_STATUS_AR|
		MCACOD)) !=
			(MCI_STATUS_UC|MCI_STATUS_EN|
		MCI_STATUS_MISCV|MCI_STATUS_ADDRV|MCI_STATUS_S|
		MCI_STATUS_AR|MCACOD_INSTR))
		return;

	m->mcgstatus |= MCG_STATUS_EIPV;
	m->ip = regs->ip;
	m->cs = regs->cs;
}
/* Add per CPU specific workarounds here */
static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
{
	struct mca_config *cfg = &mca_cfg;

	if (c->x86_vendor == X86_VENDOR_UNKNOWN) {
		pr_info("unknown CPU type - not enabling MCE support\n");
		return -EOPNOTSUPP;
	}

	/* This should be disabled by the BIOS, but isn't always */
	if (c->x86_vendor == X86_VENDOR_AMD) {
		if (c->x86 == 15 && cfg->banks > 4) {
			/*
			 * disable GART TBL walk error reporting, which
			 * trips off incorrectly with the IOMMU & 3ware
			 * & Cerberus:
			 */
			clear_bit(10, (unsigned long *)&mce_banks[4].ctl);
		}
		if (c->x86 < 17 && cfg->bootlog < 0) {
			/*
			 * Lots of broken BIOS around that don't clear them
			 * by default and leave crap in there. Don't log:
			 */
			cfg->bootlog = 0;
		}
		/*
		 * Various K7s with broken bank 0 around. Always disable
		 * by default.
		 */
		if (c->x86 == 6 && cfg->banks > 0)
			mce_banks[0].ctl = 0;

		/*
		 * overflow_recov is supported for F15h Models 00h-0fh
		 * even though we don't have a CPUID bit for it.
		 */
		if (c->x86 == 0x15 && c->x86_model <= 0xf)
			mce_flags.overflow_recov = 1;

		/*
		 * Turn off MC4_MISC thresholding banks on those models since
		 * they're not supported there.
		 */
		if (c->x86 == 0x15 &&
		    (c->x86_model >= 0x10 && c->x86_model <= 0x1f)) {
			int i;
			u64 hwcr;
			bool need_toggle;
			u32 msrs[] = {
				0x00000413, /* MC4_MISC0 */
				0xc0000408, /* MC4_MISC1 */
			};

			rdmsrl(MSR_K7_HWCR, hwcr);

			/* McStatusWrEn has to be set */
			need_toggle = !(hwcr & BIT(18));

			if (need_toggle)
				wrmsrl(MSR_K7_HWCR, hwcr | BIT(18));

			/* Clear CntP bit safely */
			for (i = 0; i < ARRAY_SIZE(msrs); i++)
				msr_clear_bit(msrs[i], 62);

			/* restore old settings */
			if (need_toggle)
				wrmsrl(MSR_K7_HWCR, hwcr);
		}
	}
	if (c->x86_vendor == X86_VENDOR_INTEL) {
		/*
		 * SDM documents that on family 6 bank 0 should not be written
		 * because it aliases to another special BIOS controlled
		 * register.
		 * But it's not aliased anymore on model 0x1a+
		 * Don't ignore bank 0 completely because there could be a
		 * valid event later, merely don't write CTL0.
		 */
		if (c->x86 == 6 && c->x86_model < 0x1A && cfg->banks > 0)
			mce_banks[0].init = 0;

		/*
		 * All newer Intel systems support MCE broadcasting. Enable
		 * synchronization with a one second timeout.
		 */
		if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) &&
			cfg->monarch_timeout < 0)
			cfg->monarch_timeout = USEC_PER_SEC;

		/*
		 * There are also broken BIOSes on some Pentium M and
		 * earlier systems:
		 */
		if (c->x86 == 6 && c->x86_model <= 13 && cfg->bootlog < 0)
			cfg->bootlog = 0;

		if (c->x86 == 6 && c->x86_model == 45)
			quirk_no_way_out = quirk_sandybridge_ifu;

		/*
		 * MCG_CAP.MCG_SER_P is necessary but not sufficient to know
		 * whether this processor will actually generate recoverable
		 * machine checks. Check to see if this is an E7 model Xeon.
		 * We can't do a model number check because E5 and E7 use the
		 * same model number. E5 doesn't support recovery, E7 does.
		 */
		if (mca_cfg.recovery || (mca_cfg.ser &&
			!strncmp(c->x86_model_id,
				 "Intel(R) Xeon(R) CPU E7-", 24)))
			set_cpu_cap(c, X86_FEATURE_MCE_RECOVERY);
	}
	if (cfg->monarch_timeout < 0)
		cfg->monarch_timeout = 0;
	if (cfg->bootlog != 0)
		cfg->panic_timeout = 30;

	return 0;
}
static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
{
	if (c->x86 != 5)
		return 0;

	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		intel_p5_mcheck_init(c);
		return 1;
	case X86_VENDOR_CENTAUR:
		winchip_mcheck_init(c);
		return 1;
	default:
		return 0;
	}

	return 0;
}
static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
{
	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		mce_intel_feature_init(c);
		mce_adjust_timer = cmci_intel_adjust_timer;
		break;

	case X86_VENDOR_AMD: {
		mce_flags.overflow_recov = !!cpu_has(c, X86_FEATURE_OVERFLOW_RECOV);
		mce_flags.succor	 = !!cpu_has(c, X86_FEATURE_SUCCOR);
		mce_flags.smca		 = !!cpu_has(c, X86_FEATURE_SMCA);

		/*
		 * Install proper ops for Scalable MCA enabled processors
		 */
		if (mce_flags.smca) {
			msr_ops.ctl	= smca_ctl_reg;
			msr_ops.status	= smca_status_reg;
			msr_ops.addr	= smca_addr_reg;
			msr_ops.misc	= smca_misc_reg;
		}
		mce_amd_feature_init(c);

		break;
		}

	default:
		break;
	}
}
static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c)
{
	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		mce_intel_feature_clear(c);
		break;
	default:
		break;
	}
}
static void mce_start_timer(unsigned int cpu, struct timer_list *t)
{
	unsigned long iv = check_interval * HZ;

	if (mca_cfg.ignore_ce || !iv)
		return;

	per_cpu(mce_next_interval, cpu) = iv;

	t->expires = round_jiffies(jiffies + iv);
	add_timer_on(t, cpu);
}
static void __mcheck_cpu_init_timer(void)
{
	struct timer_list *t = this_cpu_ptr(&mce_timer);
	unsigned int cpu = smp_processor_id();

	setup_pinned_timer(t, mce_timer_fn, cpu);
	mce_start_timer(cpu, t);
}
/* Handle unconfigured int18 (should never happen) */
static void unexpected_machine_check(struct pt_regs *regs, long error_code)
{
	pr_err("CPU#%d: Unexpected int18 (Machine Check)\n",
	       smp_processor_id());
}

/* Call the installed machine check handler for this CPU setup. */
void (*machine_check_vector)(struct pt_regs *, long error_code) =
						unexpected_machine_check;
/*
 * Called for each booted CPU to set up machine checks.
 * Must be called with preempt off:
 */
void mcheck_cpu_init(struct cpuinfo_x86 *c)
{
	if (mca_cfg.disabled)
		return;

	if (__mcheck_cpu_ancient_init(c))
		return;

	if (!mce_available(c))
		return;

	if (__mcheck_cpu_cap_init() < 0 || __mcheck_cpu_apply_quirks(c) < 0) {
		mca_cfg.disabled = true;
		return;
	}

	if (mce_gen_pool_init()) {
		mca_cfg.disabled = true;
		pr_emerg("Couldn't allocate MCE records pool!\n");
		return;
	}

	machine_check_vector = do_machine_check;

	__mcheck_cpu_init_generic();
	__mcheck_cpu_init_vendor(c);
	__mcheck_cpu_init_clear_banks();
	__mcheck_cpu_init_timer();
}
/*
 * Called for each booted CPU to clear some machine checks opt-ins
 */
void mcheck_cpu_clear(struct cpuinfo_x86 *c)
{
	if (mca_cfg.disabled)
		return;

	if (!mce_available(c))
		return;

	/*
	 * Possibly to clear general settings generic to x86
	 * __mcheck_cpu_clear_generic(c);
	 */
	__mcheck_cpu_clear_vendor(c);
}
/*
 * mce_chrdev: Character device /dev/mcelog to read and clear the MCE log.
 */

static DEFINE_SPINLOCK(mce_chrdev_state_lock);
static int mce_chrdev_open_count;	/* #times opened */
static int mce_chrdev_open_exclu;	/* already open exclusive? */
static int mce_chrdev_open(struct inode *inode, struct file *file)
{
	spin_lock(&mce_chrdev_state_lock);

	if (mce_chrdev_open_exclu ||
	    (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
		spin_unlock(&mce_chrdev_state_lock);

		return -EBUSY;
	}

	if (file->f_flags & O_EXCL)
		mce_chrdev_open_exclu = 1;
	mce_chrdev_open_count++;

	spin_unlock(&mce_chrdev_state_lock);

	return nonseekable_open(inode, file);
}
static int mce_chrdev_release(struct inode *inode, struct file *file)
{
	spin_lock(&mce_chrdev_state_lock);

	mce_chrdev_open_count--;
	mce_chrdev_open_exclu = 0;

	spin_unlock(&mce_chrdev_state_lock);

	return 0;
}
static void collect_tscs(void *data)
{
	unsigned long *cpu_tsc = (unsigned long *)data;

	cpu_tsc[smp_processor_id()] = rdtsc();
}
static int mce_apei_read_done;

/* Collect MCE record of previous boot in persistent storage via APEI ERST. */
static int __mce_read_apei(char __user **ubuf, size_t usize)
{
	int rc;
	u64 record_id;
	struct mce m;

	if (usize < sizeof(struct mce))
		return -EINVAL;

	rc = apei_read_mce(&m, &record_id);
	/* Error or no more MCE record */
	if (rc <= 0) {
		mce_apei_read_done = 1;
		/*
		 * When ERST is disabled, mce_chrdev_read() should return
		 * "no record" instead of "no device."
		 */
		if (rc == -ENODEV)
			return 0;
		return rc;
	}
	rc = -EFAULT;
	if (copy_to_user(*ubuf, &m, sizeof(struct mce)))
		return rc;
	/*
	 * In fact, we should have cleared the record after that has
	 * been flushed to the disk or sent to network in
	 * /sbin/mcelog, but we have no interface to support that now,
	 * so just clear it to avoid duplication.
	 */
	rc = apei_clear_mce(record_id);
	if (rc) {
		mce_apei_read_done = 1;
		return rc;
	}
	*ubuf += sizeof(struct mce);

	return 0;
}
static ssize_t mce_chrdev_read(struct file *filp, char __user *ubuf,
				size_t usize, loff_t *off)
{
	char __user *buf = ubuf;
	unsigned long *cpu_tsc;
	unsigned prev, next;
	int i, err;

	cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL);
	if (!cpu_tsc)
		return -ENOMEM;

	mutex_lock(&mce_chrdev_read_mutex);

	if (!mce_apei_read_done) {
		err = __mce_read_apei(&buf, usize);
		if (err || buf != ubuf)
			goto out;
	}

	next = mce_log_get_idx_check(mcelog.next);

	/* Only supports full reads right now */
	err = -EINVAL;
	if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce))
		goto out;

	err = 0;
	prev = 0;
	do {
		for (i = prev; i < next; i++) {
			unsigned long start = jiffies;
			struct mce *m = &mcelog.entry[i];

			while (!m->finished) {
				if (time_after_eq(jiffies, start + 2)) {
					memset(m, 0, sizeof(*m));
					goto timeout;
				}
				cpu_relax();
			}
			smp_rmb();
			err |= copy_to_user(buf, m, sizeof(*m));
			buf += sizeof(*m);
timeout:
			;
		}

		memset(mcelog.entry + prev, 0,
		       (next - prev) * sizeof(struct mce));
		prev = next;
		next = cmpxchg(&mcelog.next, prev, 0);
	} while (next != prev);

	synchronize_sched();

	/*
	 * Collect entries that were still getting written before the
	 * synchronize.
	 */
	on_each_cpu(collect_tscs, cpu_tsc, 1);

	for (i = next; i < MCE_LOG_LEN; i++) {
		struct mce *m = &mcelog.entry[i];

		if (m->finished && m->tsc < cpu_tsc[m->cpu]) {
			err |= copy_to_user(buf, m, sizeof(*m));
			smp_rmb();
			buf += sizeof(*m);
			memset(m, 0, sizeof(*m));
		}
	}

	if (err)
		err = -EFAULT;

out:
	mutex_unlock(&mce_chrdev_read_mutex);
	kfree(cpu_tsc);

	return err ? err : buf - ubuf;
}
static unsigned int mce_chrdev_poll(struct file *file, poll_table *wait)
{
	poll_wait(file, &mce_chrdev_wait, wait);
	if (READ_ONCE(mcelog.next))
		return POLLIN | POLLRDNORM;
	if (!mce_apei_read_done && apei_check_mce())
		return POLLIN | POLLRDNORM;
	return 0;
}
static long mce_chrdev_ioctl(struct file *f, unsigned int cmd,
				unsigned long arg)
{
	int __user *p = (int __user *)arg;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (cmd) {
	case MCE_GET_RECORD_LEN:
		return put_user(sizeof(struct mce), p);
	case MCE_GET_LOG_LEN:
		return put_user(MCE_LOG_LEN, p);
	case MCE_GETCLEAR_FLAGS: {
		unsigned flags;

		do {
			flags = mcelog.flags;
		} while (cmpxchg(&mcelog.flags, flags, 0) != flags);

		return put_user(flags, p);
	}
	default:
		return -ENOTTY;
	}
}
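/*
 * Userspace sketch of the ioctl contract above (illustrative, not part
 * of the kernel): a reader sizes its buffer from the two length ioctls
 * before issuing the full-log read() that mce_chrdev_read() requires:
 *
 *	int fd = open("/dev/mcelog", O_RDONLY);
 *	int recordlen, loglen;
 *
 *	ioctl(fd, MCE_GET_RECORD_LEN, &recordlen);
 *	ioctl(fd, MCE_GET_LOG_LEN, &loglen);
 *	// read() needs a buffer of at least loglen * recordlen bytes
 */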
static ssize_t (*mce_write)(struct file *filp, const char __user *ubuf,
			    size_t usize, loff_t *off);

void register_mce_write_callback(ssize_t (*fn)(struct file *filp,
			     const char __user *ubuf,
			     size_t usize, loff_t *off))
{
	mce_write = fn;
}
EXPORT_SYMBOL_GPL(register_mce_write_callback);

static ssize_t mce_chrdev_write(struct file *filp, const char __user *ubuf,
				size_t usize, loff_t *off)
{
	if (mce_write)
		return mce_write(filp, ubuf, usize, off);
	else
		return -EINVAL;
}
static const struct file_operations mce_chrdev_ops = {
	.open			= mce_chrdev_open,
	.release		= mce_chrdev_release,
	.read			= mce_chrdev_read,
	.write			= mce_chrdev_write,
	.poll			= mce_chrdev_poll,
	.unlocked_ioctl		= mce_chrdev_ioctl,
	.llseek			= no_llseek,
};

static struct miscdevice mce_chrdev_device = {
	MISC_MCELOG_MINOR,
	"mcelog",
	&mce_chrdev_ops,
};
static void __mce_disable_bank(void *arg)
{
	int bank = *((int *)arg);

	__clear_bit(bank, this_cpu_ptr(mce_poll_banks));
	cmci_disable_bank(bank);
}

void mce_disable_bank(int bank)
{
	if (bank >= mca_cfg.banks) {
		pr_warn(FW_BUG
			"Ignoring request to disable invalid MCA bank %d.\n",
			bank);
		return;
	}
	set_bit(bank, mce_banks_ce_disabled);
	on_each_cpu(__mce_disable_bank, &bank, 1);
}
/*
 * mce=off Disables machine check
 * mce=no_cmci Disables CMCI
 * mce=no_lmce Disables LMCE
 * mce=dont_log_ce Clears corrected events silently, no log created for CEs.
 * mce=ignore_ce Disables polling and CMCI, corrected events are not cleared.
 * mce=TOLERANCELEVEL[,monarchtimeout] (number, see above)
 *	monarchtimeout is how long to wait for other CPUs on machine
 *	check, or 0 to not wait
 * mce=bootlog Log MCEs from before booting. Disabled by default on AMD.
 * mce=nobootlog Don't log MCEs from before booting.
 * mce=bios_cmci_threshold Don't program the CMCI threshold
 * mce=recovery force enable memcpy_mcsafe()
 */
static int __init mcheck_enable(char *str)
{
	struct mca_config *cfg = &mca_cfg;

	if (*str == 0) /* without a value -> on */
		enable_p5_mce();
	else if (*str == '=')
		str++;
	if (!strcmp(str, "off"))
		cfg->disabled = true;
	else if (!strcmp(str, "no_cmci"))
		cfg->cmci_disabled = true;
	else if (!strcmp(str, "no_lmce"))
		cfg->lmce_disabled = true;
	else if (!strcmp(str, "dont_log_ce"))
		cfg->dont_log_ce = true;
	else if (!strcmp(str, "ignore_ce"))
		cfg->ignore_ce = true;
	else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
		cfg->bootlog = (str[0] == 'b');
	else if (!strcmp(str, "bios_cmci_threshold"))
		cfg->bios_cmci_threshold = true;
	else if (!strcmp(str, "recovery"))
		cfg->recovery = true;
	else if (isdigit(str[0])) {
		if (get_option(&str, &cfg->tolerant) == 2)
			get_option(&str, &(cfg->monarch_timeout));
	} else {
		pr_info("mce argument %s ignored. Please use /sys\n", str);
		return 0;
	}
	return 1;
}
__setup("mce", mcheck_enable);
int __init mcheck_init(void)
{
	mcheck_intel_therm_init();
	mce_register_decode_chain(&mce_srao_nb);
	mcheck_vendor_init_severity();

	INIT_WORK(&mce_work, mce_process_work);
	init_irq_work(&mce_irq_work, mce_irq_work_cb);

	return 0;
}
/*
 * mce_syscore: PM support
 */

/*
 * Disable machine checks on suspend and shutdown. We can't really handle
 * them later.
 */
static void mce_disable_error_reporting(void)
{
	int i;

	for (i = 0; i < mca_cfg.banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		if (b->init)
			wrmsrl(msr_ops.ctl(i), 0);
	}
	return;
}
static void vendor_disable_error_reporting(void)
{
	/*
	 * Don't clear on Intel CPUs. Some of these MSRs are socket-wide.
	 * Disabling them for just a single offlined CPU is bad, since it will
	 * inhibit reporting for all shared resources on the socket like the
	 * last level cache (LLC), the integrated memory controller (iMC), etc.
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		return;

	mce_disable_error_reporting();
}
static int mce_syscore_suspend(void)
{
	vendor_disable_error_reporting();
	return 0;
}

static void mce_syscore_shutdown(void)
{
	vendor_disable_error_reporting();
}
/*
 * On resume clear all MCE state. Don't want to see leftovers from the BIOS.
 * Only one CPU is active at this time, the others get re-added later using
 * CPU hotplug:
 */
static void mce_syscore_resume(void)
{
	__mcheck_cpu_init_generic();
	__mcheck_cpu_init_vendor(raw_cpu_ptr(&cpu_info));
	__mcheck_cpu_init_clear_banks();
}

static struct syscore_ops mce_syscore_ops = {
	.suspend	= mce_syscore_suspend,
	.shutdown	= mce_syscore_shutdown,
	.resume		= mce_syscore_resume,
};
/*
 * mce_device: Sysfs support
 */

static void mce_cpu_restart(void *data)
{
	if (!mce_available(raw_cpu_ptr(&cpu_info)))
		return;
	__mcheck_cpu_init_generic();
	__mcheck_cpu_init_clear_banks();
	__mcheck_cpu_init_timer();
}

/* Reinit MCEs after user configuration changes */
static void mce_restart(void)
{
	mce_timer_delete_all();
	on_each_cpu(mce_cpu_restart, NULL, 1);
}
/* Toggle features for corrected errors */
static void mce_disable_cmci(void *data)
{
	if (!mce_available(raw_cpu_ptr(&cpu_info)))
		return;
	cmci_clear();
}

static void mce_enable_ce(void *all)
{
	if (!mce_available(raw_cpu_ptr(&cpu_info)))
		return;
	cmci_reenable();
	cmci_recheck();
	if (all)
		__mcheck_cpu_init_timer();
}
static struct bus_type mce_subsys = {
	.name		= "machinecheck",
	.dev_name	= "machinecheck",
};

DEFINE_PER_CPU(struct device *, mce_device);

void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
static inline struct mce_bank *attr_to_bank(struct device_attribute *attr)
{
	return container_of(attr, struct mce_bank, attr);
}

static ssize_t show_bank(struct device *s, struct device_attribute *attr,
			 char *buf)
{
	return sprintf(buf, "%llx\n", attr_to_bank(attr)->ctl);
}

static ssize_t set_bank(struct device *s, struct device_attribute *attr,
			const char *buf, size_t size)
{
	u64 new;

	if (kstrtou64(buf, 0, &new) < 0)
		return -EINVAL;

	attr_to_bank(attr)->ctl = new;
	mce_restart();

	return size;
}
static ssize_t
show_trigger(struct device *s, struct device_attribute *attr, char *buf)
{
	strcpy(buf, mce_helper);
	strcat(buf, "\n");
	return strlen(mce_helper) + 1;
}

static ssize_t set_trigger(struct device *s, struct device_attribute *attr,
				const char *buf, size_t siz)
{
	char *p;

	strncpy(mce_helper, buf, sizeof(mce_helper));
	mce_helper[sizeof(mce_helper)-1] = 0;
	p = strchr(mce_helper, '\n');

	if (p)
		*p = 0;

	return strlen(mce_helper) + !!p;
}
static ssize_t set_ignore_ce(struct device *s,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	u64 new;

	if (kstrtou64(buf, 0, &new) < 0)
		return -EINVAL;

	if (mca_cfg.ignore_ce ^ !!new) {
		if (new) {
			/* disable ce features */
			mce_timer_delete_all();
			on_each_cpu(mce_disable_cmci, NULL, 1);
			mca_cfg.ignore_ce = true;
		} else {
			/* enable ce features */
			mca_cfg.ignore_ce = false;
			on_each_cpu(mce_enable_ce, (void *)1, 1);
		}
	}
	return size;
}
static ssize_t set_cmci_disabled(struct device *s,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	u64 new;

	if (kstrtou64(buf, 0, &new) < 0)
		return -EINVAL;

	if (mca_cfg.cmci_disabled ^ !!new) {
		if (new) {
			/* disable cmci */
			on_each_cpu(mce_disable_cmci, NULL, 1);
			mca_cfg.cmci_disabled = true;
		} else {
			/* enable cmci */
			mca_cfg.cmci_disabled = false;
			on_each_cpu(mce_enable_ce, NULL, 1);
		}
	}
	return size;
}
static ssize_t store_int_with_restart(struct device *s,
				      struct device_attribute *attr,
				      const char *buf, size_t size)
{
	ssize_t ret = device_store_int(s, attr, buf, size);

	mce_restart();
	return ret;
}
static DEVICE_ATTR(trigger, 0644, show_trigger, set_trigger);
static DEVICE_INT_ATTR(tolerant, 0644, mca_cfg.tolerant);
static DEVICE_INT_ATTR(monarch_timeout, 0644, mca_cfg.monarch_timeout);
static DEVICE_BOOL_ATTR(dont_log_ce, 0644, mca_cfg.dont_log_ce);

static struct dev_ext_attribute dev_attr_check_interval = {
	__ATTR(check_interval, 0644, device_show_int, store_int_with_restart),
	&check_interval
};

static struct dev_ext_attribute dev_attr_ignore_ce = {
	__ATTR(ignore_ce, 0644, device_show_bool, set_ignore_ce),
	&mca_cfg.ignore_ce
};

static struct dev_ext_attribute dev_attr_cmci_disabled = {
	__ATTR(cmci_disabled, 0644, device_show_bool, set_cmci_disabled),
	&mca_cfg.cmci_disabled
};
static struct device_attribute *mce_device_attrs[] = {
	&dev_attr_tolerant.attr,
	&dev_attr_check_interval.attr,
	&dev_attr_trigger,
	&dev_attr_monarch_timeout.attr,
	&dev_attr_dont_log_ce.attr,
	&dev_attr_ignore_ce.attr,
	&dev_attr_cmci_disabled.attr,
	NULL
};

static cpumask_var_t mce_device_initialized;
static void mce_device_release(struct device *dev)
{
	kfree(dev);
}

/* Per cpu device init. All of the cpus still share the same ctrl bank: */
static int mce_device_create(unsigned int cpu)
{
	struct device *dev;
	int err;
	int i, j;

	if (!mce_available(&boot_cpu_data))
		return -EIO;

	dev = kzalloc(sizeof *dev, GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->id  = cpu;
	dev->bus = &mce_subsys;
	dev->release = &mce_device_release;

	err = device_register(dev);
	if (err) {
		put_device(dev);
		return err;
	}

	for (i = 0; mce_device_attrs[i]; i++) {
		err = device_create_file(dev, mce_device_attrs[i]);
		if (err)
			goto error;
	}
	for (j = 0; j < mca_cfg.banks; j++) {
		err = device_create_file(dev, &mce_banks[j].attr);
		if (err)
			goto error2;
	}
	cpumask_set_cpu(cpu, mce_device_initialized);
	per_cpu(mce_device, cpu) = dev;

	return 0;
error2:
	while (--j >= 0)
		device_remove_file(dev, &mce_banks[j].attr);
error:
	while (--i >= 0)
		device_remove_file(dev, mce_device_attrs[i]);

	device_unregister(dev);

	return err;
}
static void mce_device_remove(unsigned int cpu)
{
	struct device *dev = per_cpu(mce_device, cpu);
	int i;

	if (!cpumask_test_cpu(cpu, mce_device_initialized))
		return;

	for (i = 0; mce_device_attrs[i]; i++)
		device_remove_file(dev, mce_device_attrs[i]);

	for (i = 0; i < mca_cfg.banks; i++)
		device_remove_file(dev, &mce_banks[i].attr);

	device_unregister(dev);
	cpumask_clear_cpu(cpu, mce_device_initialized);
	per_cpu(mce_device, cpu) = NULL;
}
/* Make sure there are no machine checks on offlined CPUs. */
static void mce_disable_cpu(void *h)
{
	unsigned long action = *(unsigned long *)h;

	if (!mce_available(raw_cpu_ptr(&cpu_info)))
		return;

	if (!(action & CPU_TASKS_FROZEN))
		cmci_clear();

	vendor_disable_error_reporting();
}

static void mce_reenable_cpu(void *h)
{
	unsigned long action = *(unsigned long *)h;
	int i;

	if (!mce_available(raw_cpu_ptr(&cpu_info)))
		return;

	if (!(action & CPU_TASKS_FROZEN))
		cmci_reenable();
	for (i = 0; i < mca_cfg.banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		if (b->init)
			wrmsrl(msr_ops.ctl(i), b->ctl);
	}
}
/* Get notified when a cpu comes on/off. Be hotplug friendly. */
static int
mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct timer_list *t = &per_cpu(mce_timer, cpu);

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		mce_device_create(cpu);
		if (threshold_cpu_callback)
			threshold_cpu_callback(action, cpu);
		break;
	case CPU_DEAD:
		if (threshold_cpu_callback)
			threshold_cpu_callback(action, cpu);
		mce_device_remove(cpu);
		mce_intel_hcpu_update(cpu);

		/* intentionally ignoring frozen here */
		if (!(action & CPU_TASKS_FROZEN))
			cmci_rediscover();
		break;
	case CPU_DOWN_PREPARE:
		smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
		del_timer_sync(t);
		break;
	case CPU_DOWN_FAILED:
		smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
		mce_start_timer(cpu, t);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block mce_cpu_notifier = {
	.notifier_call = mce_cpu_callback,
};
static __init void mce_init_banks(void)
{
	int i;

	for (i = 0; i < mca_cfg.banks; i++) {
		struct mce_bank *b = &mce_banks[i];
		struct device_attribute *a = &b->attr;

		sysfs_attr_init(&a->attr);
		a->attr.name	= b->attrname;
		snprintf(b->attrname, ATTR_LEN, "bank%d", i);

		a->attr.mode	= 0644;
		a->show		= show_bank;
		a->store	= set_bank;
	}
}
static __init int mcheck_init_device(void)
{
	int err;
	int i = 0;

	if (!mce_available(&boot_cpu_data)) {
		err = -EIO;
		goto err_out;
	}

	if (!zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL)) {
		err = -ENOMEM;
		goto err_out;
	}

	mce_init_banks();

	err = subsys_system_register(&mce_subsys, NULL);
	if (err)
		goto err_out_mem;

	cpu_notifier_register_begin();
	for_each_online_cpu(i) {
		err = mce_device_create(i);
		if (err) {
			/*
			 * Register notifier anyway (and do not unreg it) so
			 * that we don't leave undeleted timers, see notifier
			 * callback above.
			 */
			__register_hotcpu_notifier(&mce_cpu_notifier);
			cpu_notifier_register_done();
			goto err_device_create;
		}
	}

	__register_hotcpu_notifier(&mce_cpu_notifier);
	cpu_notifier_register_done();

	register_syscore_ops(&mce_syscore_ops);

	/* register character device /dev/mcelog */
	err = misc_register(&mce_chrdev_device);
	if (err)
		goto err_register;

	return 0;

err_register:
	unregister_syscore_ops(&mce_syscore_ops);

err_device_create:
	/*
	 * We didn't keep track of which devices were created above, but
	 * even if we had, the set of online cpus might have changed.
	 * Play safe and remove for every possible cpu, since
	 * mce_device_remove() will do the right thing.
	 */
	for_each_possible_cpu(i)
		mce_device_remove(i);

err_out_mem:
	free_cpumask_var(mce_device_initialized);

err_out:
	pr_err("Unable to init device /dev/mcelog (rc: %d)\n", err);

	return err;
}
device_initcall_sync(mcheck_init_device);
/*
 * Old style boot options parsing. Only for compatibility.
 */
static int __init mcheck_disable(char *str)
{
	mca_cfg.disabled = true;
	return 1;
}
__setup("nomce", mcheck_disable);
#ifdef CONFIG_DEBUG_FS
struct dentry *mce_get_debugfs_dir(void)
{
	static struct dentry *dmce;

	if (!dmce)
		dmce = debugfs_create_dir("mce", NULL);

	return dmce;
}
static void mce_reset(void)
{
	cpu_missing = 0;
	atomic_set(&mce_fake_panicked, 0);
	atomic_set(&mce_executing, 0);
	atomic_set(&mce_callin, 0);
	atomic_set(&global_nwo, 0);
}
static int fake_panic_get(void *data, u64 *val)
{
	*val = fake_panic;
	return 0;
}

static int fake_panic_set(void *data, u64 val)
{
	mce_reset();
	fake_panic = val;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fake_panic_fops, fake_panic_get,
			fake_panic_set, "%llu\n");
static int __init mcheck_debugfs_init(void)
{
	struct dentry *dmce, *ffake_panic;

	dmce = mce_get_debugfs_dir();
	if (!dmce)
		return -ENOMEM;
	ffake_panic = debugfs_create_file("fake_panic", 0444, dmce, NULL,
					  &fake_panic_fops);
	if (!ffake_panic)
		return -ENOMEM;

	return 0;
}
#else
static int __init mcheck_debugfs_init(void) { return -EINVAL; }
#endif
static int __init mcheck_late_init(void)
{
	mcheck_debugfs_init();

	/*
	 * Flush out everything that has been logged during early boot, now that
	 * everything has been initialized (workqueues, decoders, ...).
	 */
	mce_schedule_work();

	return 0;
}
late_initcall(mcheck_late_init);