Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * Machine check handler. | |
e9eee03e | 3 | * |
1da177e4 | 4 | * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs. |
d88203d1 TG |
5 | * Rest from unknown author(s). |
6 | * 2004 Andi Kleen. Rewrote most of it. | |
b79109c3 AK |
7 | * Copyright 2008 Intel Corporation |
8 | * Author: Andi Kleen | |
1da177e4 | 9 | */ |
e9eee03e IM |
10 | #include <linux/thread_info.h> |
11 | #include <linux/capability.h> | |
12 | #include <linux/miscdevice.h> | |
ccc3c319 | 13 | #include <linux/interrupt.h> |
e9eee03e IM |
14 | #include <linux/ratelimit.h> |
15 | #include <linux/kallsyms.h> | |
16 | #include <linux/rcupdate.h> | |
e9eee03e | 17 | #include <linux/kobject.h> |
14a02530 | 18 | #include <linux/uaccess.h> |
e9eee03e IM |
19 | #include <linux/kdebug.h> |
20 | #include <linux/kernel.h> | |
21 | #include <linux/percpu.h> | |
1da177e4 | 22 | #include <linux/string.h> |
1da177e4 | 23 | #include <linux/sysdev.h> |
3c079792 | 24 | #include <linux/delay.h> |
8c566ef5 | 25 | #include <linux/ctype.h> |
e9eee03e | 26 | #include <linux/sched.h> |
0d7482e3 | 27 | #include <linux/sysfs.h> |
e9eee03e IM |
28 | #include <linux/types.h> |
29 | #include <linux/init.h> | |
30 | #include <linux/kmod.h> | |
31 | #include <linux/poll.h> | |
3c079792 | 32 | #include <linux/nmi.h> |
e9eee03e | 33 | #include <linux/cpu.h> |
14a02530 | 34 | #include <linux/smp.h> |
e9eee03e | 35 | #include <linux/fs.h> |
9b1beaf2 | 36 | #include <linux/mm.h> |
e9eee03e | 37 | |
d88203d1 | 38 | #include <asm/processor.h> |
ccc3c319 AK |
39 | #include <asm/hw_irq.h> |
40 | #include <asm/apic.h> | |
e02e68d3 | 41 | #include <asm/idle.h> |
ccc3c319 | 42 | #include <asm/ipi.h> |
e9eee03e IM |
43 | #include <asm/mce.h> |
44 | #include <asm/msr.h> | |
1da177e4 | 45 | |
bd19a5e6 | 46 | #include "mce-internal.h" |
711c2e48 IM |
47 | #include "mce.h" |
48 | ||
5d727926 AK |
49 | /* Handle unconfigured int18 (should never happen) */ |
50 | static void unexpected_machine_check(struct pt_regs *regs, long error_code) | |
51 | { | |
52 | printk(KERN_ERR "CPU#%d: Unexpected int18 (Machine Check).\n", | |
53 | smp_processor_id()); | |
54 | } | |
55 | ||
56 | /* Call the installed machine check handler for this CPU setup. */ | |
57 | void (*machine_check_vector)(struct pt_regs *, long error_code) = | |
58 | unexpected_machine_check; | |
04b2b1a4 AK |
59 | |
60 | int mce_disabled; | |
61 | ||
4efc0670 | 62 | #ifdef CONFIG_X86_NEW_MCE |
711c2e48 | 63 | |
e9eee03e | 64 | #define MISC_MCELOG_MINOR 227 |
0d7482e3 | 65 | |
3c079792 AK |
66 | #define SPINUNIT 100 /* 100ns */ |
67 | ||
553f265f AK |
68 | atomic_t mce_entry; |
69 | ||
01ca79f1 AK |
70 | DEFINE_PER_CPU(unsigned, mce_exception_count); |
71 | ||
bd78432c TH |
72 | /* |
73 | * Tolerant levels: | |
74 | * 0: always panic on uncorrected errors, log corrected errors | |
75 | * 1: panic or SIGBUS on uncorrected errors, log corrected errors | |
76 | * 2: SIGBUS or log uncorrected errors (if possible), log corrected errors | |
77 | * 3: never panic or SIGBUS, log all errors (for testing only) | |
78 | */ | |
e9eee03e IM |
79 | static int tolerant = 1; |
80 | static int banks; | |
81 | static u64 *bank; | |
82 | static unsigned long notify_user; | |
83 | static int rip_msr; | |
84 | static int mce_bootlog = -1; | |
3c079792 | 85 | static int monarch_timeout = -1; |
29b0f591 | 86 | static int mce_panic_timeout; |
ed7290d0 | 87 | int mce_ser; |
a98f0dd3 | 88 | |
e9eee03e IM |
89 | static char trigger[128]; |
90 | static char *trigger_argv[2] = { trigger, NULL }; | |
1da177e4 | 91 | |
06b7a7a5 AK |
92 | static unsigned long dont_init_banks; |
93 | ||
e02e68d3 | 94 | static DECLARE_WAIT_QUEUE_HEAD(mce_wait); |
3c079792 AK |
95 | static DEFINE_PER_CPU(struct mce, mces_seen); |
96 | static int cpu_missing; | |
97 | ||
e02e68d3 | 98 | |
ee031c31 AK |
99 | /* MCA banks polled by the period polling timer for corrected events */ |
100 | DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = { | |
101 | [0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL | |
102 | }; | |
103 | ||
06b7a7a5 AK |
104 | static inline int skip_bank_init(int i) |
105 | { | |
106 | return i < BITS_PER_LONG && test_bit(i, &dont_init_banks); | |
107 | } | |
108 | ||
9b1beaf2 AK |
109 | static DEFINE_PER_CPU(struct work_struct, mce_work); |
110 | ||
b5f2fa4e AK |
111 | /* Do initial initialization of a struct mce */ |
112 | void mce_setup(struct mce *m) | |
113 | { | |
114 | memset(m, 0, sizeof(struct mce)); | |
d620c67f | 115 | m->cpu = m->extcpu = smp_processor_id(); |
b5f2fa4e | 116 | rdtscll(m->tsc); |
8ee08347 AK |
117 | /* We hope get_seconds stays lockless */ |
118 | m->time = get_seconds(); | |
119 | m->cpuvendor = boot_cpu_data.x86_vendor; | |
120 | m->cpuid = cpuid_eax(1); | |
121 | #ifdef CONFIG_SMP | |
122 | m->socketid = cpu_data(m->extcpu).phys_proc_id; | |
123 | #endif | |
124 | m->apicid = cpu_data(m->extcpu).initial_apicid; | |
125 | rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap); | |
b5f2fa4e AK |
126 | } |
127 | ||
ea149b36 AK |
128 | DEFINE_PER_CPU(struct mce, injectm); |
129 | EXPORT_PER_CPU_SYMBOL_GPL(injectm); | |
130 | ||
1da177e4 LT |
131 | /* |
132 | * Lockless MCE logging infrastructure. | |
133 | * This avoids deadlocks on printk locks without having to break locks. Also | |
134 | * separate MCEs from kernel messages to avoid bogus bug reports. | |
135 | */ | |
136 | ||
231fd906 | 137 | static struct mce_log mcelog = { |
f6fb0ac0 AK |
138 | .signature = MCE_LOG_SIGNATURE, |
139 | .len = MCE_LOG_LEN, | |
140 | .recordlen = sizeof(struct mce), | |
d88203d1 | 141 | }; |
1da177e4 LT |
142 | |
143 | void mce_log(struct mce *mce) | |
144 | { | |
145 | unsigned next, entry; | |
e9eee03e | 146 | |
1da177e4 | 147 | mce->finished = 0; |
7644143c | 148 | wmb(); |
1da177e4 LT |
149 | for (;;) { |
150 | entry = rcu_dereference(mcelog.next); | |
673242c1 | 151 | for (;;) { |
e9eee03e IM |
152 | /* |
153 | * When the buffer fills up discard new entries. | |
154 | * Assume that the earlier errors are the more | |
155 | * interesting ones: | |
156 | */ | |
673242c1 | 157 | if (entry >= MCE_LOG_LEN) { |
14a02530 HS |
158 | set_bit(MCE_OVERFLOW, |
159 | (unsigned long *)&mcelog.flags); | |
673242c1 AK |
160 | return; |
161 | } | |
e9eee03e | 162 | /* Old left over entry. Skip: */ |
673242c1 AK |
163 | if (mcelog.entry[entry].finished) { |
164 | entry++; | |
165 | continue; | |
166 | } | |
7644143c | 167 | break; |
1da177e4 | 168 | } |
1da177e4 LT |
169 | smp_rmb(); |
170 | next = entry + 1; | |
171 | if (cmpxchg(&mcelog.next, entry, next) == entry) | |
172 | break; | |
173 | } | |
174 | memcpy(mcelog.entry + entry, mce, sizeof(struct mce)); | |
7644143c | 175 | wmb(); |
1da177e4 | 176 | mcelog.entry[entry].finished = 1; |
7644143c | 177 | wmb(); |
1da177e4 | 178 | |
a0189c70 | 179 | mce->finished = 1; |
e02e68d3 | 180 | set_bit(0, ¬ify_user); |
1da177e4 LT |
181 | } |
182 | ||
86503560 | 183 | static void print_mce(struct mce *m, int *first) |
1da177e4 | 184 | { |
86503560 AK |
185 | if (*first) { |
186 | printk(KERN_EMERG "\n" KERN_EMERG "HARDWARE ERROR\n"); | |
187 | *first = 0; | |
188 | } | |
189 | printk(KERN_EMERG | |
1da177e4 | 190 | "CPU %d: Machine Check Exception: %16Lx Bank %d: %016Lx\n", |
d620c67f | 191 | m->extcpu, m->mcgstatus, m->bank, m->status); |
65ea5b03 | 192 | if (m->ip) { |
d88203d1 | 193 | printk(KERN_EMERG "RIP%s %02x:<%016Lx> ", |
1da177e4 | 194 | !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "", |
65ea5b03 | 195 | m->cs, m->ip); |
1da177e4 | 196 | if (m->cs == __KERNEL_CS) |
65ea5b03 | 197 | print_symbol("{%s}", m->ip); |
1da177e4 LT |
198 | printk("\n"); |
199 | } | |
f6d1826d | 200 | printk(KERN_EMERG "TSC %llx ", m->tsc); |
1da177e4 | 201 | if (m->addr) |
f6d1826d | 202 | printk("ADDR %llx ", m->addr); |
1da177e4 | 203 | if (m->misc) |
f6d1826d | 204 | printk("MISC %llx ", m->misc); |
1da177e4 | 205 | printk("\n"); |
8ee08347 AK |
206 | printk(KERN_EMERG "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n", |
207 | m->cpuvendor, m->cpuid, m->time, m->socketid, | |
208 | m->apicid); | |
86503560 AK |
209 | } |
210 | ||
211 | static void print_mce_tail(void) | |
212 | { | |
213 | printk(KERN_EMERG "This is not a software problem!\n" | |
214 | KERN_EMERG "Run through mcelog --ascii to decode and contact your hardware vendor\n"); | |
1da177e4 LT |
215 | } |
216 | ||
f94b61c2 AK |
217 | #define PANIC_TIMEOUT 5 /* 5 seconds */ |
218 | ||
219 | static atomic_t mce_paniced; | |
220 | ||
221 | /* Panic in progress. Enable interrupts and wait for final IPI */ | |
222 | static void wait_for_panic(void) | |
223 | { | |
224 | long timeout = PANIC_TIMEOUT*USEC_PER_SEC; | |
225 | preempt_disable(); | |
226 | local_irq_enable(); | |
227 | while (timeout-- > 0) | |
228 | udelay(1); | |
29b0f591 AK |
229 | if (panic_timeout == 0) |
230 | panic_timeout = mce_panic_timeout; | |
f94b61c2 AK |
231 | panic("Panicing machine check CPU died"); |
232 | } | |
233 | ||
bd19a5e6 | 234 | static void mce_panic(char *msg, struct mce *final, char *exp) |
d88203d1 | 235 | { |
1da177e4 | 236 | int i; |
86503560 | 237 | int first = 1; |
e02e68d3 | 238 | |
f94b61c2 AK |
239 | /* |
240 | * Make sure only one CPU runs in machine check panic | |
241 | */ | |
242 | if (atomic_add_return(1, &mce_paniced) > 1) | |
243 | wait_for_panic(); | |
244 | barrier(); | |
245 | ||
d896a940 AK |
246 | bust_spinlocks(1); |
247 | console_verbose(); | |
a0189c70 | 248 | /* First print corrected ones that are still unlogged */ |
1da177e4 | 249 | for (i = 0; i < MCE_LOG_LEN; i++) { |
a0189c70 AK |
250 | struct mce *m = &mcelog.entry[i]; |
251 | if ((m->status & MCI_STATUS_VAL) && | |
252 | !(m->status & MCI_STATUS_UC)) | |
86503560 | 253 | print_mce(m, &first); |
a0189c70 AK |
254 | } |
255 | /* Now print uncorrected but with the final one last */ | |
256 | for (i = 0; i < MCE_LOG_LEN; i++) { | |
257 | struct mce *m = &mcelog.entry[i]; | |
258 | if (!(m->status & MCI_STATUS_VAL)) | |
1da177e4 | 259 | continue; |
a0189c70 | 260 | if (!final || memcmp(m, final, sizeof(struct mce))) |
86503560 | 261 | print_mce(m, &first); |
1da177e4 | 262 | } |
a0189c70 | 263 | if (final) |
86503560 | 264 | print_mce(final, &first); |
3c079792 AK |
265 | if (cpu_missing) |
266 | printk(KERN_EMERG "Some CPUs didn't answer in synchronization\n"); | |
86503560 | 267 | print_mce_tail(); |
bd19a5e6 AK |
268 | if (exp) |
269 | printk(KERN_EMERG "Machine check: %s\n", exp); | |
29b0f591 AK |
270 | if (panic_timeout == 0) |
271 | panic_timeout = mce_panic_timeout; | |
e02e68d3 | 272 | panic(msg); |
d88203d1 | 273 | } |
1da177e4 | 274 | |
ea149b36 AK |
275 | /* Support code for software error injection */ |
276 | ||
277 | static int msr_to_offset(u32 msr) | |
278 | { | |
279 | unsigned bank = __get_cpu_var(injectm.bank); | |
280 | if (msr == rip_msr) | |
281 | return offsetof(struct mce, ip); | |
282 | if (msr == MSR_IA32_MC0_STATUS + bank*4) | |
283 | return offsetof(struct mce, status); | |
284 | if (msr == MSR_IA32_MC0_ADDR + bank*4) | |
285 | return offsetof(struct mce, addr); | |
286 | if (msr == MSR_IA32_MC0_MISC + bank*4) | |
287 | return offsetof(struct mce, misc); | |
288 | if (msr == MSR_IA32_MCG_STATUS) | |
289 | return offsetof(struct mce, mcgstatus); | |
290 | return -1; | |
291 | } | |
292 | ||
5f8c1a54 AK |
293 | /* MSR access wrappers used for error injection */ |
294 | static u64 mce_rdmsrl(u32 msr) | |
295 | { | |
296 | u64 v; | |
ea149b36 AK |
297 | if (__get_cpu_var(injectm).finished) { |
298 | int offset = msr_to_offset(msr); | |
299 | if (offset < 0) | |
300 | return 0; | |
301 | return *(u64 *)((char *)&__get_cpu_var(injectm) + offset); | |
302 | } | |
5f8c1a54 AK |
303 | rdmsrl(msr, v); |
304 | return v; | |
305 | } | |
306 | ||
307 | static void mce_wrmsrl(u32 msr, u64 v) | |
308 | { | |
ea149b36 AK |
309 | if (__get_cpu_var(injectm).finished) { |
310 | int offset = msr_to_offset(msr); | |
311 | if (offset >= 0) | |
312 | *(u64 *)((char *)&__get_cpu_var(injectm) + offset) = v; | |
313 | return; | |
314 | } | |
5f8c1a54 AK |
315 | wrmsrl(msr, v); |
316 | } | |
317 | ||
9b1beaf2 AK |
318 | /* |
319 | * Simple lockless ring to communicate PFNs from the exception handler with the | |
320 | * process context work function. This is vastly simplified because there's | |
321 | * only a single reader and a single writer. | |
322 | */ | |
323 | #define MCE_RING_SIZE 16 /* we use one entry less */ | |
324 | ||
325 | struct mce_ring { | |
326 | unsigned short start; | |
327 | unsigned short end; | |
328 | unsigned long ring[MCE_RING_SIZE]; | |
329 | }; | |
330 | static DEFINE_PER_CPU(struct mce_ring, mce_ring); | |
331 | ||
332 | /* Runs with CPU affinity in workqueue */ | |
333 | static int mce_ring_empty(void) | |
334 | { | |
335 | struct mce_ring *r = &__get_cpu_var(mce_ring); | |
336 | ||
337 | return r->start == r->end; | |
338 | } | |
339 | ||
340 | static int mce_ring_get(unsigned long *pfn) | |
341 | { | |
342 | struct mce_ring *r; | |
343 | int ret = 0; | |
344 | ||
345 | *pfn = 0; | |
346 | get_cpu(); | |
347 | r = &__get_cpu_var(mce_ring); | |
348 | if (r->start == r->end) | |
349 | goto out; | |
350 | *pfn = r->ring[r->start]; | |
351 | r->start = (r->start + 1) % MCE_RING_SIZE; | |
352 | ret = 1; | |
353 | out: | |
354 | put_cpu(); | |
355 | return ret; | |
356 | } | |
357 | ||
358 | /* Always runs in MCE context with preempt off */ | |
359 | static int mce_ring_add(unsigned long pfn) | |
360 | { | |
361 | struct mce_ring *r = &__get_cpu_var(mce_ring); | |
362 | unsigned next; | |
363 | ||
364 | next = (r->end + 1) % MCE_RING_SIZE; | |
365 | if (next == r->start) | |
366 | return -1; | |
367 | r->ring[r->end] = pfn; | |
368 | wmb(); | |
369 | r->end = next; | |
370 | return 0; | |
371 | } | |
372 | ||
88ccbedd | 373 | int mce_available(struct cpuinfo_x86 *c) |
1da177e4 | 374 | { |
04b2b1a4 | 375 | if (mce_disabled) |
5b4408fd | 376 | return 0; |
3d1712c9 | 377 | return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA); |
1da177e4 LT |
378 | } |
379 | ||
9b1beaf2 AK |
380 | static void mce_schedule_work(void) |
381 | { | |
382 | if (!mce_ring_empty()) { | |
383 | struct work_struct *work = &__get_cpu_var(mce_work); | |
384 | if (!work_pending(work)) | |
385 | schedule_work(work); | |
386 | } | |
387 | } | |
388 | ||
1b2797dc HY |
389 | /* |
390 | * Get the address of the instruction at the time of the machine check | |
391 | * error. | |
392 | */ | |
94ad8474 AK |
393 | static inline void mce_get_rip(struct mce *m, struct pt_regs *regs) |
394 | { | |
1b2797dc HY |
395 | |
396 | if (regs && (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV))) { | |
65ea5b03 | 397 | m->ip = regs->ip; |
94ad8474 AK |
398 | m->cs = regs->cs; |
399 | } else { | |
65ea5b03 | 400 | m->ip = 0; |
94ad8474 AK |
401 | m->cs = 0; |
402 | } | |
1b2797dc | 403 | if (rip_msr) |
5f8c1a54 | 404 | m->ip = mce_rdmsrl(rip_msr); |
94ad8474 AK |
405 | } |
406 | ||
ccc3c319 AK |
407 | #ifdef CONFIG_X86_LOCAL_APIC |
408 | /* | |
409 | * Called after interrupts have been reenabled again | |
410 | * when a MCE happened during an interrupts off region | |
411 | * in the kernel. | |
412 | */ | |
413 | asmlinkage void smp_mce_self_interrupt(struct pt_regs *regs) | |
414 | { | |
415 | ack_APIC_irq(); | |
416 | exit_idle(); | |
417 | irq_enter(); | |
9ff36ee9 | 418 | mce_notify_irq(); |
9b1beaf2 | 419 | mce_schedule_work(); |
ccc3c319 AK |
420 | irq_exit(); |
421 | } | |
422 | #endif | |
423 | ||
424 | static void mce_report_event(struct pt_regs *regs) | |
425 | { | |
426 | if (regs->flags & (X86_VM_MASK|X86_EFLAGS_IF)) { | |
9ff36ee9 | 427 | mce_notify_irq(); |
9b1beaf2 AK |
428 | /* |
429 | * Triggering the work queue here is just an insurance | |
430 | * policy in case the syscall exit notify handler | |
431 | * doesn't run soon enough or ends up running on the | |
432 | * wrong CPU (can happen when audit sleeps) | |
433 | */ | |
434 | mce_schedule_work(); | |
ccc3c319 AK |
435 | return; |
436 | } | |
437 | ||
438 | #ifdef CONFIG_X86_LOCAL_APIC | |
439 | /* | |
440 | * Without APIC do not notify. The event will be picked | |
441 | * up eventually. | |
442 | */ | |
443 | if (!cpu_has_apic) | |
444 | return; | |
445 | ||
446 | /* | |
447 | * When interrupts are disabled we cannot use | |
448 | * kernel services safely. Trigger an self interrupt | |
449 | * through the APIC to instead do the notification | |
450 | * after interrupts are reenabled again. | |
451 | */ | |
452 | apic->send_IPI_self(MCE_SELF_VECTOR); | |
453 | ||
454 | /* | |
455 | * Wait for idle afterwards again so that we don't leave the | |
456 | * APIC in a non idle state because the normal APIC writes | |
457 | * cannot exclude us. | |
458 | */ | |
459 | apic_wait_icr_idle(); | |
460 | #endif | |
461 | } | |
462 | ||
ca84f696 AK |
463 | DEFINE_PER_CPU(unsigned, mce_poll_count); |
464 | ||
d88203d1 | 465 | /* |
b79109c3 AK |
466 | * Poll for corrected events or events that happened before reset. |
467 | * Those are just logged through /dev/mcelog. | |
468 | * | |
469 | * This is executed in standard interrupt context. | |
ed7290d0 AK |
470 | * |
471 | * Note: spec recommends to panic for fatal unsignalled | |
472 | * errors here. However this would be quite problematic -- | |
473 | * we would need to reimplement the Monarch handling and | |
474 | * it would mess up the exclusion between exception handler | |
475 | * and poll hander -- * so we skip this for now. | |
476 | * These cases should not happen anyways, or only when the CPU | |
477 | * is already totally * confused. In this case it's likely it will | |
478 | * not fully execute the machine check handler either. | |
b79109c3 | 479 | */ |
ee031c31 | 480 | void machine_check_poll(enum mcp_flags flags, mce_banks_t *b) |
b79109c3 AK |
481 | { |
482 | struct mce m; | |
483 | int i; | |
484 | ||
ca84f696 AK |
485 | __get_cpu_var(mce_poll_count)++; |
486 | ||
b79109c3 AK |
487 | mce_setup(&m); |
488 | ||
5f8c1a54 | 489 | m.mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS); |
b79109c3 | 490 | for (i = 0; i < banks; i++) { |
ee031c31 | 491 | if (!bank[i] || !test_bit(i, *b)) |
b79109c3 AK |
492 | continue; |
493 | ||
494 | m.misc = 0; | |
495 | m.addr = 0; | |
496 | m.bank = i; | |
497 | m.tsc = 0; | |
498 | ||
499 | barrier(); | |
5f8c1a54 | 500 | m.status = mce_rdmsrl(MSR_IA32_MC0_STATUS + i*4); |
b79109c3 AK |
501 | if (!(m.status & MCI_STATUS_VAL)) |
502 | continue; | |
503 | ||
504 | /* | |
ed7290d0 AK |
505 | * Uncorrected or signalled events are handled by the exception |
506 | * handler when it is enabled, so don't process those here. | |
b79109c3 AK |
507 | * |
508 | * TBD do the same check for MCI_STATUS_EN here? | |
509 | */ | |
ed7290d0 AK |
510 | if (!(flags & MCP_UC) && |
511 | (m.status & (mce_ser ? MCI_STATUS_S : MCI_STATUS_UC))) | |
b79109c3 AK |
512 | continue; |
513 | ||
514 | if (m.status & MCI_STATUS_MISCV) | |
5f8c1a54 | 515 | m.misc = mce_rdmsrl(MSR_IA32_MC0_MISC + i*4); |
b79109c3 | 516 | if (m.status & MCI_STATUS_ADDRV) |
5f8c1a54 | 517 | m.addr = mce_rdmsrl(MSR_IA32_MC0_ADDR + i*4); |
b79109c3 AK |
518 | |
519 | if (!(flags & MCP_TIMESTAMP)) | |
520 | m.tsc = 0; | |
521 | /* | |
522 | * Don't get the IP here because it's unlikely to | |
523 | * have anything to do with the actual error location. | |
524 | */ | |
5679af4c AK |
525 | if (!(flags & MCP_DONTLOG)) { |
526 | mce_log(&m); | |
527 | add_taint(TAINT_MACHINE_CHECK); | |
528 | } | |
b79109c3 AK |
529 | |
530 | /* | |
531 | * Clear state for this bank. | |
532 | */ | |
5f8c1a54 | 533 | mce_wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0); |
b79109c3 AK |
534 | } |
535 | ||
536 | /* | |
537 | * Don't clear MCG_STATUS here because it's only defined for | |
538 | * exceptions. | |
539 | */ | |
88921be3 AK |
540 | |
541 | sync_core(); | |
b79109c3 | 542 | } |
ea149b36 | 543 | EXPORT_SYMBOL_GPL(machine_check_poll); |
b79109c3 | 544 | |
bd19a5e6 AK |
545 | /* |
546 | * Do a quick check if any of the events requires a panic. | |
547 | * This decides if we keep the events around or clear them. | |
548 | */ | |
549 | static int mce_no_way_out(struct mce *m, char **msg) | |
550 | { | |
551 | int i; | |
552 | ||
553 | for (i = 0; i < banks; i++) { | |
554 | m->status = mce_rdmsrl(MSR_IA32_MC0_STATUS + i*4); | |
555 | if (mce_severity(m, tolerant, msg) >= MCE_PANIC_SEVERITY) | |
556 | return 1; | |
557 | } | |
558 | return 0; | |
559 | } | |
560 | ||
3c079792 AK |
561 | /* |
562 | * Variable to establish order between CPUs while scanning. | |
563 | * Each CPU spins initially until executing is equal its number. | |
564 | */ | |
565 | static atomic_t mce_executing; | |
566 | ||
567 | /* | |
568 | * Defines order of CPUs on entry. First CPU becomes Monarch. | |
569 | */ | |
570 | static atomic_t mce_callin; | |
571 | ||
572 | /* | |
573 | * Check if a timeout waiting for other CPUs happened. | |
574 | */ | |
575 | static int mce_timed_out(u64 *t) | |
576 | { | |
577 | /* | |
578 | * The others already did panic for some reason. | |
579 | * Bail out like in a timeout. | |
580 | * rmb() to tell the compiler that system_state | |
581 | * might have been modified by someone else. | |
582 | */ | |
583 | rmb(); | |
584 | if (atomic_read(&mce_paniced)) | |
585 | wait_for_panic(); | |
586 | if (!monarch_timeout) | |
587 | goto out; | |
588 | if ((s64)*t < SPINUNIT) { | |
589 | /* CHECKME: Make panic default for 1 too? */ | |
590 | if (tolerant < 1) | |
591 | mce_panic("Timeout synchronizing machine check over CPUs", | |
592 | NULL, NULL); | |
593 | cpu_missing = 1; | |
594 | return 1; | |
595 | } | |
596 | *t -= SPINUNIT; | |
597 | out: | |
598 | touch_nmi_watchdog(); | |
599 | return 0; | |
600 | } | |
601 | ||
602 | /* | |
603 | * The Monarch's reign. The Monarch is the CPU who entered | |
604 | * the machine check handler first. It waits for the others to | |
605 | * raise the exception too and then grades them. When any | |
606 | * error is fatal panic. Only then let the others continue. | |
607 | * | |
608 | * The other CPUs entering the MCE handler will be controlled by the | |
609 | * Monarch. They are called Subjects. | |
610 | * | |
611 | * This way we prevent any potential data corruption in a unrecoverable case | |
612 | * and also makes sure always all CPU's errors are examined. | |
613 | * | |
614 | * Also this detects the case of an machine check event coming from outer | |
615 | * space (not detected by any CPUs) In this case some external agent wants | |
616 | * us to shut down, so panic too. | |
617 | * | |
618 | * The other CPUs might still decide to panic if the handler happens | |
619 | * in a unrecoverable place, but in this case the system is in a semi-stable | |
620 | * state and won't corrupt anything by itself. It's ok to let the others | |
621 | * continue for a bit first. | |
622 | * | |
623 | * All the spin loops have timeouts; when a timeout happens a CPU | |
624 | * typically elects itself to be Monarch. | |
625 | */ | |
626 | static void mce_reign(void) | |
627 | { | |
628 | int cpu; | |
629 | struct mce *m = NULL; | |
630 | int global_worst = 0; | |
631 | char *msg = NULL; | |
632 | char *nmsg = NULL; | |
633 | ||
634 | /* | |
635 | * This CPU is the Monarch and the other CPUs have run | |
636 | * through their handlers. | |
637 | * Grade the severity of the errors of all the CPUs. | |
638 | */ | |
639 | for_each_possible_cpu(cpu) { | |
640 | int severity = mce_severity(&per_cpu(mces_seen, cpu), tolerant, | |
641 | &nmsg); | |
642 | if (severity > global_worst) { | |
643 | msg = nmsg; | |
644 | global_worst = severity; | |
645 | m = &per_cpu(mces_seen, cpu); | |
646 | } | |
647 | } | |
648 | ||
649 | /* | |
650 | * Cannot recover? Panic here then. | |
651 | * This dumps all the mces in the log buffer and stops the | |
652 | * other CPUs. | |
653 | */ | |
654 | if (m && global_worst >= MCE_PANIC_SEVERITY && tolerant < 3) | |
ac960375 | 655 | mce_panic("Fatal Machine check", m, msg); |
3c079792 AK |
656 | |
657 | /* | |
658 | * For UC somewhere we let the CPU who detects it handle it. | |
659 | * Also must let continue the others, otherwise the handling | |
660 | * CPU could deadlock on a lock. | |
661 | */ | |
662 | ||
663 | /* | |
664 | * No machine check event found. Must be some external | |
665 | * source or one CPU is hung. Panic. | |
666 | */ | |
667 | if (!m && tolerant < 3) | |
668 | mce_panic("Machine check from unknown source", NULL, NULL); | |
669 | ||
670 | /* | |
671 | * Now clear all the mces_seen so that they don't reappear on | |
672 | * the next mce. | |
673 | */ | |
674 | for_each_possible_cpu(cpu) | |
675 | memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce)); | |
676 | } | |
677 | ||
678 | static atomic_t global_nwo; | |
679 | ||
680 | /* | |
681 | * Start of Monarch synchronization. This waits until all CPUs have | |
682 | * entered the exception handler and then determines if any of them | |
683 | * saw a fatal event that requires panic. Then it executes them | |
684 | * in the entry order. | |
685 | * TBD double check parallel CPU hotunplug | |
686 | */ | |
687 | static int mce_start(int no_way_out, int *order) | |
688 | { | |
689 | int nwo; | |
690 | int cpus = num_online_cpus(); | |
691 | u64 timeout = (u64)monarch_timeout * NSEC_PER_USEC; | |
692 | ||
693 | if (!timeout) { | |
694 | *order = -1; | |
695 | return no_way_out; | |
696 | } | |
697 | ||
698 | atomic_add(no_way_out, &global_nwo); | |
699 | ||
700 | /* | |
701 | * Wait for everyone. | |
702 | */ | |
703 | while (atomic_read(&mce_callin) != cpus) { | |
704 | if (mce_timed_out(&timeout)) { | |
705 | atomic_set(&global_nwo, 0); | |
706 | *order = -1; | |
707 | return no_way_out; | |
708 | } | |
709 | ndelay(SPINUNIT); | |
710 | } | |
711 | ||
712 | /* | |
713 | * Cache the global no_way_out state. | |
714 | */ | |
715 | nwo = atomic_read(&global_nwo); | |
716 | ||
717 | /* | |
718 | * Monarch starts executing now, the others wait. | |
719 | */ | |
720 | if (*order == 1) { | |
721 | atomic_set(&mce_executing, 1); | |
722 | return nwo; | |
723 | } | |
724 | ||
725 | /* | |
726 | * Now start the scanning loop one by one | |
727 | * in the original callin order. | |
728 | * This way when there are any shared banks it will | |
729 | * be only seen by one CPU before cleared, avoiding duplicates. | |
730 | */ | |
731 | while (atomic_read(&mce_executing) < *order) { | |
732 | if (mce_timed_out(&timeout)) { | |
733 | atomic_set(&global_nwo, 0); | |
734 | *order = -1; | |
735 | return no_way_out; | |
736 | } | |
737 | ndelay(SPINUNIT); | |
738 | } | |
739 | return nwo; | |
740 | } | |
741 | ||
742 | /* | |
743 | * Synchronize between CPUs after main scanning loop. | |
744 | * This invokes the bulk of the Monarch processing. | |
745 | */ | |
746 | static int mce_end(int order) | |
747 | { | |
748 | int ret = -1; | |
749 | u64 timeout = (u64)monarch_timeout * NSEC_PER_USEC; | |
750 | ||
751 | if (!timeout) | |
752 | goto reset; | |
753 | if (order < 0) | |
754 | goto reset; | |
755 | ||
756 | /* | |
757 | * Allow others to run. | |
758 | */ | |
759 | atomic_inc(&mce_executing); | |
760 | ||
761 | if (order == 1) { | |
762 | /* CHECKME: Can this race with a parallel hotplug? */ | |
763 | int cpus = num_online_cpus(); | |
764 | ||
765 | /* | |
766 | * Monarch: Wait for everyone to go through their scanning | |
767 | * loops. | |
768 | */ | |
769 | while (atomic_read(&mce_executing) <= cpus) { | |
770 | if (mce_timed_out(&timeout)) | |
771 | goto reset; | |
772 | ndelay(SPINUNIT); | |
773 | } | |
774 | ||
775 | mce_reign(); | |
776 | barrier(); | |
777 | ret = 0; | |
778 | } else { | |
779 | /* | |
780 | * Subject: Wait for Monarch to finish. | |
781 | */ | |
782 | while (atomic_read(&mce_executing) != 0) { | |
783 | if (mce_timed_out(&timeout)) | |
784 | goto reset; | |
785 | ndelay(SPINUNIT); | |
786 | } | |
787 | ||
788 | /* | |
789 | * Don't reset anything. That's done by the Monarch. | |
790 | */ | |
791 | return 0; | |
792 | } | |
793 | ||
794 | /* | |
795 | * Reset all global state. | |
796 | */ | |
797 | reset: | |
798 | atomic_set(&global_nwo, 0); | |
799 | atomic_set(&mce_callin, 0); | |
800 | barrier(); | |
801 | ||
802 | /* | |
803 | * Let others run again. | |
804 | */ | |
805 | atomic_set(&mce_executing, 0); | |
806 | return ret; | |
807 | } | |
808 | ||
9b1beaf2 AK |
809 | /* |
810 | * Check if the address reported by the CPU is in a format we can parse. | |
811 | * It would be possible to add code for most other cases, but all would | |
812 | * be somewhat complicated (e.g. segment offset would require an instruction | |
813 | * parser). So only support physical addresses upto page granuality for now. | |
814 | */ | |
815 | static int mce_usable_address(struct mce *m) | |
816 | { | |
817 | if (!(m->status & MCI_STATUS_MISCV) || !(m->status & MCI_STATUS_ADDRV)) | |
818 | return 0; | |
819 | if ((m->misc & 0x3f) > PAGE_SHIFT) | |
820 | return 0; | |
821 | if (((m->misc >> 6) & 7) != MCM_ADDR_PHYS) | |
822 | return 0; | |
823 | return 1; | |
824 | } | |
825 | ||
3c079792 AK |
826 | static void mce_clear_state(unsigned long *toclear) |
827 | { | |
828 | int i; | |
829 | ||
830 | for (i = 0; i < banks; i++) { | |
831 | if (test_bit(i, toclear)) | |
832 | mce_wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0); | |
833 | } | |
834 | } | |
835 | ||
b79109c3 AK |
836 | /* |
837 | * The actual machine check handler. This only handles real | |
838 | * exceptions when something got corrupted coming in through int 18. | |
839 | * | |
840 | * This is executed in NMI context not subject to normal locking rules. This | |
841 | * implies that most kernel services cannot be safely used. Don't even | |
842 | * think about putting a printk in there! | |
3c079792 AK |
843 | * |
844 | * On Intel systems this is entered on all CPUs in parallel through | |
845 | * MCE broadcast. However some CPUs might be broken beyond repair, | |
846 | * so be always careful when synchronizing with others. | |
1da177e4 | 847 | */ |
e9eee03e | 848 | void do_machine_check(struct pt_regs *regs, long error_code) |
1da177e4 | 849 | { |
3c079792 | 850 | struct mce m, *final; |
1da177e4 | 851 | int i; |
3c079792 AK |
852 | int worst = 0; |
853 | int severity; | |
854 | /* | |
855 | * Establish sequential order between the CPUs entering the machine | |
856 | * check handler. | |
857 | */ | |
858 | int order; | |
859 | ||
bd78432c TH |
860 | /* |
861 | * If no_way_out gets set, there is no safe way to recover from this | |
862 | * MCE. If tolerant is cranked up, we'll try anyway. | |
863 | */ | |
864 | int no_way_out = 0; | |
865 | /* | |
866 | * If kill_it gets set, there might be a way to recover from this | |
867 | * error. | |
868 | */ | |
869 | int kill_it = 0; | |
b79109c3 | 870 | DECLARE_BITMAP(toclear, MAX_NR_BANKS); |
bd19a5e6 | 871 | char *msg = "Unknown"; |
1da177e4 | 872 | |
553f265f AK |
873 | atomic_inc(&mce_entry); |
874 | ||
01ca79f1 AK |
875 | __get_cpu_var(mce_exception_count)++; |
876 | ||
b79109c3 | 877 | if (notify_die(DIE_NMI, "machine check", regs, error_code, |
22f5991c | 878 | 18, SIGKILL) == NOTIFY_STOP) |
32561696 | 879 | goto out; |
b79109c3 | 880 | if (!banks) |
32561696 | 881 | goto out; |
1da177e4 | 882 | |
3c079792 | 883 | order = atomic_add_return(1, &mce_callin); |
b5f2fa4e AK |
884 | mce_setup(&m); |
885 | ||
5f8c1a54 | 886 | m.mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS); |
bd19a5e6 | 887 | no_way_out = mce_no_way_out(&m, &msg); |
d88203d1 | 888 | |
3c079792 AK |
889 | final = &__get_cpu_var(mces_seen); |
890 | *final = m; | |
891 | ||
1da177e4 LT |
892 | barrier(); |
893 | ||
ed7290d0 AK |
894 | /* |
895 | * When no restart IP must always kill or panic. | |
896 | */ | |
897 | if (!(m.mcgstatus & MCG_STATUS_RIPV)) | |
898 | kill_it = 1; | |
899 | ||
3c079792 AK |
900 | /* |
901 | * Go through all the banks in exclusion of the other CPUs. | |
902 | * This way we don't report duplicated events on shared banks | |
903 | * because the first one to see it will clear it. | |
904 | */ | |
905 | no_way_out = mce_start(no_way_out, &order); | |
1da177e4 | 906 | for (i = 0; i < banks; i++) { |
b79109c3 | 907 | __clear_bit(i, toclear); |
0d7482e3 | 908 | if (!bank[i]) |
1da177e4 | 909 | continue; |
d88203d1 TG |
910 | |
911 | m.misc = 0; | |
1da177e4 LT |
912 | m.addr = 0; |
913 | m.bank = i; | |
1da177e4 | 914 | |
5f8c1a54 | 915 | m.status = mce_rdmsrl(MSR_IA32_MC0_STATUS + i*4); |
1da177e4 LT |
916 | if ((m.status & MCI_STATUS_VAL) == 0) |
917 | continue; | |
918 | ||
b79109c3 | 919 | /* |
ed7290d0 AK |
920 | * Non uncorrected or non signaled errors are handled by |
921 | * machine_check_poll. Leave them alone, unless this panics. | |
b79109c3 | 922 | */ |
ed7290d0 AK |
923 | if (!(m.status & (mce_ser ? MCI_STATUS_S : MCI_STATUS_UC)) && |
924 | !no_way_out) | |
b79109c3 AK |
925 | continue; |
926 | ||
927 | /* | |
928 | * Set taint even when machine check was not enabled. | |
929 | */ | |
930 | add_taint(TAINT_MACHINE_CHECK); | |
931 | ||
ed7290d0 | 932 | severity = mce_severity(&m, tolerant, NULL); |
b79109c3 | 933 | |
ed7290d0 AK |
934 | /* |
935 | * When machine check was for corrected handler don't touch, | |
936 | * unless we're panicing. | |
937 | */ | |
938 | if (severity == MCE_KEEP_SEVERITY && !no_way_out) | |
939 | continue; | |
940 | __set_bit(i, toclear); | |
941 | if (severity == MCE_NO_SEVERITY) { | |
b79109c3 AK |
942 | /* |
943 | * Machine check event was not enabled. Clear, but | |
944 | * ignore. | |
945 | */ | |
946 | continue; | |
1da177e4 LT |
947 | } |
948 | ||
ed7290d0 AK |
949 | /* |
950 | * Kill on action required. | |
951 | */ | |
952 | if (severity == MCE_AR_SEVERITY) | |
953 | kill_it = 1; | |
954 | ||
1da177e4 | 955 | if (m.status & MCI_STATUS_MISCV) |
5f8c1a54 | 956 | m.misc = mce_rdmsrl(MSR_IA32_MC0_MISC + i*4); |
1da177e4 | 957 | if (m.status & MCI_STATUS_ADDRV) |
5f8c1a54 | 958 | m.addr = mce_rdmsrl(MSR_IA32_MC0_ADDR + i*4); |
1da177e4 | 959 | |
9b1beaf2 AK |
960 | /* |
961 | * Action optional error. Queue address for later processing. | |
962 | * When the ring overflows we just ignore the AO error. | |
963 | * RED-PEN add some logging mechanism when | |
964 | * usable_address or mce_add_ring fails. | |
965 | * RED-PEN don't ignore overflow for tolerant == 0 | |
966 | */ | |
967 | if (severity == MCE_AO_SEVERITY && mce_usable_address(&m)) | |
968 | mce_ring_add(m.addr >> PAGE_SHIFT); | |
969 | ||
94ad8474 | 970 | mce_get_rip(&m, regs); |
b79109c3 | 971 | mce_log(&m); |
1da177e4 | 972 | |
3c079792 AK |
973 | if (severity > worst) { |
974 | *final = m; | |
975 | worst = severity; | |
1da177e4 | 976 | } |
1da177e4 LT |
977 | } |
978 | ||
3c079792 AK |
979 | if (!no_way_out) |
980 | mce_clear_state(toclear); | |
981 | ||
e9eee03e | 982 | /* |
3c079792 AK |
983 | * Do most of the synchronization with other CPUs. |
984 | * When there's any problem use only local no_way_out state. | |
e9eee03e | 985 | */ |
3c079792 AK |
986 | if (mce_end(order) < 0) |
987 | no_way_out = worst >= MCE_PANIC_SEVERITY; | |
bd78432c TH |
988 | |
989 | /* | |
990 | * If we have decided that we just CAN'T continue, and the user | |
e9eee03e | 991 | * has not set tolerant to an insane level, give up and die. |
3c079792 AK |
992 | * |
993 | * This is mainly used in the case when the system doesn't | |
994 | * support MCE broadcasting or it has been disabled. | |
bd78432c TH |
995 | */ |
996 | if (no_way_out && tolerant < 3) | |
ac960375 | 997 | mce_panic("Fatal machine check on current CPU", final, msg); |
bd78432c TH |
998 | |
999 | /* | |
1000 | * If the error seems to be unrecoverable, something should be | |
1001 | * done. Try to kill as little as possible. If we can kill just | |
1002 | * one task, do that. If the user has set the tolerance very | |
1003 | * high, don't try to do anything at all. | |
1004 | */ | |
bd78432c | 1005 | |
ed7290d0 AK |
1006 | if (kill_it && tolerant < 3) |
1007 | force_sig(SIGBUS, current); | |
1da177e4 | 1008 | |
e02e68d3 TH |
1009 | /* notify userspace ASAP */ |
1010 | set_thread_flag(TIF_MCE_NOTIFY); | |
1011 | ||
3c079792 AK |
1012 | if (worst > 0) |
1013 | mce_report_event(regs); | |
5f8c1a54 | 1014 | mce_wrmsrl(MSR_IA32_MCG_STATUS, 0); |
32561696 | 1015 | out: |
553f265f | 1016 | atomic_dec(&mce_entry); |
88921be3 | 1017 | sync_core(); |
1da177e4 | 1018 | } |
ea149b36 | 1019 | EXPORT_SYMBOL_GPL(do_machine_check); |
1da177e4 | 1020 | |
9b1beaf2 AK |
1021 | /* dummy to break dependency. actual code is in mm/memory-failure.c */ |
1022 | void __attribute__((weak)) memory_failure(unsigned long pfn, int vector) | |
1023 | { | |
1024 | printk(KERN_ERR "Action optional memory failure at %lx ignored\n", pfn); | |
1025 | } | |
1026 | ||
1027 | /* | |
1028 | * Called after mce notification in process context. This code | |
1029 | * is allowed to sleep. Call the high level VM handler to process | |
1030 | * any corrupted pages. | |
1031 | * Assume that the work queue code only calls this one at a time | |
1032 | * per CPU. | |
1033 | * Note we don't disable preemption, so this code might run on the wrong | |
1034 | * CPU. In this case the event is picked up by the scheduled work queue. | |
1035 | * This is merely a fast path to expedite processing in some common | |
1036 | * cases. | |
1037 | */ | |
1038 | void mce_notify_process(void) | |
1039 | { | |
1040 | unsigned long pfn; | |
1041 | mce_notify_irq(); | |
1042 | while (mce_ring_get(&pfn)) | |
1043 | memory_failure(pfn, MCE_VECTOR); | |
1044 | } | |
1045 | ||
1046 | static void mce_process_work(struct work_struct *dummy) | |
1047 | { | |
1048 | mce_notify_process(); | |
1049 | } | |
1050 | ||
15d5f839 DZ |
1051 | #ifdef CONFIG_X86_MCE_INTEL |
1052 | /*** | |
1053 | * mce_log_therm_throt_event - Logs the thermal throttling event to mcelog | |
676b1855 | 1054 | * @cpu: The CPU on which the event occurred. |
15d5f839 DZ |
1055 | * @status: Event status information |
1056 | * | |
1057 | * This function should be called by the thermal interrupt after the | |
1058 | * event has been processed and the decision was made to log the event | |
1059 | * further. | |
1060 | * | |
1061 | * The status parameter will be saved to the 'status' field of 'struct mce' | |
1062 | * and historically has been the register value of the | |
1063 | * MSR_IA32_THERMAL_STATUS (Intel) msr. | |
1064 | */ | |
b5f2fa4e | 1065 | void mce_log_therm_throt_event(__u64 status) |
15d5f839 DZ |
1066 | { |
1067 | struct mce m; | |
1068 | ||
b5f2fa4e | 1069 | mce_setup(&m); |
15d5f839 DZ |
1070 | m.bank = MCE_THERMAL_BANK; |
1071 | m.status = status; | |
15d5f839 DZ |
1072 | mce_log(&m); |
1073 | } | |
1074 | #endif /* CONFIG_X86_MCE_INTEL */ | |
1075 | ||
1da177e4 | 1076 | /* |
8a336b0a TH |
1077 | * Periodic polling timer for "silent" machine check errors. If the |
1078 | * poller finds an MCE, poll 2x faster. When the poller finds no more | |
1079 | * errors, poll 2x slower (up to check_interval seconds). | |
1da177e4 | 1080 | */ |
1da177e4 | 1081 | static int check_interval = 5 * 60; /* 5 minutes */ |
e9eee03e | 1082 | |
6298c512 | 1083 | static DEFINE_PER_CPU(int, next_interval); /* in jiffies */ |
52d168e2 | 1084 | static DEFINE_PER_CPU(struct timer_list, mce_timer); |
1da177e4 | 1085 | |
52d168e2 | 1086 | static void mcheck_timer(unsigned long data) |
1da177e4 | 1087 | { |
52d168e2 | 1088 | struct timer_list *t = &per_cpu(mce_timer, data); |
6298c512 | 1089 | int *n; |
52d168e2 AK |
1090 | |
1091 | WARN_ON(smp_processor_id() != data); | |
1092 | ||
e9eee03e | 1093 | if (mce_available(¤t_cpu_data)) { |
ee031c31 AK |
1094 | machine_check_poll(MCP_TIMESTAMP, |
1095 | &__get_cpu_var(mce_poll_banks)); | |
e9eee03e | 1096 | } |
1da177e4 LT |
1097 | |
1098 | /* | |
e02e68d3 TH |
1099 | * Alert userspace if needed. If we logged an MCE, reduce the |
1100 | * polling interval, otherwise increase the polling interval. | |
1da177e4 | 1101 | */ |
6298c512 | 1102 | n = &__get_cpu_var(next_interval); |
9ff36ee9 | 1103 | if (mce_notify_irq()) |
6298c512 | 1104 | *n = max(*n/2, HZ/100); |
14a02530 | 1105 | else |
6298c512 | 1106 | *n = min(*n*2, (int)round_jiffies_relative(check_interval*HZ)); |
e02e68d3 | 1107 | |
6298c512 | 1108 | t->expires = jiffies + *n; |
52d168e2 | 1109 | add_timer(t); |
e02e68d3 TH |
1110 | } |
1111 | ||
9bd98405 AK |
1112 | static void mce_do_trigger(struct work_struct *work) |
1113 | { | |
1114 | call_usermodehelper(trigger, trigger_argv, NULL, UMH_NO_WAIT); | |
1115 | } | |
1116 | ||
1117 | static DECLARE_WORK(mce_trigger_work, mce_do_trigger); | |
1118 | ||
e02e68d3 | 1119 | /* |
9bd98405 AK |
1120 | * Notify the user(s) about new machine check events. |
1121 | * Can be called from interrupt context, but not from machine check/NMI | |
1122 | * context. | |
e02e68d3 | 1123 | */ |
9ff36ee9 | 1124 | int mce_notify_irq(void) |
e02e68d3 | 1125 | { |
8457c84d AK |
1126 | /* Not more than two messages every minute */ |
1127 | static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2); | |
1128 | ||
e02e68d3 | 1129 | clear_thread_flag(TIF_MCE_NOTIFY); |
e9eee03e | 1130 | |
e02e68d3 | 1131 | if (test_and_clear_bit(0, ¬ify_user)) { |
e02e68d3 | 1132 | wake_up_interruptible(&mce_wait); |
9bd98405 AK |
1133 | |
1134 | /* | |
1135 | * There is no risk of missing notifications because | |
1136 | * work_pending is always cleared before the function is | |
1137 | * executed. | |
1138 | */ | |
1139 | if (trigger[0] && !work_pending(&mce_trigger_work)) | |
1140 | schedule_work(&mce_trigger_work); | |
e02e68d3 | 1141 | |
8457c84d | 1142 | if (__ratelimit(&ratelimit)) |
8a336b0a | 1143 | printk(KERN_INFO "Machine check events logged\n"); |
e02e68d3 TH |
1144 | |
1145 | return 1; | |
1da177e4 | 1146 | } |
e02e68d3 TH |
1147 | return 0; |
1148 | } | |
9ff36ee9 | 1149 | EXPORT_SYMBOL_GPL(mce_notify_irq); |
8a336b0a | 1150 | |
d88203d1 | 1151 | /* |
1da177e4 LT |
1152 | * Initialize Machine Checks for a CPU. |
1153 | */ | |
0d7482e3 | 1154 | static int mce_cap_init(void) |
1da177e4 | 1155 | { |
0d7482e3 | 1156 | unsigned b; |
e9eee03e | 1157 | u64 cap; |
1da177e4 LT |
1158 | |
1159 | rdmsrl(MSR_IA32_MCG_CAP, cap); | |
01c6680a TG |
1160 | |
1161 | b = cap & MCG_BANKCNT_MASK; | |
b659294b IM |
1162 | printk(KERN_INFO "mce: CPU supports %d MCE banks\n", b); |
1163 | ||
0d7482e3 AK |
1164 | if (b > MAX_NR_BANKS) { |
1165 | printk(KERN_WARNING | |
1166 | "MCE: Using only %u machine check banks out of %u\n", | |
1167 | MAX_NR_BANKS, b); | |
1168 | b = MAX_NR_BANKS; | |
1169 | } | |
1170 | ||
1171 | /* Don't support asymmetric configurations today */ | |
1172 | WARN_ON(banks != 0 && b != banks); | |
1173 | banks = b; | |
1174 | if (!bank) { | |
1175 | bank = kmalloc(banks * sizeof(u64), GFP_KERNEL); | |
1176 | if (!bank) | |
1177 | return -ENOMEM; | |
1178 | memset(bank, 0xff, banks * sizeof(u64)); | |
1da177e4 | 1179 | } |
0d7482e3 | 1180 | |
94ad8474 | 1181 | /* Use accurate RIP reporting if available. */ |
01c6680a | 1182 | if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9) |
94ad8474 | 1183 | rip_msr = MSR_IA32_MCG_EIP; |
1da177e4 | 1184 | |
ed7290d0 AK |
1185 | if (cap & MCG_SER_P) |
1186 | mce_ser = 1; | |
1187 | ||
0d7482e3 AK |
1188 | return 0; |
1189 | } | |
1190 | ||
8be91105 | 1191 | static void mce_init(void) |
0d7482e3 | 1192 | { |
e9eee03e | 1193 | mce_banks_t all_banks; |
0d7482e3 AK |
1194 | u64 cap; |
1195 | int i; | |
1196 | ||
b79109c3 AK |
1197 | /* |
1198 | * Log the machine checks left over from the previous reset. | |
1199 | */ | |
ee031c31 | 1200 | bitmap_fill(all_banks, MAX_NR_BANKS); |
5679af4c | 1201 | machine_check_poll(MCP_UC|(!mce_bootlog ? MCP_DONTLOG : 0), &all_banks); |
1da177e4 LT |
1202 | |
1203 | set_in_cr4(X86_CR4_MCE); | |
1204 | ||
0d7482e3 | 1205 | rdmsrl(MSR_IA32_MCG_CAP, cap); |
1da177e4 LT |
1206 | if (cap & MCG_CTL_P) |
1207 | wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff); | |
1208 | ||
1209 | for (i = 0; i < banks; i++) { | |
06b7a7a5 AK |
1210 | if (skip_bank_init(i)) |
1211 | continue; | |
0d7482e3 | 1212 | wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]); |
1da177e4 | 1213 | wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0); |
d88203d1 | 1214 | } |
1da177e4 LT |
1215 | } |
1216 | ||
1217 | /* Add per CPU specific workarounds here */ | |
ec5b3d32 | 1218 | static void mce_cpu_quirks(struct cpuinfo_x86 *c) |
d88203d1 | 1219 | { |
1da177e4 | 1220 | /* This should be disabled by the BIOS, but isn't always */ |
911f6a7b | 1221 | if (c->x86_vendor == X86_VENDOR_AMD) { |
e9eee03e IM |
1222 | if (c->x86 == 15 && banks > 4) { |
1223 | /* | |
1224 | * disable GART TBL walk error reporting, which | |
1225 | * trips off incorrectly with the IOMMU & 3ware | |
1226 | * & Cerberus: | |
1227 | */ | |
0d7482e3 | 1228 | clear_bit(10, (unsigned long *)&bank[4]); |
e9eee03e IM |
1229 | } |
1230 | if (c->x86 <= 17 && mce_bootlog < 0) { | |
1231 | /* | |
1232 | * Lots of broken BIOS around that don't clear them | |
1233 | * by default and leave crap in there. Don't log: | |
1234 | */ | |
911f6a7b | 1235 | mce_bootlog = 0; |
e9eee03e | 1236 | } |
2e6f694f AK |
1237 | /* |
1238 | * Various K7s with broken bank 0 around. Always disable | |
1239 | * by default. | |
1240 | */ | |
1241 | if (c->x86 == 6) | |
1242 | bank[0] = 0; | |
1da177e4 | 1243 | } |
e583538f | 1244 | |
06b7a7a5 AK |
1245 | if (c->x86_vendor == X86_VENDOR_INTEL) { |
1246 | /* | |
1247 | * SDM documents that on family 6 bank 0 should not be written | |
1248 | * because it aliases to another special BIOS controlled | |
1249 | * register. | |
1250 | * But it's not aliased anymore on model 0x1a+ | |
1251 | * Don't ignore bank 0 completely because there could be a | |
1252 | * valid event later, merely don't write CTL0. | |
1253 | */ | |
1254 | ||
1255 | if (c->x86 == 6 && c->x86_model < 0x1A) | |
1256 | __set_bit(0, &dont_init_banks); | |
3c079792 AK |
1257 | |
1258 | /* | |
1259 | * All newer Intel systems support MCE broadcasting. Enable | |
1260 | * synchronization with a one second timeout. | |
1261 | */ | |
1262 | if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) && | |
1263 | monarch_timeout < 0) | |
1264 | monarch_timeout = USEC_PER_SEC; | |
06b7a7a5 | 1265 | } |
3c079792 AK |
1266 | if (monarch_timeout < 0) |
1267 | monarch_timeout = 0; | |
29b0f591 AK |
1268 | if (mce_bootlog != 0) |
1269 | mce_panic_timeout = 30; | |
d88203d1 | 1270 | } |
1da177e4 | 1271 | |
4efc0670 AK |
1272 | static void __cpuinit mce_ancient_init(struct cpuinfo_x86 *c) |
1273 | { | |
1274 | if (c->x86 != 5) | |
1275 | return; | |
1276 | switch (c->x86_vendor) { | |
1277 | case X86_VENDOR_INTEL: | |
1278 | if (mce_p5_enabled()) | |
1279 | intel_p5_mcheck_init(c); | |
1280 | break; | |
1281 | case X86_VENDOR_CENTAUR: | |
1282 | winchip_mcheck_init(c); | |
1283 | break; | |
1284 | } | |
1285 | } | |
1286 | ||
cc3ca220 | 1287 | static void mce_cpu_features(struct cpuinfo_x86 *c) |
1da177e4 LT |
1288 | { |
1289 | switch (c->x86_vendor) { | |
1290 | case X86_VENDOR_INTEL: | |
1291 | mce_intel_feature_init(c); | |
1292 | break; | |
89b831ef JS |
1293 | case X86_VENDOR_AMD: |
1294 | mce_amd_feature_init(c); | |
1295 | break; | |
1da177e4 LT |
1296 | default: |
1297 | break; | |
1298 | } | |
1299 | } | |
1300 | ||
52d168e2 AK |
1301 | static void mce_init_timer(void) |
1302 | { | |
1303 | struct timer_list *t = &__get_cpu_var(mce_timer); | |
6298c512 | 1304 | int *n = &__get_cpu_var(next_interval); |
52d168e2 | 1305 | |
6298c512 AK |
1306 | *n = check_interval * HZ; |
1307 | if (!*n) | |
52d168e2 AK |
1308 | return; |
1309 | setup_timer(t, mcheck_timer, smp_processor_id()); | |
6298c512 | 1310 | t->expires = round_jiffies(jiffies + *n); |
52d168e2 AK |
1311 | add_timer(t); |
1312 | } | |
1313 | ||
d88203d1 | 1314 | /* |
1da177e4 | 1315 | * Called for each booted CPU to set up machine checks. |
e9eee03e | 1316 | * Must be called with preempt off: |
1da177e4 | 1317 | */ |
e6982c67 | 1318 | void __cpuinit mcheck_init(struct cpuinfo_x86 *c) |
1da177e4 | 1319 | { |
4efc0670 AK |
1320 | if (mce_disabled) |
1321 | return; | |
1322 | ||
1323 | mce_ancient_init(c); | |
1324 | ||
5b4408fd | 1325 | if (!mce_available(c)) |
1da177e4 LT |
1326 | return; |
1327 | ||
0d7482e3 | 1328 | if (mce_cap_init() < 0) { |
04b2b1a4 | 1329 | mce_disabled = 1; |
0d7482e3 AK |
1330 | return; |
1331 | } | |
1332 | mce_cpu_quirks(c); | |
1333 | ||
5d727926 AK |
1334 | machine_check_vector = do_machine_check; |
1335 | ||
8be91105 | 1336 | mce_init(); |
1da177e4 | 1337 | mce_cpu_features(c); |
52d168e2 | 1338 | mce_init_timer(); |
9b1beaf2 | 1339 | INIT_WORK(&__get_cpu_var(mce_work), mce_process_work); |
1da177e4 LT |
1340 | } |
1341 | ||
1342 | /* | |
1343 | * Character device to read and clear the MCE log. | |
1344 | */ | |
1345 | ||
f528e7ba | 1346 | static DEFINE_SPINLOCK(mce_state_lock); |
e9eee03e IM |
1347 | static int open_count; /* #times opened */ |
1348 | static int open_exclu; /* already open exclusive? */ | |
f528e7ba TH |
1349 | |
1350 | static int mce_open(struct inode *inode, struct file *file) | |
1351 | { | |
1352 | spin_lock(&mce_state_lock); | |
1353 | ||
1354 | if (open_exclu || (open_count && (file->f_flags & O_EXCL))) { | |
1355 | spin_unlock(&mce_state_lock); | |
e9eee03e | 1356 | |
f528e7ba TH |
1357 | return -EBUSY; |
1358 | } | |
1359 | ||
1360 | if (file->f_flags & O_EXCL) | |
1361 | open_exclu = 1; | |
1362 | open_count++; | |
1363 | ||
1364 | spin_unlock(&mce_state_lock); | |
1365 | ||
bd78432c | 1366 | return nonseekable_open(inode, file); |
f528e7ba TH |
1367 | } |
1368 | ||
1369 | static int mce_release(struct inode *inode, struct file *file) | |
1370 | { | |
1371 | spin_lock(&mce_state_lock); | |
1372 | ||
1373 | open_count--; | |
1374 | open_exclu = 0; | |
1375 | ||
1376 | spin_unlock(&mce_state_lock); | |
1377 | ||
1378 | return 0; | |
1379 | } | |
1380 | ||
d88203d1 TG |
1381 | static void collect_tscs(void *data) |
1382 | { | |
1da177e4 | 1383 | unsigned long *cpu_tsc = (unsigned long *)data; |
d88203d1 | 1384 | |
1da177e4 | 1385 | rdtscll(cpu_tsc[smp_processor_id()]); |
d88203d1 | 1386 | } |
1da177e4 | 1387 | |
e9eee03e IM |
1388 | static DEFINE_MUTEX(mce_read_mutex); |
1389 | ||
d88203d1 TG |
1390 | static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize, |
1391 | loff_t *off) | |
1da177e4 | 1392 | { |
e9eee03e | 1393 | char __user *buf = ubuf; |
f0de53bb | 1394 | unsigned long *cpu_tsc; |
ef41df43 | 1395 | unsigned prev, next; |
1da177e4 LT |
1396 | int i, err; |
1397 | ||
6bca67f9 | 1398 | cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL); |
f0de53bb AK |
1399 | if (!cpu_tsc) |
1400 | return -ENOMEM; | |
1401 | ||
8c8b8859 | 1402 | mutex_lock(&mce_read_mutex); |
1da177e4 LT |
1403 | next = rcu_dereference(mcelog.next); |
1404 | ||
1405 | /* Only supports full reads right now */ | |
d88203d1 | 1406 | if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) { |
8c8b8859 | 1407 | mutex_unlock(&mce_read_mutex); |
f0de53bb | 1408 | kfree(cpu_tsc); |
e9eee03e | 1409 | |
1da177e4 LT |
1410 | return -EINVAL; |
1411 | } | |
1412 | ||
1413 | err = 0; | |
ef41df43 HY |
1414 | prev = 0; |
1415 | do { | |
1416 | for (i = prev; i < next; i++) { | |
1417 | unsigned long start = jiffies; | |
1418 | ||
1419 | while (!mcelog.entry[i].finished) { | |
1420 | if (time_after_eq(jiffies, start + 2)) { | |
1421 | memset(mcelog.entry + i, 0, | |
1422 | sizeof(struct mce)); | |
1423 | goto timeout; | |
1424 | } | |
1425 | cpu_relax(); | |
673242c1 | 1426 | } |
ef41df43 HY |
1427 | smp_rmb(); |
1428 | err |= copy_to_user(buf, mcelog.entry + i, | |
1429 | sizeof(struct mce)); | |
1430 | buf += sizeof(struct mce); | |
1431 | timeout: | |
1432 | ; | |
673242c1 | 1433 | } |
1da177e4 | 1434 | |
ef41df43 HY |
1435 | memset(mcelog.entry + prev, 0, |
1436 | (next - prev) * sizeof(struct mce)); | |
1437 | prev = next; | |
1438 | next = cmpxchg(&mcelog.next, prev, 0); | |
1439 | } while (next != prev); | |
1da177e4 | 1440 | |
b2b18660 | 1441 | synchronize_sched(); |
1da177e4 | 1442 | |
d88203d1 TG |
1443 | /* |
1444 | * Collect entries that were still getting written before the | |
1445 | * synchronize. | |
1446 | */ | |
15c8b6c1 | 1447 | on_each_cpu(collect_tscs, cpu_tsc, 1); |
e9eee03e | 1448 | |
d88203d1 TG |
1449 | for (i = next; i < MCE_LOG_LEN; i++) { |
1450 | if (mcelog.entry[i].finished && | |
1451 | mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) { | |
1452 | err |= copy_to_user(buf, mcelog.entry+i, | |
1453 | sizeof(struct mce)); | |
1da177e4 LT |
1454 | smp_rmb(); |
1455 | buf += sizeof(struct mce); | |
1456 | memset(&mcelog.entry[i], 0, sizeof(struct mce)); | |
1457 | } | |
d88203d1 | 1458 | } |
8c8b8859 | 1459 | mutex_unlock(&mce_read_mutex); |
f0de53bb | 1460 | kfree(cpu_tsc); |
e9eee03e | 1461 | |
d88203d1 | 1462 | return err ? -EFAULT : buf - ubuf; |
1da177e4 LT |
1463 | } |
1464 | ||
e02e68d3 TH |
1465 | static unsigned int mce_poll(struct file *file, poll_table *wait) |
1466 | { | |
1467 | poll_wait(file, &mce_wait, wait); | |
1468 | if (rcu_dereference(mcelog.next)) | |
1469 | return POLLIN | POLLRDNORM; | |
1470 | return 0; | |
1471 | } | |
1472 | ||
c68461b6 | 1473 | static long mce_ioctl(struct file *f, unsigned int cmd, unsigned long arg) |
1da177e4 LT |
1474 | { |
1475 | int __user *p = (int __user *)arg; | |
d88203d1 | 1476 | |
1da177e4 | 1477 | if (!capable(CAP_SYS_ADMIN)) |
d88203d1 | 1478 | return -EPERM; |
e9eee03e | 1479 | |
1da177e4 | 1480 | switch (cmd) { |
d88203d1 | 1481 | case MCE_GET_RECORD_LEN: |
1da177e4 LT |
1482 | return put_user(sizeof(struct mce), p); |
1483 | case MCE_GET_LOG_LEN: | |
d88203d1 | 1484 | return put_user(MCE_LOG_LEN, p); |
1da177e4 LT |
1485 | case MCE_GETCLEAR_FLAGS: { |
1486 | unsigned flags; | |
d88203d1 TG |
1487 | |
1488 | do { | |
1da177e4 | 1489 | flags = mcelog.flags; |
d88203d1 | 1490 | } while (cmpxchg(&mcelog.flags, flags, 0) != flags); |
e9eee03e | 1491 | |
d88203d1 | 1492 | return put_user(flags, p); |
1da177e4 LT |
1493 | } |
1494 | default: | |
d88203d1 TG |
1495 | return -ENOTTY; |
1496 | } | |
1da177e4 LT |
1497 | } |
1498 | ||
a1ff41bf | 1499 | /* Modified in mce-inject.c, so not static or const */ |
ea149b36 | 1500 | struct file_operations mce_chrdev_ops = { |
e9eee03e IM |
1501 | .open = mce_open, |
1502 | .release = mce_release, | |
1503 | .read = mce_read, | |
1504 | .poll = mce_poll, | |
1505 | .unlocked_ioctl = mce_ioctl, | |
1da177e4 | 1506 | }; |
ea149b36 | 1507 | EXPORT_SYMBOL_GPL(mce_chrdev_ops); |
1da177e4 LT |
1508 | |
1509 | static struct miscdevice mce_log_device = { | |
1510 | MISC_MCELOG_MINOR, | |
1511 | "mcelog", | |
1512 | &mce_chrdev_ops, | |
1513 | }; | |
1514 | ||
13503fa9 HS |
1515 | /* |
1516 | * mce=off disables machine check | |
3c079792 AK |
1517 | * mce=TOLERANCELEVEL[,monarchtimeout] (number, see above) |
1518 | * monarchtimeout is how long to wait for other CPUs on machine | |
1519 | * check, or 0 to not wait | |
13503fa9 HS |
1520 | * mce=bootlog Log MCEs from before booting. Disabled by default on AMD. |
1521 | * mce=nobootlog Don't log MCEs from before booting. | |
1522 | */ | |
1da177e4 LT |
1523 | static int __init mcheck_enable(char *str) |
1524 | { | |
4efc0670 AK |
1525 | if (*str == 0) |
1526 | enable_p5_mce(); | |
1527 | if (*str == '=') | |
1528 | str++; | |
1da177e4 | 1529 | if (!strcmp(str, "off")) |
04b2b1a4 | 1530 | mce_disabled = 1; |
13503fa9 HS |
1531 | else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog")) |
1532 | mce_bootlog = (str[0] == 'b'); | |
3c079792 | 1533 | else if (isdigit(str[0])) { |
8c566ef5 | 1534 | get_option(&str, &tolerant); |
3c079792 AK |
1535 | if (*str == ',') { |
1536 | ++str; | |
1537 | get_option(&str, &monarch_timeout); | |
1538 | } | |
1539 | } else { | |
4efc0670 | 1540 | printk(KERN_INFO "mce argument %s ignored. Please use /sys\n", |
13503fa9 HS |
1541 | str); |
1542 | return 0; | |
1543 | } | |
9b41046c | 1544 | return 1; |
1da177e4 | 1545 | } |
4efc0670 | 1546 | __setup("mce", mcheck_enable); |
1da177e4 | 1547 | |
d88203d1 | 1548 | /* |
1da177e4 | 1549 | * Sysfs support |
d88203d1 | 1550 | */ |
1da177e4 | 1551 | |
973a2dd1 AK |
1552 | /* |
1553 | * Disable machine checks on suspend and shutdown. We can't really handle | |
1554 | * them later. | |
1555 | */ | |
1556 | static int mce_disable(void) | |
1557 | { | |
1558 | int i; | |
1559 | ||
06b7a7a5 AK |
1560 | for (i = 0; i < banks; i++) { |
1561 | if (!skip_bank_init(i)) | |
1562 | wrmsrl(MSR_IA32_MC0_CTL + i*4, 0); | |
1563 | } | |
973a2dd1 AK |
1564 | return 0; |
1565 | } | |
1566 | ||
1567 | static int mce_suspend(struct sys_device *dev, pm_message_t state) | |
1568 | { | |
1569 | return mce_disable(); | |
1570 | } | |
1571 | ||
1572 | static int mce_shutdown(struct sys_device *dev) | |
1573 | { | |
1574 | return mce_disable(); | |
1575 | } | |
1576 | ||
e9eee03e IM |
1577 | /* |
1578 | * On resume clear all MCE state. Don't want to see leftovers from the BIOS. | |
1579 | * Only one CPU is active at this time, the others get re-added later using | |
1580 | * CPU hotplug: | |
1581 | */ | |
1da177e4 LT |
1582 | static int mce_resume(struct sys_device *dev) |
1583 | { | |
8be91105 | 1584 | mce_init(); |
6ec68bff | 1585 | mce_cpu_features(¤t_cpu_data); |
e9eee03e | 1586 | |
1da177e4 LT |
1587 | return 0; |
1588 | } | |
1589 | ||
52d168e2 AK |
1590 | static void mce_cpu_restart(void *data) |
1591 | { | |
1592 | del_timer_sync(&__get_cpu_var(mce_timer)); | |
1593 | if (mce_available(¤t_cpu_data)) | |
8be91105 | 1594 | mce_init(); |
52d168e2 AK |
1595 | mce_init_timer(); |
1596 | } | |
1597 | ||
1da177e4 | 1598 | /* Reinit MCEs after user configuration changes */ |
d88203d1 TG |
1599 | static void mce_restart(void) |
1600 | { | |
52d168e2 | 1601 | on_each_cpu(mce_cpu_restart, NULL, 1); |
1da177e4 LT |
1602 | } |
1603 | ||
1604 | static struct sysdev_class mce_sysclass = { | |
e9eee03e IM |
1605 | .suspend = mce_suspend, |
1606 | .shutdown = mce_shutdown, | |
1607 | .resume = mce_resume, | |
1608 | .name = "machinecheck", | |
1da177e4 LT |
1609 | }; |
1610 | ||
cb491fca | 1611 | DEFINE_PER_CPU(struct sys_device, mce_dev); |
e9eee03e IM |
1612 | |
1613 | __cpuinitdata | |
1614 | void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu); | |
1da177e4 | 1615 | |
0d7482e3 AK |
1616 | static struct sysdev_attribute *bank_attrs; |
1617 | ||
1618 | static ssize_t show_bank(struct sys_device *s, struct sysdev_attribute *attr, | |
1619 | char *buf) | |
1620 | { | |
1621 | u64 b = bank[attr - bank_attrs]; | |
e9eee03e | 1622 | |
f6d1826d | 1623 | return sprintf(buf, "%llx\n", b); |
0d7482e3 AK |
1624 | } |
1625 | ||
1626 | static ssize_t set_bank(struct sys_device *s, struct sysdev_attribute *attr, | |
9319cec8 | 1627 | const char *buf, size_t size) |
0d7482e3 | 1628 | { |
9319cec8 | 1629 | u64 new; |
e9eee03e | 1630 | |
9319cec8 | 1631 | if (strict_strtoull(buf, 0, &new) < 0) |
0d7482e3 | 1632 | return -EINVAL; |
e9eee03e | 1633 | |
0d7482e3 AK |
1634 | bank[attr - bank_attrs] = new; |
1635 | mce_restart(); | |
e9eee03e | 1636 | |
9319cec8 | 1637 | return size; |
0d7482e3 | 1638 | } |
a98f0dd3 | 1639 | |
e9eee03e IM |
1640 | static ssize_t |
1641 | show_trigger(struct sys_device *s, struct sysdev_attribute *attr, char *buf) | |
a98f0dd3 AK |
1642 | { |
1643 | strcpy(buf, trigger); | |
1644 | strcat(buf, "\n"); | |
1645 | return strlen(trigger) + 1; | |
1646 | } | |
1647 | ||
4a0b2b4d | 1648 | static ssize_t set_trigger(struct sys_device *s, struct sysdev_attribute *attr, |
e9eee03e | 1649 | const char *buf, size_t siz) |
a98f0dd3 AK |
1650 | { |
1651 | char *p; | |
1652 | int len; | |
e9eee03e | 1653 | |
a98f0dd3 AK |
1654 | strncpy(trigger, buf, sizeof(trigger)); |
1655 | trigger[sizeof(trigger)-1] = 0; | |
1656 | len = strlen(trigger); | |
1657 | p = strchr(trigger, '\n'); | |
e9eee03e IM |
1658 | |
1659 | if (*p) | |
1660 | *p = 0; | |
1661 | ||
a98f0dd3 AK |
1662 | return len; |
1663 | } | |
1664 | ||
b56f642d AK |
1665 | static ssize_t store_int_with_restart(struct sys_device *s, |
1666 | struct sysdev_attribute *attr, | |
1667 | const char *buf, size_t size) | |
1668 | { | |
1669 | ssize_t ret = sysdev_store_int(s, attr, buf, size); | |
1670 | mce_restart(); | |
1671 | return ret; | |
1672 | } | |
1673 | ||
a98f0dd3 | 1674 | static SYSDEV_ATTR(trigger, 0644, show_trigger, set_trigger); |
d95d62c0 | 1675 | static SYSDEV_INT_ATTR(tolerant, 0644, tolerant); |
3c079792 | 1676 | static SYSDEV_INT_ATTR(monarch_timeout, 0644, monarch_timeout); |
e9eee03e | 1677 | |
b56f642d AK |
1678 | static struct sysdev_ext_attribute attr_check_interval = { |
1679 | _SYSDEV_ATTR(check_interval, 0644, sysdev_show_int, | |
1680 | store_int_with_restart), | |
1681 | &check_interval | |
1682 | }; | |
e9eee03e | 1683 | |
cb491fca | 1684 | static struct sysdev_attribute *mce_attrs[] = { |
b56f642d | 1685 | &attr_tolerant.attr, &attr_check_interval.attr, &attr_trigger, |
3c079792 | 1686 | &attr_monarch_timeout.attr, |
a98f0dd3 AK |
1687 | NULL |
1688 | }; | |
1da177e4 | 1689 | |
cb491fca | 1690 | static cpumask_var_t mce_dev_initialized; |
bae19fe0 | 1691 | |
e9eee03e | 1692 | /* Per cpu sysdev init. All of the cpus still share the same ctrl bank: */ |
91c6d400 | 1693 | static __cpuinit int mce_create_device(unsigned int cpu) |
1da177e4 LT |
1694 | { |
1695 | int err; | |
73ca5358 | 1696 | int i; |
92cb7612 | 1697 | |
90367556 | 1698 | if (!mce_available(&boot_cpu_data)) |
91c6d400 AK |
1699 | return -EIO; |
1700 | ||
cb491fca IM |
1701 | memset(&per_cpu(mce_dev, cpu).kobj, 0, sizeof(struct kobject)); |
1702 | per_cpu(mce_dev, cpu).id = cpu; | |
1703 | per_cpu(mce_dev, cpu).cls = &mce_sysclass; | |
91c6d400 | 1704 | |
cb491fca | 1705 | err = sysdev_register(&per_cpu(mce_dev, cpu)); |
d435d862 AM |
1706 | if (err) |
1707 | return err; | |
1708 | ||
cb491fca IM |
1709 | for (i = 0; mce_attrs[i]; i++) { |
1710 | err = sysdev_create_file(&per_cpu(mce_dev, cpu), mce_attrs[i]); | |
d435d862 AM |
1711 | if (err) |
1712 | goto error; | |
1713 | } | |
0d7482e3 | 1714 | for (i = 0; i < banks; i++) { |
cb491fca | 1715 | err = sysdev_create_file(&per_cpu(mce_dev, cpu), |
0d7482e3 AK |
1716 | &bank_attrs[i]); |
1717 | if (err) | |
1718 | goto error2; | |
1719 | } | |
cb491fca | 1720 | cpumask_set_cpu(cpu, mce_dev_initialized); |
91c6d400 | 1721 | |
d435d862 | 1722 | return 0; |
0d7482e3 | 1723 | error2: |
cb491fca IM |
1724 | while (--i >= 0) |
1725 | sysdev_remove_file(&per_cpu(mce_dev, cpu), &bank_attrs[i]); | |
d435d862 | 1726 | error: |
cb491fca IM |
1727 | while (--i >= 0) |
1728 | sysdev_remove_file(&per_cpu(mce_dev, cpu), mce_attrs[i]); | |
1729 | ||
1730 | sysdev_unregister(&per_cpu(mce_dev, cpu)); | |
d435d862 | 1731 | |
91c6d400 AK |
1732 | return err; |
1733 | } | |
1734 | ||
2d9cd6c2 | 1735 | static __cpuinit void mce_remove_device(unsigned int cpu) |
91c6d400 | 1736 | { |
73ca5358 SL |
1737 | int i; |
1738 | ||
cb491fca | 1739 | if (!cpumask_test_cpu(cpu, mce_dev_initialized)) |
bae19fe0 AH |
1740 | return; |
1741 | ||
cb491fca IM |
1742 | for (i = 0; mce_attrs[i]; i++) |
1743 | sysdev_remove_file(&per_cpu(mce_dev, cpu), mce_attrs[i]); | |
1744 | ||
0d7482e3 | 1745 | for (i = 0; i < banks; i++) |
cb491fca IM |
1746 | sysdev_remove_file(&per_cpu(mce_dev, cpu), &bank_attrs[i]); |
1747 | ||
1748 | sysdev_unregister(&per_cpu(mce_dev, cpu)); | |
1749 | cpumask_clear_cpu(cpu, mce_dev_initialized); | |
91c6d400 | 1750 | } |
91c6d400 | 1751 | |
d6b75584 | 1752 | /* Make sure there are no machine checks on offlined CPUs. */ |
ec5b3d32 | 1753 | static void mce_disable_cpu(void *h) |
d6b75584 | 1754 | { |
88ccbedd | 1755 | unsigned long action = *(unsigned long *)h; |
cb491fca | 1756 | int i; |
d6b75584 AK |
1757 | |
1758 | if (!mce_available(¤t_cpu_data)) | |
1759 | return; | |
88ccbedd AK |
1760 | if (!(action & CPU_TASKS_FROZEN)) |
1761 | cmci_clear(); | |
06b7a7a5 AK |
1762 | for (i = 0; i < banks; i++) { |
1763 | if (!skip_bank_init(i)) | |
1764 | wrmsrl(MSR_IA32_MC0_CTL + i*4, 0); | |
1765 | } | |
d6b75584 AK |
1766 | } |
1767 | ||
ec5b3d32 | 1768 | static void mce_reenable_cpu(void *h) |
d6b75584 | 1769 | { |
88ccbedd | 1770 | unsigned long action = *(unsigned long *)h; |
e9eee03e | 1771 | int i; |
d6b75584 AK |
1772 | |
1773 | if (!mce_available(¤t_cpu_data)) | |
1774 | return; | |
e9eee03e | 1775 | |
88ccbedd AK |
1776 | if (!(action & CPU_TASKS_FROZEN)) |
1777 | cmci_reenable(); | |
06b7a7a5 AK |
1778 | for (i = 0; i < banks; i++) { |
1779 | if (!skip_bank_init(i)) | |
1780 | wrmsrl(MSR_IA32_MC0_CTL + i*4, bank[i]); | |
1781 | } | |
d6b75584 AK |
1782 | } |
1783 | ||
91c6d400 | 1784 | /* Get notified when a cpu comes on/off. Be hotplug friendly. */ |
e9eee03e IM |
1785 | static int __cpuinit |
1786 | mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) | |
91c6d400 AK |
1787 | { |
1788 | unsigned int cpu = (unsigned long)hcpu; | |
52d168e2 | 1789 | struct timer_list *t = &per_cpu(mce_timer, cpu); |
91c6d400 AK |
1790 | |
1791 | switch (action) { | |
bae19fe0 AH |
1792 | case CPU_ONLINE: |
1793 | case CPU_ONLINE_FROZEN: | |
1794 | mce_create_device(cpu); | |
8735728e RW |
1795 | if (threshold_cpu_callback) |
1796 | threshold_cpu_callback(action, cpu); | |
91c6d400 | 1797 | break; |
91c6d400 | 1798 | case CPU_DEAD: |
8bb78442 | 1799 | case CPU_DEAD_FROZEN: |
8735728e RW |
1800 | if (threshold_cpu_callback) |
1801 | threshold_cpu_callback(action, cpu); | |
91c6d400 AK |
1802 | mce_remove_device(cpu); |
1803 | break; | |
52d168e2 AK |
1804 | case CPU_DOWN_PREPARE: |
1805 | case CPU_DOWN_PREPARE_FROZEN: | |
1806 | del_timer_sync(t); | |
88ccbedd | 1807 | smp_call_function_single(cpu, mce_disable_cpu, &action, 1); |
52d168e2 AK |
1808 | break; |
1809 | case CPU_DOWN_FAILED: | |
1810 | case CPU_DOWN_FAILED_FROZEN: | |
6298c512 AK |
1811 | t->expires = round_jiffies(jiffies + |
1812 | __get_cpu_var(next_interval)); | |
52d168e2 | 1813 | add_timer_on(t, cpu); |
88ccbedd AK |
1814 | smp_call_function_single(cpu, mce_reenable_cpu, &action, 1); |
1815 | break; | |
1816 | case CPU_POST_DEAD: | |
1817 | /* intentionally ignoring frozen here */ | |
1818 | cmci_rediscover(cpu); | |
52d168e2 | 1819 | break; |
91c6d400 | 1820 | } |
bae19fe0 | 1821 | return NOTIFY_OK; |
91c6d400 AK |
1822 | } |
1823 | ||
1e35669d | 1824 | static struct notifier_block mce_cpu_notifier __cpuinitdata = { |
91c6d400 AK |
1825 | .notifier_call = mce_cpu_callback, |
1826 | }; | |
1827 | ||
0d7482e3 AK |
1828 | static __init int mce_init_banks(void) |
1829 | { | |
1830 | int i; | |
1831 | ||
1832 | bank_attrs = kzalloc(sizeof(struct sysdev_attribute) * banks, | |
1833 | GFP_KERNEL); | |
1834 | if (!bank_attrs) | |
1835 | return -ENOMEM; | |
1836 | ||
1837 | for (i = 0; i < banks; i++) { | |
1838 | struct sysdev_attribute *a = &bank_attrs[i]; | |
e9eee03e IM |
1839 | |
1840 | a->attr.name = kasprintf(GFP_KERNEL, "bank%d", i); | |
0d7482e3 AK |
1841 | if (!a->attr.name) |
1842 | goto nomem; | |
e9eee03e IM |
1843 | |
1844 | a->attr.mode = 0644; | |
1845 | a->show = show_bank; | |
1846 | a->store = set_bank; | |
0d7482e3 AK |
1847 | } |
1848 | return 0; | |
1849 | ||
1850 | nomem: | |
1851 | while (--i >= 0) | |
1852 | kfree(bank_attrs[i].attr.name); | |
1853 | kfree(bank_attrs); | |
1854 | bank_attrs = NULL; | |
e9eee03e | 1855 | |
0d7482e3 AK |
1856 | return -ENOMEM; |
1857 | } | |
1858 | ||
91c6d400 AK |
1859 | static __init int mce_init_device(void) |
1860 | { | |
1861 | int err; | |
1862 | int i = 0; | |
1863 | ||
1da177e4 LT |
1864 | if (!mce_available(&boot_cpu_data)) |
1865 | return -EIO; | |
0d7482e3 | 1866 | |
cb491fca | 1867 | alloc_cpumask_var(&mce_dev_initialized, GFP_KERNEL); |
996867d0 | 1868 | |
0d7482e3 AK |
1869 | err = mce_init_banks(); |
1870 | if (err) | |
1871 | return err; | |
1872 | ||
1da177e4 | 1873 | err = sysdev_class_register(&mce_sysclass); |
d435d862 AM |
1874 | if (err) |
1875 | return err; | |
91c6d400 AK |
1876 | |
1877 | for_each_online_cpu(i) { | |
d435d862 AM |
1878 | err = mce_create_device(i); |
1879 | if (err) | |
1880 | return err; | |
91c6d400 AK |
1881 | } |
1882 | ||
be6b5a35 | 1883 | register_hotcpu_notifier(&mce_cpu_notifier); |
1da177e4 | 1884 | misc_register(&mce_log_device); |
e9eee03e | 1885 | |
1da177e4 | 1886 | return err; |
1da177e4 | 1887 | } |
91c6d400 | 1888 | |
1da177e4 | 1889 | device_initcall(mce_init_device); |
a988d334 | 1890 | |
4efc0670 | 1891 | #else /* CONFIG_X86_OLD_MCE: */ |
a988d334 | 1892 | |
a988d334 IM |
1893 | int nr_mce_banks; |
1894 | EXPORT_SYMBOL_GPL(nr_mce_banks); /* non-fatal.o */ | |
1895 | ||
a988d334 IM |
1896 | /* This has to be run for each processor */ |
1897 | void mcheck_init(struct cpuinfo_x86 *c) | |
1898 | { | |
1899 | if (mce_disabled == 1) | |
1900 | return; | |
1901 | ||
1902 | switch (c->x86_vendor) { | |
1903 | case X86_VENDOR_AMD: | |
1904 | amd_mcheck_init(c); | |
1905 | break; | |
1906 | ||
1907 | case X86_VENDOR_INTEL: | |
1908 | if (c->x86 == 5) | |
1909 | intel_p5_mcheck_init(c); | |
1910 | if (c->x86 == 6) | |
1911 | intel_p6_mcheck_init(c); | |
1912 | if (c->x86 == 15) | |
1913 | intel_p4_mcheck_init(c); | |
1914 | break; | |
1915 | ||
1916 | case X86_VENDOR_CENTAUR: | |
1917 | if (c->x86 == 5) | |
1918 | winchip_mcheck_init(c); | |
1919 | break; | |
1920 | ||
1921 | default: | |
1922 | break; | |
1923 | } | |
b659294b | 1924 | printk(KERN_INFO "mce: CPU supports %d MCE banks\n", nr_mce_banks); |
a988d334 IM |
1925 | } |
1926 | ||
a988d334 IM |
1927 | static int __init mcheck_enable(char *str) |
1928 | { | |
1929 | mce_disabled = -1; | |
1930 | return 1; | |
1931 | } | |
1932 | ||
a988d334 IM |
1933 | __setup("mce", mcheck_enable); |
1934 | ||
d7c3c9a6 AK |
1935 | #endif /* CONFIG_X86_OLD_MCE */ |
1936 | ||
1937 | /* | |
1938 | * Old style boot options parsing. Only for compatibility. | |
1939 | */ | |
1940 | static int __init mcheck_disable(char *str) | |
1941 | { | |
1942 | mce_disabled = 1; | |
1943 | return 1; | |
1944 | } | |
1945 | __setup("nomce", mcheck_disable); |